├── .dockerignore
├── .gitignore
├── .goxc.json
├── .ruby-version
├── Dockerfile
├── Gemfile
├── Gemfile.lock
├── LICENSE
├── README.md
├── Rakefile
├── agent
│   ├── agent.go
│   ├── agent_test.go
│   ├── client.go
│   ├── client_test.go
│   ├── detect
│   │   ├── bindata.go
│   │   └── detect.go
│   ├── mocks.go
│   ├── processwatcher.go
│   ├── resources
│   │   └── detect_linux.sh
│   ├── server.go
│   ├── server_test.go
│   ├── textwatcher.go
│   ├── upgrade.go
│   ├── upgrade_test.go
│   ├── watcher.go
│   └── watcher_test.go
├── appcanary-hero.png
├── circle.yml
├── conf
│   ├── common.go
│   ├── conf_test.go
│   ├── consts.go
│   ├── env.go
│   ├── toml.go
│   ├── yaml.go
│   └── yaml_test.go
├── dist
│   └── .gitignore
├── examples
│   └── agent.conf
├── main.go
├── package
│   ├── packager.rb
│   ├── packager_mgmt.rb
│   ├── packager_models.rb
│   ├── packager_recipes.rb
│   └── prune.rb
├── package_files
│   ├── config
│   │   ├── etc
│   │   │   └── appcanary
│   │   │       ├── agent.yml
│   │   │       ├── dpkg.agent.yml
│   │   │       └── rpm.agent.yml
│   │   └── var
│   │       └── db
│   │           └── appcanary
│   │               └── server.yml
│   ├── deb
│   │   ├── systemd
│   │   │   ├── files
│   │   │   │   └── etc
│   │   │   │       ├── logrotate.d
│   │   │   │       │   └── appcanary
│   │   │   │       └── systemd
│   │   │   │           └── system
│   │   │   │               └── appcanary.service
│   │   │   ├── post-install.sh
│   │   │   ├── post-remove.sh
│   │   │   └── post-upgrade.sh
│   │   ├── systemv
│   │   │   ├── files
│   │   │   │   └── etc
│   │   │   │       ├── default
│   │   │   │       │   └── appcanary
│   │   │   │       ├── init.d
│   │   │   │       │   └── appcanary
│   │   │   │       └── logrotate.d
│   │   │   │           └── appcanary
│   │   │   ├── post-install.sh
│   │   │   ├── post-remove.sh
│   │   │   └── post-upgrade.sh
│   │   └── upstart
│   │       ├── files
│   │       │   └── etc
│   │       │       ├── init
│   │       │       │   └── appcanary.conf
│   │       │       └── logrotate.d
│   │       │           └── appcanary
│   │       ├── post-install.sh
│   │       ├── post-remove.sh
│   │       └── post-upgrade.sh
│   └── rpm
│       ├── systemd
│       │   ├── files
│       │   │   └── etc
│       │   │       ├── logrotate.d
│       │   │       │   └── appcanary
│       │   │       └── systemd
│       │   │           └── system
│       │   │               └── appcanary.service
│       │   ├── post-install.sh
│       │   ├── post-remove.sh
│       │   └── post-upgrade.sh
│       └── systemv
│           ├── files
│           │   └── etc
│           │       ├── logrotate.d
│           │       │   └── appcanary
│           │       └── rc.d
│           │           └── init.d
│           │               └── appcanary
│           ├── post-install.sh
│           ├── post-remove.sh
│           └── post-upgrade.sh
├── readme.gif
├── releases
│   └── .gitignore
└── test
    ├── data
    │   ├── .gitignore
    │   ├── Gemfile.lock
    │   ├── agent.yml
    │   ├── dpkg
    │   │   └── available
    │   ├── lsb-release
    │   ├── old_toml_server.conf
    │   ├── old_toml_test.conf
    │   ├── os-release
    │   ├── pointless
    │   ├── redhat-release
    │   ├── server.yml
    │   ├── spector.conf
    │   ├── test2.conf
    │   ├── test3.yml
    │   └── test_server3.yml
    ├── dump.json
    ├── pkg
    │   └── Rakefile
    ├── test_detect.sh
    ├── test_server.rb
    └── var
        └── .gitkeep
/.dockerignore:
--------------------------------------------------------------------------------
1 | dist
2 | releases
3 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Compiled Object files, Static and Dynamic libs (Shared Objects)
2 | *.o
3 | *.a
4 | *.so
5 |
6 | # Folders
7 | _obj
8 | _test
9 |
10 | # Architecture specific extensions/prefixes
11 | *.[568vq]
12 | [568vq].out
13 |
14 | *.cgo1.go
15 | *.cgo2.c
16 | _cgo_defun.c
17 | _cgo_gotypes.go
18 | _cgo_export.*
19 |
20 | _testmain.go
21 |
22 | *.exe
23 | *.test
24 | *.prof
25 |
26 | *.db
27 |
28 | .vendor/
29 | cmd/canary-agent/tmp/
30 | bin/
31 |
32 |
33 | *~
34 | \#*\#
35 | .\#*
36 |
37 | *.test
38 | *.out
39 | test/var/
40 |
41 | .goxc.local.json
42 | .aws.yml
43 | .DS_Store
44 |
45 | test/pkg/centos
46 | test/pkg/debian
47 | test/pkg/ubuntu
48 |
49 | # Docker bits
50 | docker_build
51 | docker_build.pub
52 |
--------------------------------------------------------------------------------
/.goxc.json:
--------------------------------------------------------------------------------
1 | {
2 | "AppName": "appcanary",
3 | "PackageVersion": "0.0.1",
4 | "TaskSettings": {
5 | "bintray": {
6 | "downloadspage": "bintray.md",
7 | "package": "canary-agent",
8 | "repository": "Deb",
9 | "subject": "canary"
10 | },
11 | "deb": {
12 | "armarch": "",
13 | "bin-dir": "/usr/bin",
14 | "go-sources-dir": ".",
15 | "metadata": {
16 | "maintainer": "unknown",
17 | "maintainerEmail": "unknown@example.com"
18 | },
19 | "metadata-deb": {
20 | "Build-Depends": "debhelper (\u003e=4.0.0), golang-go, gcc",
21 | "Depends": ""
22 | },
23 | "other-mappped-files": {
24 | "/": "deb-contents/"
25 | },
26 | "rmtemp": true
27 | }
28 | },
29 | "ConfigVersion": "0.9"
30 | }
31 |
--------------------------------------------------------------------------------
/.ruby-version:
--------------------------------------------------------------------------------
1 | ruby-2.3.4
2 |
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM golang:latest
2 |
3 | RUN yes | apt-get update
4 | RUN yes | apt-get upgrade
5 | RUN yes | apt-get install rake
6 |
7 | RUN mkdir -p /go/src/github.com/appcanary/agent
8 |
9 | RUN mkdir -p /root/.ssh
10 | RUN touch /root/.ssh/known_hosts
11 | RUN ssh-keyscan github.com >> /root/.ssh/known_hosts
12 |
13 | ADD . /go/src/github.com/appcanary/agent
14 | WORKDIR /go/src/github.com/appcanary/agent
15 |
16 | RUN go get -t -d -v ./...
17 |
--------------------------------------------------------------------------------
/Gemfile:
--------------------------------------------------------------------------------
1 | # A sample Gemfile
2 | source "https://rubygems.org"
3 |
4 | gem "fpm"
5 | gem "rake"
6 | gem "rest-client"
7 | gem "package_cloud"
8 | gem 'pry'
9 | gem 'sinatra'
10 |
--------------------------------------------------------------------------------
/Gemfile.lock:
--------------------------------------------------------------------------------
1 | GEM
2 | remote: https://rubygems.org/
3 | specs:
4 | arr-pm (0.0.10)
5 | cabin (> 0)
6 | backports (3.6.4)
7 | cabin (0.7.1)
8 | childprocess (0.5.6)
9 | ffi (~> 1.0, >= 1.0.11)
10 | clamp (0.6.5)
11 | coderay (1.1.0)
12 | colorize (0.7.7)
13 | ffi (1.9.8)
14 | fpm (1.3.3)
15 | arr-pm (~> 0.0.9)
16 | backports (>= 2.6.2)
17 | cabin (>= 0.6.0)
18 | childprocess
19 | clamp (~> 0.6)
20 | ffi
21 | json (>= 1.7.7)
22 | highline (1.7.2)
23 | json (1.8.3)
24 | json_pure (1.8.2)
25 | method_source (0.8.2)
26 | mime-types (1.25.1)
27 | package_cloud (0.2.19)
28 | colorize (~> 0.6)
29 | highline (~> 1.6)
30 | json_pure (~> 1.8)
31 | rest-client (~> 1.6)
32 | thor (~> 0.18)
33 | pry (0.10.1)
34 | coderay (~> 1.1.0)
35 | method_source (~> 0.8.1)
36 | slop (~> 3.4)
37 | rack (1.6.4)
38 | rack-protection (1.5.3)
39 | rack
40 | rake (10.4.2)
41 | rdoc (5.1.0)
42 | rest-client (1.6.8)
43 | mime-types (~> 1.16)
44 | rdoc (>= 2.4.2)
45 | sinatra (1.4.7)
46 | rack (~> 1.5)
47 | rack-protection (~> 1.4)
48 | tilt (>= 1.3, < 3)
49 | slop (3.6.0)
50 | thor (0.19.1)
51 | tilt (2.0.2)
52 |
53 | PLATFORMS
54 | ruby
55 |
56 | DEPENDENCIES
57 | fpm
58 | package_cloud
59 | pry
60 | rake
61 | rest-client
62 | sinatra
63 |
64 | BUNDLED WITH
65 | 1.15.0
66 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | GNU GENERAL PUBLIC LICENSE
2 | Version 3, 29 June 2007
3 |
4 | Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
5 | Everyone is permitted to copy and distribute verbatim copies
6 | of this license document, but changing it is not allowed.
7 |
8 | Preamble
9 |
10 | The GNU General Public License is a free, copyleft license for
11 | software and other kinds of works.
12 |
13 | The licenses for most software and other practical works are designed
14 | to take away your freedom to share and change the works. By contrast,
15 | the GNU General Public License is intended to guarantee your freedom to
16 | share and change all versions of a program--to make sure it remains free
17 | software for all its users. We, the Free Software Foundation, use the
18 | GNU General Public License for most of our software; it applies also to
19 | any other work released this way by its authors. You can apply it to
20 | your programs, too.
21 |
22 | When we speak of free software, we are referring to freedom, not
23 | price. Our General Public Licenses are designed to make sure that you
24 | have the freedom to distribute copies of free software (and charge for
25 | them if you wish), that you receive source code or can get it if you
26 | want it, that you can change the software or use pieces of it in new
27 | free programs, and that you know you can do these things.
28 |
29 | To protect your rights, we need to prevent others from denying you
30 | these rights or asking you to surrender the rights. Therefore, you have
31 | certain responsibilities if you distribute copies of the software, or if
32 | you modify it: responsibilities to respect the freedom of others.
33 |
34 | For example, if you distribute copies of such a program, whether
35 | gratis or for a fee, you must pass on to the recipients the same
36 | freedoms that you received. You must make sure that they, too, receive
37 | or can get the source code. And you must show them these terms so they
38 | know their rights.
39 |
40 | Developers that use the GNU GPL protect your rights with two steps:
41 | (1) assert copyright on the software, and (2) offer you this License
42 | giving you legal permission to copy, distribute and/or modify it.
43 |
44 | For the developers' and authors' protection, the GPL clearly explains
45 | that there is no warranty for this free software. For both users' and
46 | authors' sake, the GPL requires that modified versions be marked as
47 | changed, so that their problems will not be attributed erroneously to
48 | authors of previous versions.
49 |
50 | Some devices are designed to deny users access to install or run
51 | modified versions of the software inside them, although the manufacturer
52 | can do so. This is fundamentally incompatible with the aim of
53 | protecting users' freedom to change the software. The systematic
54 | pattern of such abuse occurs in the area of products for individuals to
55 | use, which is precisely where it is most unacceptable. Therefore, we
56 | have designed this version of the GPL to prohibit the practice for those
57 | products. If such problems arise substantially in other domains, we
58 | stand ready to extend this provision to those domains in future versions
59 | of the GPL, as needed to protect the freedom of users.
60 |
61 | Finally, every program is threatened constantly by software patents.
62 | States should not allow patents to restrict development and use of
63 | software on general-purpose computers, but in those that do, we wish to
64 | avoid the special danger that patents applied to a free program could
65 | make it effectively proprietary. To prevent this, the GPL assures that
66 | patents cannot be used to render the program non-free.
67 |
68 | The precise terms and conditions for copying, distribution and
69 | modification follow.
70 |
71 | TERMS AND CONDITIONS
72 |
73 | 0. Definitions.
74 |
75 | "This License" refers to version 3 of the GNU General Public License.
76 |
77 | "Copyright" also means copyright-like laws that apply to other kinds of
78 | works, such as semiconductor masks.
79 |
80 | "The Program" refers to any copyrightable work licensed under this
81 | License. Each licensee is addressed as "you". "Licensees" and
82 | "recipients" may be individuals or organizations.
83 |
84 | To "modify" a work means to copy from or adapt all or part of the work
85 | in a fashion requiring copyright permission, other than the making of an
86 | exact copy. The resulting work is called a "modified version" of the
87 | earlier work or a work "based on" the earlier work.
88 |
89 | A "covered work" means either the unmodified Program or a work based
90 | on the Program.
91 |
92 | To "propagate" a work means to do anything with it that, without
93 | permission, would make you directly or secondarily liable for
94 | infringement under applicable copyright law, except executing it on a
95 | computer or modifying a private copy. Propagation includes copying,
96 | distribution (with or without modification), making available to the
97 | public, and in some countries other activities as well.
98 |
99 | To "convey" a work means any kind of propagation that enables other
100 | parties to make or receive copies. Mere interaction with a user through
101 | a computer network, with no transfer of a copy, is not conveying.
102 |
103 | An interactive user interface displays "Appropriate Legal Notices"
104 | to the extent that it includes a convenient and prominently visible
105 | feature that (1) displays an appropriate copyright notice, and (2)
106 | tells the user that there is no warranty for the work (except to the
107 | extent that warranties are provided), that licensees may convey the
108 | work under this License, and how to view a copy of this License. If
109 | the interface presents a list of user commands or options, such as a
110 | menu, a prominent item in the list meets this criterion.
111 |
112 | 1. Source Code.
113 |
114 | The "source code" for a work means the preferred form of the work
115 | for making modifications to it. "Object code" means any non-source
116 | form of a work.
117 |
118 | A "Standard Interface" means an interface that either is an official
119 | standard defined by a recognized standards body, or, in the case of
120 | interfaces specified for a particular programming language, one that
121 | is widely used among developers working in that language.
122 |
123 | The "System Libraries" of an executable work include anything, other
124 | than the work as a whole, that (a) is included in the normal form of
125 | packaging a Major Component, but which is not part of that Major
126 | Component, and (b) serves only to enable use of the work with that
127 | Major Component, or to implement a Standard Interface for which an
128 | implementation is available to the public in source code form. A
129 | "Major Component", in this context, means a major essential component
130 | (kernel, window system, and so on) of the specific operating system
131 | (if any) on which the executable work runs, or a compiler used to
132 | produce the work, or an object code interpreter used to run it.
133 |
134 | The "Corresponding Source" for a work in object code form means all
135 | the source code needed to generate, install, and (for an executable
136 | work) run the object code and to modify the work, including scripts to
137 | control those activities. However, it does not include the work's
138 | System Libraries, or general-purpose tools or generally available free
139 | programs which are used unmodified in performing those activities but
140 | which are not part of the work. For example, Corresponding Source
141 | includes interface definition files associated with source files for
142 | the work, and the source code for shared libraries and dynamically
143 | linked subprograms that the work is specifically designed to require,
144 | such as by intimate data communication or control flow between those
145 | subprograms and other parts of the work.
146 |
147 | The Corresponding Source need not include anything that users
148 | can regenerate automatically from other parts of the Corresponding
149 | Source.
150 |
151 | The Corresponding Source for a work in source code form is that
152 | same work.
153 |
154 | 2. Basic Permissions.
155 |
156 | All rights granted under this License are granted for the term of
157 | copyright on the Program, and are irrevocable provided the stated
158 | conditions are met. This License explicitly affirms your unlimited
159 | permission to run the unmodified Program. The output from running a
160 | covered work is covered by this License only if the output, given its
161 | content, constitutes a covered work. This License acknowledges your
162 | rights of fair use or other equivalent, as provided by copyright law.
163 |
164 | You may make, run and propagate covered works that you do not
165 | convey, without conditions so long as your license otherwise remains
166 | in force. You may convey covered works to others for the sole purpose
167 | of having them make modifications exclusively for you, or provide you
168 | with facilities for running those works, provided that you comply with
169 | the terms of this License in conveying all material for which you do
170 | not control copyright. Those thus making or running the covered works
171 | for you must do so exclusively on your behalf, under your direction
172 | and control, on terms that prohibit them from making any copies of
173 | your copyrighted material outside their relationship with you.
174 |
175 | Conveying under any other circumstances is permitted solely under
176 | the conditions stated below. Sublicensing is not allowed; section 10
177 | makes it unnecessary.
178 |
179 | 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
180 |
181 | No covered work shall be deemed part of an effective technological
182 | measure under any applicable law fulfilling obligations under article
183 | 11 of the WIPO copyright treaty adopted on 20 December 1996, or
184 | similar laws prohibiting or restricting circumvention of such
185 | measures.
186 |
187 | When you convey a covered work, you waive any legal power to forbid
188 | circumvention of technological measures to the extent such circumvention
189 | is effected by exercising rights under this License with respect to
190 | the covered work, and you disclaim any intention to limit operation or
191 | modification of the work as a means of enforcing, against the work's
192 | users, your or third parties' legal rights to forbid circumvention of
193 | technological measures.
194 |
195 | 4. Conveying Verbatim Copies.
196 |
197 | You may convey verbatim copies of the Program's source code as you
198 | receive it, in any medium, provided that you conspicuously and
199 | appropriately publish on each copy an appropriate copyright notice;
200 | keep intact all notices stating that this License and any
201 | non-permissive terms added in accord with section 7 apply to the code;
202 | keep intact all notices of the absence of any warranty; and give all
203 | recipients a copy of this License along with the Program.
204 |
205 | You may charge any price or no price for each copy that you convey,
206 | and you may offer support or warranty protection for a fee.
207 |
208 | 5. Conveying Modified Source Versions.
209 |
210 | You may convey a work based on the Program, or the modifications to
211 | produce it from the Program, in the form of source code under the
212 | terms of section 4, provided that you also meet all of these conditions:
213 |
214 | a) The work must carry prominent notices stating that you modified
215 | it, and giving a relevant date.
216 |
217 | b) The work must carry prominent notices stating that it is
218 | released under this License and any conditions added under section
219 | 7. This requirement modifies the requirement in section 4 to
220 | "keep intact all notices".
221 |
222 | c) You must license the entire work, as a whole, under this
223 | License to anyone who comes into possession of a copy. This
224 | License will therefore apply, along with any applicable section 7
225 | additional terms, to the whole of the work, and all its parts,
226 | regardless of how they are packaged. This License gives no
227 | permission to license the work in any other way, but it does not
228 | invalidate such permission if you have separately received it.
229 |
230 | d) If the work has interactive user interfaces, each must display
231 | Appropriate Legal Notices; however, if the Program has interactive
232 | interfaces that do not display Appropriate Legal Notices, your
233 | work need not make them do so.
234 |
235 | A compilation of a covered work with other separate and independent
236 | works, which are not by their nature extensions of the covered work,
237 | and which are not combined with it such as to form a larger program,
238 | in or on a volume of a storage or distribution medium, is called an
239 | "aggregate" if the compilation and its resulting copyright are not
240 | used to limit the access or legal rights of the compilation's users
241 | beyond what the individual works permit. Inclusion of a covered work
242 | in an aggregate does not cause this License to apply to the other
243 | parts of the aggregate.
244 |
245 | 6. Conveying Non-Source Forms.
246 |
247 | You may convey a covered work in object code form under the terms
248 | of sections 4 and 5, provided that you also convey the
249 | machine-readable Corresponding Source under the terms of this License,
250 | in one of these ways:
251 |
252 | a) Convey the object code in, or embodied in, a physical product
253 | (including a physical distribution medium), accompanied by the
254 | Corresponding Source fixed on a durable physical medium
255 | customarily used for software interchange.
256 |
257 | b) Convey the object code in, or embodied in, a physical product
258 | (including a physical distribution medium), accompanied by a
259 | written offer, valid for at least three years and valid for as
260 | long as you offer spare parts or customer support for that product
261 | model, to give anyone who possesses the object code either (1) a
262 | copy of the Corresponding Source for all the software in the
263 | product that is covered by this License, on a durable physical
264 | medium customarily used for software interchange, for a price no
265 | more than your reasonable cost of physically performing this
266 | conveying of source, or (2) access to copy the
267 | Corresponding Source from a network server at no charge.
268 |
269 | c) Convey individual copies of the object code with a copy of the
270 | written offer to provide the Corresponding Source. This
271 | alternative is allowed only occasionally and noncommercially, and
272 | only if you received the object code with such an offer, in accord
273 | with subsection 6b.
274 |
275 | d) Convey the object code by offering access from a designated
276 | place (gratis or for a charge), and offer equivalent access to the
277 | Corresponding Source in the same way through the same place at no
278 | further charge. You need not require recipients to copy the
279 | Corresponding Source along with the object code. If the place to
280 | copy the object code is a network server, the Corresponding Source
281 | may be on a different server (operated by you or a third party)
282 | that supports equivalent copying facilities, provided you maintain
283 | clear directions next to the object code saying where to find the
284 | Corresponding Source. Regardless of what server hosts the
285 | Corresponding Source, you remain obligated to ensure that it is
286 | available for as long as needed to satisfy these requirements.
287 |
288 | e) Convey the object code using peer-to-peer transmission, provided
289 | you inform other peers where the object code and Corresponding
290 | Source of the work are being offered to the general public at no
291 | charge under subsection 6d.
292 |
293 | A separable portion of the object code, whose source code is excluded
294 | from the Corresponding Source as a System Library, need not be
295 | included in conveying the object code work.
296 |
297 | A "User Product" is either (1) a "consumer product", which means any
298 | tangible personal property which is normally used for personal, family,
299 | or household purposes, or (2) anything designed or sold for incorporation
300 | into a dwelling. In determining whether a product is a consumer product,
301 | doubtful cases shall be resolved in favor of coverage. For a particular
302 | product received by a particular user, "normally used" refers to a
303 | typical or common use of that class of product, regardless of the status
304 | of the particular user or of the way in which the particular user
305 | actually uses, or expects or is expected to use, the product. A product
306 | is a consumer product regardless of whether the product has substantial
307 | commercial, industrial or non-consumer uses, unless such uses represent
308 | the only significant mode of use of the product.
309 |
310 | "Installation Information" for a User Product means any methods,
311 | procedures, authorization keys, or other information required to install
312 | and execute modified versions of a covered work in that User Product from
313 | a modified version of its Corresponding Source. The information must
314 | suffice to ensure that the continued functioning of the modified object
315 | code is in no case prevented or interfered with solely because
316 | modification has been made.
317 |
318 | If you convey an object code work under this section in, or with, or
319 | specifically for use in, a User Product, and the conveying occurs as
320 | part of a transaction in which the right of possession and use of the
321 | User Product is transferred to the recipient in perpetuity or for a
322 | fixed term (regardless of how the transaction is characterized), the
323 | Corresponding Source conveyed under this section must be accompanied
324 | by the Installation Information. But this requirement does not apply
325 | if neither you nor any third party retains the ability to install
326 | modified object code on the User Product (for example, the work has
327 | been installed in ROM).
328 |
329 | The requirement to provide Installation Information does not include a
330 | requirement to continue to provide support service, warranty, or updates
331 | for a work that has been modified or installed by the recipient, or for
332 | the User Product in which it has been modified or installed. Access to a
333 | network may be denied when the modification itself materially and
334 | adversely affects the operation of the network or violates the rules and
335 | protocols for communication across the network.
336 |
337 | Corresponding Source conveyed, and Installation Information provided,
338 | in accord with this section must be in a format that is publicly
339 | documented (and with an implementation available to the public in
340 | source code form), and must require no special password or key for
341 | unpacking, reading or copying.
342 |
343 | 7. Additional Terms.
344 |
345 | "Additional permissions" are terms that supplement the terms of this
346 | License by making exceptions from one or more of its conditions.
347 | Additional permissions that are applicable to the entire Program shall
348 | be treated as though they were included in this License, to the extent
349 | that they are valid under applicable law. If additional permissions
350 | apply only to part of the Program, that part may be used separately
351 | under those permissions, but the entire Program remains governed by
352 | this License without regard to the additional permissions.
353 |
354 | When you convey a copy of a covered work, you may at your option
355 | remove any additional permissions from that copy, or from any part of
356 | it. (Additional permissions may be written to require their own
357 | removal in certain cases when you modify the work.) You may place
358 | additional permissions on material, added by you to a covered work,
359 | for which you have or can give appropriate copyright permission.
360 |
361 | Notwithstanding any other provision of this License, for material you
362 | add to a covered work, you may (if authorized by the copyright holders of
363 | that material) supplement the terms of this License with terms:
364 |
365 | a) Disclaiming warranty or limiting liability differently from the
366 | terms of sections 15 and 16 of this License; or
367 |
368 | b) Requiring preservation of specified reasonable legal notices or
369 | author attributions in that material or in the Appropriate Legal
370 | Notices displayed by works containing it; or
371 |
372 | c) Prohibiting misrepresentation of the origin of that material, or
373 | requiring that modified versions of such material be marked in
374 | reasonable ways as different from the original version; or
375 |
376 | d) Limiting the use for publicity purposes of names of licensors or
377 | authors of the material; or
378 |
379 | e) Declining to grant rights under trademark law for use of some
380 | trade names, trademarks, or service marks; or
381 |
382 | f) Requiring indemnification of licensors and authors of that
383 | material by anyone who conveys the material (or modified versions of
384 | it) with contractual assumptions of liability to the recipient, for
385 | any liability that these contractual assumptions directly impose on
386 | those licensors and authors.
387 |
388 | All other non-permissive additional terms are considered "further
389 | restrictions" within the meaning of section 10. If the Program as you
390 | received it, or any part of it, contains a notice stating that it is
391 | governed by this License along with a term that is a further
392 | restriction, you may remove that term. If a license document contains
393 | a further restriction but permits relicensing or conveying under this
394 | License, you may add to a covered work material governed by the terms
395 | of that license document, provided that the further restriction does
396 | not survive such relicensing or conveying.
397 |
398 | If you add terms to a covered work in accord with this section, you
399 | must place, in the relevant source files, a statement of the
400 | additional terms that apply to those files, or a notice indicating
401 | where to find the applicable terms.
402 |
403 | Additional terms, permissive or non-permissive, may be stated in the
404 | form of a separately written license, or stated as exceptions;
405 | the above requirements apply either way.
406 |
407 | 8. Termination.
408 |
409 | You may not propagate or modify a covered work except as expressly
410 | provided under this License. Any attempt otherwise to propagate or
411 | modify it is void, and will automatically terminate your rights under
412 | this License (including any patent licenses granted under the third
413 | paragraph of section 11).
414 |
415 | However, if you cease all violation of this License, then your
416 | license from a particular copyright holder is reinstated (a)
417 | provisionally, unless and until the copyright holder explicitly and
418 | finally terminates your license, and (b) permanently, if the copyright
419 | holder fails to notify you of the violation by some reasonable means
420 | prior to 60 days after the cessation.
421 |
422 | Moreover, your license from a particular copyright holder is
423 | reinstated permanently if the copyright holder notifies you of the
424 | violation by some reasonable means, this is the first time you have
425 | received notice of violation of this License (for any work) from that
426 | copyright holder, and you cure the violation prior to 30 days after
427 | your receipt of the notice.
428 |
429 | Termination of your rights under this section does not terminate the
430 | licenses of parties who have received copies or rights from you under
431 | this License. If your rights have been terminated and not permanently
432 | reinstated, you do not qualify to receive new licenses for the same
433 | material under section 10.
434 |
435 | 9. Acceptance Not Required for Having Copies.
436 |
437 | You are not required to accept this License in order to receive or
438 | run a copy of the Program. Ancillary propagation of a covered work
439 | occurring solely as a consequence of using peer-to-peer transmission
440 | to receive a copy likewise does not require acceptance. However,
441 | nothing other than this License grants you permission to propagate or
442 | modify any covered work. These actions infringe copyright if you do
443 | not accept this License. Therefore, by modifying or propagating a
444 | covered work, you indicate your acceptance of this License to do so.
445 |
446 | 10. Automatic Licensing of Downstream Recipients.
447 |
448 | Each time you convey a covered work, the recipient automatically
449 | receives a license from the original licensors, to run, modify and
450 | propagate that work, subject to this License. You are not responsible
451 | for enforcing compliance by third parties with this License.
452 |
453 | An "entity transaction" is a transaction transferring control of an
454 | organization, or substantially all assets of one, or subdividing an
455 | organization, or merging organizations. If propagation of a covered
456 | work results from an entity transaction, each party to that
457 | transaction who receives a copy of the work also receives whatever
458 | licenses to the work the party's predecessor in interest had or could
459 | give under the previous paragraph, plus a right to possession of the
460 | Corresponding Source of the work from the predecessor in interest, if
461 | the predecessor has it or can get it with reasonable efforts.
462 |
463 | You may not impose any further restrictions on the exercise of the
464 | rights granted or affirmed under this License. For example, you may
465 | not impose a license fee, royalty, or other charge for exercise of
466 | rights granted under this License, and you may not initiate litigation
467 | (including a cross-claim or counterclaim in a lawsuit) alleging that
468 | any patent claim is infringed by making, using, selling, offering for
469 | sale, or importing the Program or any portion of it.
470 |
471 | 11. Patents.
472 |
473 | A "contributor" is a copyright holder who authorizes use under this
474 | License of the Program or a work on which the Program is based. The
475 | work thus licensed is called the contributor's "contributor version".
476 |
477 | A contributor's "essential patent claims" are all patent claims
478 | owned or controlled by the contributor, whether already acquired or
479 | hereafter acquired, that would be infringed by some manner, permitted
480 | by this License, of making, using, or selling its contributor version,
481 | but do not include claims that would be infringed only as a
482 | consequence of further modification of the contributor version. For
483 | purposes of this definition, "control" includes the right to grant
484 | patent sublicenses in a manner consistent with the requirements of
485 | this License.
486 |
487 | Each contributor grants you a non-exclusive, worldwide, royalty-free
488 | patent license under the contributor's essential patent claims, to
489 | make, use, sell, offer for sale, import and otherwise run, modify and
490 | propagate the contents of its contributor version.
491 |
492 | In the following three paragraphs, a "patent license" is any express
493 | agreement or commitment, however denominated, not to enforce a patent
494 | (such as an express permission to practice a patent or covenant not to
495 | sue for patent infringement). To "grant" such a patent license to a
496 | party means to make such an agreement or commitment not to enforce a
497 | patent against the party.
498 |
499 | If you convey a covered work, knowingly relying on a patent license,
500 | and the Corresponding Source of the work is not available for anyone
501 | to copy, free of charge and under the terms of this License, through a
502 | publicly available network server or other readily accessible means,
503 | then you must either (1) cause the Corresponding Source to be so
504 | available, or (2) arrange to deprive yourself of the benefit of the
505 | patent license for this particular work, or (3) arrange, in a manner
506 | consistent with the requirements of this License, to extend the patent
507 | license to downstream recipients. "Knowingly relying" means you have
508 | actual knowledge that, but for the patent license, your conveying the
509 | covered work in a country, or your recipient's use of the covered work
510 | in a country, would infringe one or more identifiable patents in that
511 | country that you have reason to believe are valid.
512 |
513 | If, pursuant to or in connection with a single transaction or
514 | arrangement, you convey, or propagate by procuring conveyance of, a
515 | covered work, and grant a patent license to some of the parties
516 | receiving the covered work authorizing them to use, propagate, modify
517 | or convey a specific copy of the covered work, then the patent license
518 | you grant is automatically extended to all recipients of the covered
519 | work and works based on it.
520 |
521 | A patent license is "discriminatory" if it does not include within
522 | the scope of its coverage, prohibits the exercise of, or is
523 | conditioned on the non-exercise of one or more of the rights that are
524 | specifically granted under this License. You may not convey a covered
525 | work if you are a party to an arrangement with a third party that is
526 | in the business of distributing software, under which you make payment
527 | to the third party based on the extent of your activity of conveying
528 | the work, and under which the third party grants, to any of the
529 | parties who would receive the covered work from you, a discriminatory
530 | patent license (a) in connection with copies of the covered work
531 | conveyed by you (or copies made from those copies), or (b) primarily
532 | for and in connection with specific products or compilations that
533 | contain the covered work, unless you entered into that arrangement,
534 | or that patent license was granted, prior to 28 March 2007.
535 |
536 | Nothing in this License shall be construed as excluding or limiting
537 | any implied license or other defenses to infringement that may
538 | otherwise be available to you under applicable patent law.
539 |
540 | 12. No Surrender of Others' Freedom.
541 |
542 | If conditions are imposed on you (whether by court order, agreement or
543 | otherwise) that contradict the conditions of this License, they do not
544 | excuse you from the conditions of this License. If you cannot convey a
545 | covered work so as to satisfy simultaneously your obligations under this
546 | License and any other pertinent obligations, then as a consequence you may
547 | not convey it at all. For example, if you agree to terms that obligate you
548 | to collect a royalty for further conveying from those to whom you convey
549 | the Program, the only way you could satisfy both those terms and this
550 | License would be to refrain entirely from conveying the Program.
551 |
552 | 13. Use with the GNU Affero General Public License.
553 |
554 | Notwithstanding any other provision of this License, you have
555 | permission to link or combine any covered work with a work licensed
556 | under version 3 of the GNU Affero General Public License into a single
557 | combined work, and to convey the resulting work. The terms of this
558 | License will continue to apply to the part which is the covered work,
559 | but the special requirements of the GNU Affero General Public License,
560 | section 13, concerning interaction through a network will apply to the
561 | combination as such.
562 |
563 | 14. Revised Versions of this License.
564 |
565 | The Free Software Foundation may publish revised and/or new versions of
566 | the GNU General Public License from time to time. Such new versions will
567 | be similar in spirit to the present version, but may differ in detail to
568 | address new problems or concerns.
569 |
570 | Each version is given a distinguishing version number. If the
571 | Program specifies that a certain numbered version of the GNU General
572 | Public License "or any later version" applies to it, you have the
573 | option of following the terms and conditions either of that numbered
574 | version or of any later version published by the Free Software
575 | Foundation. If the Program does not specify a version number of the
576 | GNU General Public License, you may choose any version ever published
577 | by the Free Software Foundation.
578 |
579 | If the Program specifies that a proxy can decide which future
580 | versions of the GNU General Public License can be used, that proxy's
581 | public statement of acceptance of a version permanently authorizes you
582 | to choose that version for the Program.
583 |
584 | Later license versions may give you additional or different
585 | permissions. However, no additional obligations are imposed on any
586 | author or copyright holder as a result of your choosing to follow a
587 | later version.
588 |
589 | 15. Disclaimer of Warranty.
590 |
591 | THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
592 | APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
593 | HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
594 | OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
595 | THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
596 | PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
597 | IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
598 | ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
599 |
600 | 16. Limitation of Liability.
601 |
602 | IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
603 | WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
604 | THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
605 | GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
606 | USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
607 | DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
608 | PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
609 | EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
610 | SUCH DAMAGES.
611 |
612 | 17. Interpretation of Sections 15 and 16.
613 |
614 | If the disclaimer of warranty and limitation of liability provided
615 | above cannot be given local legal effect according to their terms,
616 | reviewing courts shall apply local law that most closely approximates
617 | an absolute waiver of all civil liability in connection with the
618 | Program, unless a warranty or assumption of liability accompanies a
619 | copy of the Program in return for a fee.
620 |
621 | END OF TERMS AND CONDITIONS
622 |
623 | How to Apply These Terms to Your New Programs
624 |
625 | If you develop a new program, and you want it to be of the greatest
626 | possible use to the public, the best way to achieve this is to make it
627 | free software which everyone can redistribute and change under these terms.
628 |
629 | To do so, attach the following notices to the program. It is safest
630 | to attach them to the start of each source file to most effectively
631 | state the exclusion of warranty; and each file should have at least
632 | the "copyright" line and a pointer to where the full notice is found.
633 |
634 | {one line to give the program's name and a brief idea of what it does.}
635 | Copyright (C) {year} {name of author}
636 |
637 | This program is free software: you can redistribute it and/or modify
638 | it under the terms of the GNU General Public License as published by
639 | the Free Software Foundation, either version 3 of the License, or
640 | (at your option) any later version.
641 |
642 | This program is distributed in the hope that it will be useful,
643 | but WITHOUT ANY WARRANTY; without even the implied warranty of
644 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
645 | GNU General Public License for more details.
646 |
647 | You should have received a copy of the GNU General Public License
648 | along with this program. If not, see <http://www.gnu.org/licenses/>.
649 |
650 | Also add information on how to contact you by electronic and paper mail.
651 |
652 | If the program does terminal interaction, make it output a short
653 | notice like this when it starts in an interactive mode:
654 |
655 | {project} Copyright (C) {year} {fullname}
656 | This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
657 | This is free software, and you are welcome to redistribute it
658 | under certain conditions; type `show c' for details.
659 |
660 | The hypothetical commands `show w' and `show c' should show the appropriate
661 | parts of the General Public License. Of course, your program's commands
662 | might be different; for a GUI interface, you would use an "about box".
663 |
664 | You should also get your employer (if you work as a programmer) or school,
665 | if any, to sign a "copyright disclaimer" for the program, if necessary.
666 | For more information on this, and how to apply and follow the GNU GPL, see
667 | <http://www.gnu.org/licenses/>.
668 |
669 | The GNU General Public License does not permit incorporating your program
670 | into proprietary programs. If your program is a subroutine library, you
671 | may consider it more useful to permit linking proprietary applications with
672 | the library. If this is what you want to do, use the GNU Lesser General
673 | Public License instead of this License. But first, please read
674 | <http://www.gnu.org/philosophy/why-not-lgpl.html>.
675 |
676 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 | # Hello.
6 |
7 | 
8 |
9 | This repository holds the source for the [appcanary](https://appcanary.com) agent. **Please note**: To install the agent on your servers, please consult [the instructions](https://appcanary.com/servers/new). If you're interested in how it works, read on!
10 |
11 | The agent itself is pretty "dumb". Its sole purpose is to monitor file paths, supplied via a config file, for changes and, whenever a file changes, send it over to the appcanary API for us to parse and make decisions on.
12 |
13 | Oh, and it also pings us once an hour so we know it's still working.
14 |
15 | If you're reading this because you want to audit the code, the magic starts in [`main.go`](https://github.com/appcanary/agent/blob/master/main.go), [`agent/file.go`](https://github.com/appcanary/agent/blob/master/agent/file.go) and [`agent/agent.go`](https://github.com/appcanary/agent/blob/master/agent/agent.go). We think it's pretty straightforward!
16 |
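If you just want the gist of that loop before diving into the source, here's a minimal, illustrative sketch in Go — hypothetical names only; the real logic lives in the watcher and client code under `agent/`:

```go
package main

import (
	"crypto/sha256"
	"fmt"
	"os"
	"time"
)

// pollFile is an illustrative stand-in for the agent's file watchers: it
// checksums a watched path on an interval and calls onChange whenever the
// contents differ from the previous poll.
func pollFile(path string, interval time.Duration, onChange func(contents []byte)) {
	var lastSum [sha256.Size]byte
	for {
		if contents, err := os.ReadFile(path); err == nil {
			if sum := sha256.Sum256(contents); sum != lastSum {
				lastSum = sum
				onChange(contents)
			}
		}
		time.Sleep(interval)
	}
}

func main() {
	// The real agent reads its watch list from a config file and ships changed
	// contents to the appcanary API; this sketch just prints a notice.
	go pollFile("/var/lib/dpkg/status", time.Hour, func(contents []byte) {
		fmt.Printf("file changed, would ship %d bytes to the API\n", len(contents))
	})
	select {} // block forever, like the long-running agent
}
```

The real agent builds one watcher per entry in the config file, and can also watch process maps and command output rather than plain files, but the shape is the same.
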
17 | ## Setup
18 |
19 | 1. This project depends on a working golang and ruby environment, as well as docker.
20 | 2. First, let's set up go. Go to your `$GOPATH` and type:
21 |
22 | `go get github.com/appcanary/agent`
23 |
24 | 3. `cd` into the brand new agent folder, and install all of our go dependencies by typing:
25 |
26 | `go get -t -d -v ./...`
27 |
28 | 4. We'll also need [go-bindata](https://github.com/jteeuwen/go-bindata):
29 |
30 | `go get -u github.com/jteeuwen/go-bindata/...`
31 |
32 | We don't run it by default, cos it's annoying, but whenever you touch a bundled resource file don't forget to run:
33 | ```bash
34 | go-bindata -pkg detect -o agent/detect/bindata.go agent/resources/
35 | ```
36 | and check in the result. (There's a short sketch at the end of this setup list showing how the embedded data gets read back.)
37 |
38 | 5. Now we set up bundler and install our ruby dependencies. We use ruby to script all of our build and packaging tasks. Go ahead and type:
39 |
40 | ```bash
41 | gem install bundler # if you don't have it
42 | bundle install
43 | ```
44 |
45 | This gets you all the basics up on your machine.
46 |
47 | 6. In order to cross compile releases, you're going to need `goxc`, so visit [the goxc github page](https://github.com/laher/goxc) and install that (the last version we used was 0.16.0).
48 |
49 | 7. We package releases using [`fpm`](https://github.com/jordansissel/fpm/). This is installed via bundler in step 5, HOWEVER, `fpm` requires `rpmbuild` in order to assemble rpm packages. We last used `rpmbuild` version 5.4.15. On OSX at least, that util is part of the `rpm` homebrew package, so:
50 |
51 | ```bash
52 | brew install rpm
53 | ```
54 |
55 | 8. At this stage you're able to build, test, package and deploy packages. But you know what you're missing? A way to test that the packages work on the (at time of writing) 10 different linux versions you support. We ended up using docker for this. We went and got [boot2docker](http://boot2docker.io/) (cli/docker version 1.6.2 is what we used).
56 |
57 | You may also have to fetch VirtualBox. There are instructions; docker is... complicated.
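As an aside to step 4 above: the `go-bindata` step exists so the OS-detection script under `agent/resources/` can be compiled into the binary. Here's a hypothetical sketch of how such embedded data is typically read back through the generated `Asset` accessor — the asset name below assumes go-bindata's default naming and isn't necessarily the exact call the agent makes:

```go
package main

import (
	"fmt"
	"log"

	"github.com/appcanary/agent/agent/detect"
)

func main() {
	// go-bindata generates an Asset(name) ([]byte, error) accessor in the
	// detect package; the asset name here assumes the default naming from the
	// go-bindata command shown in step 4 and may differ in practice.
	script, err := detect.Asset("agent/resources/detect_linux.sh")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("embedded detect script is %d bytes\n", len(script))
}
```
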
58 |
59 | ## Compiling
60 |
61 | Once you've done the above, you're all set!
62 |
63 | ```bash
64 | rake build # to compile
65 | rake test # to test
66 | rake testr t=TestName # to run a specific test
67 | ```
68 |
69 | ## Testing on non-Debian based systems
70 |
71 | Note that tests will only pass completely on a system with `dpkg` installed. If you don't have it, we include a `Dockerfile` which should help. There are extra steps needed to get this working.
72 |
73 |
74 | Now you should be able to build the docker container and run the tests:
75 |
76 | ```shell
77 | $ cd $AGENT_REPO_ROOT
78 | $ docker build .
79 | [ ... lots of output elided ... ]
80 | Successfully built <container id>
81 | ```
82 |
83 | Where `AGENT_REPO_ROOT` is set to the correct location on your local filesystem and where `<container id>` is actually a hexadecimal container ID. Next, run the tests:
84 |
85 | ```shell
86 | $ docker run <container id> rake test
87 | ```
88 |
89 | You will need to rebuild the container for any code changes - don't forget! The container building process caches intermediate layers, so subsequent builds will be faster.
90 |
91 | ## Packaging
92 |
93 | ```bash
94 | rake package # just to create packages
95 | rake deploy # packages, then deploys to package cloud
96 |
97 | # actually deploy to 'production' package cloud repo
98 | CANARY_ENV=production rake deploy
99 | ```
100 |
101 | ## Testing the packaging
102 | ```bash
103 | boot2docker start # copy and paste the export fields it gives you
104 | rake integration:test
105 | ```
106 |
107 | or, alternatively, if you built a specific package:
108 |
109 | ```bash
110 | boot2docker start # again, make sure you copy those exports
111 | rake integration:single distro=debian release=jessie package=releases/appcanary_0.0.2-2015.11.10-212042-UTC_amd64_debian_jessie.deb
112 | ```
113 |
114 | Pro-tip! Don't forget to use the correct package architecture version for the machine you're on (probably amd64).
115 |
116 | ## Contributing
117 |
118 | By submitting a pull request directly to us you hereby agree to assign the copyright of your patch to the Canary Computer Corporation. Our lives are made immensely easier by this and we hope you understand.
119 |
120 |
121 | 
122 |
--------------------------------------------------------------------------------
/Rakefile:
--------------------------------------------------------------------------------
1 | require 'rake/clean'
2 | require 'json'
3 | require 'yaml'
4 | load 'test/pkg/Rakefile'
5 |
6 | CURRENT_VERSION = "0.2.0"
7 | PC_USER = "appcanary"
8 | PC_REPO = "agent"
9 | PC_STAGING_REPO = "appcanary-stg"
10 |
11 | @built_packages = []
12 |
13 | def production?
14 | @isproduction ||= (ENV["CANARY_ENV"] == "production")
15 | end
16 |
17 | # gets result of shell command
18 | def shell(str)
19 | puts str
20 | `#{str}`.strip
21 | end
22 |
23 | # execute a shell command and print stderr
24 | def exec(str)
25 | puts str
26 | system str
27 | end
28 |
29 | task :default => :build
30 |
31 | task :build_all => [:setup, :build]
32 |
33 | desc "Build the program into ./bin/appcanary"
34 | task :build do
35 | @ldflags = %{"-X main.CanaryVersion=#{@release_version || "unreleased"}"}
36 | # actually, do we need to run this every time? let's not for now.
37 | # shell "go-bindata -pkg detect -o agent/detect/bindata.go agent/resources/"
38 | shell "go build -ldflags #{@ldflags} -o ./bin/appcanary"
39 | end
40 |
41 | desc "Build and run all go tests"
42 | task :test => :build_all do
43 | sh "go test -v ./... -race -timeout 100s"
44 | end
45 |
46 | desc "Build and run a specific go test"
47 | task :testr => :build_all do
48 | sh "go test -v ./... -race -timeout 100s -run #{ENV["t"]}"
49 | end
50 |
51 | desc "Generate release version from date"
52 | task :release_prep do
53 | if production?
54 | if `git diff --shortstat` != ""
55 | puts "Whoa there, partner. Dirty trees can't deploy. Git yourself clean"
56 | exit 1
57 | end
58 | end
59 |
60 | @date = `date -u +"%Y.%m.%d-%H%M%S-%Z"`.strip
61 | @release_version = "#{CURRENT_VERSION}-#{@date}"
62 | end
63 |
64 | desc "Cross compile a binary for every architecture"
65 | task :cross_compile => :release_prep do
66 | puts "\n\n\n#################################"
67 | puts "Cross compiling packages."
68 | puts "#################################\n\n\n"
69 |
70 | @ldflags = %{-X main.CanaryVersion=#{@release_version}}
71 | shell %{env GOARCH=386 GOOS=linux go build -ldflags "#{@ldflags}" -o dist/#{@release_version}/linux_i386/appcanary github.com/appcanary/agent}
72 | shell %{env GOARCH=amd64 GOOS=linux go build -ldflags "#{@ldflags}" -o dist/#{@release_version}/linux_amd64/appcanary github.com/appcanary/agent}
73 | end
74 |
75 |
76 | desc "Generate a package archive for every operating system we support"
77 | task :package => :cross_compile do
78 | load 'package/packager.rb'
79 | puts "\n\n\n#################################"
80 | puts "Building packages."
81 | puts "#################################\n\n\n"
82 |
83 | [UbuntuRecipe, CentosRecipe, DebianRecipe, MintRecipe, FedoraRecipe].each do |rcp|
84 | puts "#######"
85 | puts "#{rcp.distro}"
86 | puts "#######\n\n"
87 |
88 | @built_packages = @built_packages + rcp.new(@release_version).build_packages
89 | end
90 | end
91 |
92 | desc "Cross compile, package and deploy packages to package cloud"
93 | task :deploy => :package do # "integration:test" do
94 |
95 | publisher = nil
96 | if production?
97 | publisher = PackagePublisher.new(PC_USER, PC_REPO)
98 | else
99 | publisher = PackagePublisher.new(PC_USER, PC_STAGING_REPO)
100 | end
101 |
102 | @built_packages.each do |pkg|
103 | publisher.publish!(pkg)
104 | end
105 |
106 | sha = shell %{git rev-parse --short HEAD}
107 | user = `whoami`.strip
108 | commit_message = "#{user} deployed #{sha}"
109 |
110 |
111 | if production?
112 | shell %{git tag -a #{@release_version} -m "#{commit_message}"}
113 | shell %{git push origin #{@release_version}}
114 | end
115 |
116 | end
117 |
118 | task :release => [:release_prep, :default, :package]
119 |
120 | task :setup do
121 | `mkdir -p ./bin`
122 | `rm -f ./bin/*`
123 | end
124 |
--------------------------------------------------------------------------------
/agent/agent.go:
--------------------------------------------------------------------------------
1 | package agent
2 |
3 | import (
4 | "os"
5 |
6 | "github.com/appcanary/agent/conf"
7 | )
8 |
9 | var CanaryVersion string
10 |
11 | type Agent struct {
12 | conf *conf.Conf
13 | client Client
14 | server *Server
15 | files Watchers
16 | DoneChannel chan os.Signal
17 | }
18 |
19 | func NewAgent(version string, conf *conf.Conf, clients ...Client) *Agent {
20 | agent := &Agent{conf: conf, files: Watchers{}}
21 |
22 | // Find out what we need about machine
23 | // Fills out server conf if some values are missing
24 | agent.server = NewServer(conf, conf.ServerConf)
25 |
26 | if len(clients) > 0 {
27 | agent.client = clients[0]
28 | } else {
29 | agent.client = NewClient(conf.ApiKey, agent.server)
30 | }
31 |
32 | CanaryVersion = version
33 | return agent
34 | }
35 |
36 | // instantiate structs, fs hook
37 | func (agent *Agent) StartPolling() {
38 | for _, watcher := range agent.files {
39 | watcher.Start()
40 | }
41 | }
42 |
43 | func (agent *Agent) BuildAndSyncWatchers() {
44 | for _, w := range agent.conf.Watchers {
45 | var watcher Watcher
46 |
47 | if w.Process != "" {
48 | watcher = NewProcessWatcher(w.Process, agent.OnChange)
49 | } else if w.Command != "" {
50 | watcher = NewCommandOutputWatcher(w.Command, agent.OnChange)
51 | } else if w.Path != "" {
52 | watcher = NewFileWatcher(w.Path, agent.OnChange)
53 | }
54 | agent.files = append(agent.files, watcher)
55 | }
56 | }
57 |
58 | func (agent *Agent) OnChange(w Watcher) {
59 | log := conf.FetchLog()
60 |
61 | switch wt := w.(type) {
62 | default:
63 | log.Errorf("Don't know what to do with %T", wt)
64 | case TextWatcher:
65 | agent.handleTextChange(wt)
66 | case ProcessWatcher:
67 | agent.handleProcessChange(wt)
68 | }
69 | }
70 |
71 | func (agent *Agent) handleProcessChange(pw ProcessWatcher) {
72 | log := conf.FetchLog()
73 |
74 | match := pw.Match()
75 | if match == "*" {
76 | log.Infof("Shipping process map")
77 | } else {
78 | log.Infof("Shipping process map for %s", match)
79 | }
80 | agent.client.SendProcessState(match, pw.StateJson())
81 | }
82 |
83 | func (agent *Agent) handleTextChange(tw TextWatcher) {
84 | log := conf.FetchLog()
85 | log.Infof("File change: %s", tw.Path())
86 |
87 | // should probably be in the actual hook code
88 | contents, err := tw.Contents()
89 | if err != nil {
90 | // we couldn't read it; something weird is happening, so let's just wait
91 | // until this callback gets issued again when the file reappears.
92 | log.Infof("File contents error: %s", err)
93 | return
94 | }
95 |
96 | err = agent.client.SendFile(tw.Path(), tw.Kind(), contents)
97 | if err != nil {
98 | // TODO: some kind of queuing mechanism to keep trying beyond the
99 | // exponential backoff in the client. What if the connection fails for
100 | // whatever reason?
101 | log.Infof("Sendfile error: %s", err)
102 | }
103 | }
104 |
105 | func (agent *Agent) SyncAllFiles() {
106 | log := conf.FetchLog()
107 | log.Info("Synching all files.")
108 |
109 | for _, f := range agent.files {
110 | agent.OnChange(f)
111 | }
112 | }
113 |
114 | func (agent *Agent) Heartbeat() error {
115 | return agent.client.Heartbeat(agent.server.UUID, agent.files)
116 | }
117 |
118 | func (agent *Agent) FirstRun() bool {
119 | // the configuration didn't find a server uuid
120 | return agent.server.IsNew()
121 | }
122 |
123 | func (agent *Agent) RegisterServer() error {
124 | uuid, err := agent.client.CreateServer(agent.server)
125 |
126 | if err != nil {
127 | return err
128 | }
129 | agent.server.UUID = uuid
130 | agent.conf.ServerConf.UUID = uuid
131 | agent.conf.Save()
132 | return nil
133 | }
134 |
135 | func (agent *Agent) PerformUpgrade() {
136 | log := conf.FetchLog()
137 |
138 | var cmds UpgradeSequence
139 | packageList, err := agent.client.FetchUpgradeablePackages()
140 |
141 | if err != nil {
142 | log.Fatalf("Can't fetch upgrade info: %s", err)
143 | }
144 |
145 | if len(packageList) == 0 {
146 | log.Info("No vulnerable packages reported. Carry on!")
147 | return
148 | }
149 |
150 | if agent.server.IsUbuntu() {
151 | cmds = buildDebianUpgrade(packageList)
152 | } else if agent.server.IsCentOS() {
153 | cmds = buildCentOSUpgrade(packageList)
154 | } else {
155 | log.Fatal("Sorry, we don't support your operating system at the moment. Is this a mistake? Run `appcanary detect-os` and tell us about it at support@appcanary.com")
156 | }
157 |
158 | err = executeUpgradeSequence(cmds)
159 | if err != nil {
160 | log.Fatal(err)
161 | }
162 | }
163 |
164 | // This has to be called before exiting
165 | func (agent *Agent) CloseWatches() {
166 | for _, file := range agent.files {
167 | file.Stop()
168 | }
169 | }
170 |
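For orientation, the lifecycle these methods imply is: load config, register on first run, build watchers, start polling, heartbeat, and close the watches on shutdown. Below is a minimal sketch of how a caller (main.go, which is not part of this file) might wire it together; the "test" env name mirrors the tests in this section and the version string is illustrative.

package main

import (
	"github.com/appcanary/agent/agent"
	"github.com/appcanary/agent/conf"
)

func main() {
	conf.InitEnv("test") // env name is an assumption; real env names come from the conf package
	config, err := conf.NewConfFromEnv()
	if err != nil {
		panic(err)
	}

	a := agent.NewAgent("0.0.0", config) // version string is illustrative

	if a.FirstRun() {
		if err := a.RegisterServer(); err != nil {
			panic(err)
		}
	}

	a.BuildAndSyncWatchers()
	a.StartPolling()
	defer a.CloseWatches()

	a.Heartbeat() // in a real deployment this runs on a timer
}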
--------------------------------------------------------------------------------
/agent/agent_test.go:
--------------------------------------------------------------------------------
1 | package agent
2 |
3 | import (
4 | "os"
5 | "os/exec"
6 | "testing"
7 | "time"
8 |
9 | "github.com/appcanary/agent/conf"
10 | "github.com/appcanary/testify/assert"
11 | )
12 |
13 | func TestAgent(t *testing.T) {
14 | assert := assert.New(t)
15 |
16 | // setup
17 | serverUUID := "123456"
18 | conf.InitEnv("test")
19 | config, err := conf.NewConfFromEnv()
20 | assert.Nil(err)
21 |
22 | config.Watchers[0].Path = conf.DEV_CONF_PATH + "/dpkg/available"
23 |
24 | client := &MockClient{}
25 | client.On("CreateServer").Return(serverUUID)
26 | client.On("SendFile").Return(nil).Twice()
27 | client.On("Heartbeat").Return(nil).Once()
28 | client.On("SendProcessState").Return(nil).Twice()
29 |
30 | agent := NewAgent("test", config, client)
31 |
32 | // let's make sure stuff got set
33 | assert.Equal("deployment1", agent.server.Name)
34 | assert.NotEqual("", agent.server.Hostname)
35 | assert.NotEqual("", agent.server.Uname)
36 | assert.NotEqual("", agent.server.Distro)
37 | assert.NotEqual("", agent.server.Ip)
38 |
39 | // let's ensure our server is unregistered
40 | agent.server.UUID = ""
41 |
42 | assert.Equal(true, agent.FirstRun())
43 |
44 | agent.RegisterServer()
45 |
46 | // registering the server actually set the right val
47 | assert.Equal(serverUUID, agent.server.UUID)
48 |
49 | // Let's ensure that the client gets exercised.
50 | agent.BuildAndSyncWatchers()
51 | agent.StartPolling()
52 |
53 | // force a change in the process table
54 | proc := startProcess(assert)
55 | defer proc.Kill()
56 |
57 | agent.Heartbeat()
58 |
59 | // after a period of time, we sync all files
60 | agent.SyncAllFiles()
61 |
62 | // the filewatcher needs enough time to
63 | // actually be able to start watching
64 | // the file. This is clunky, but less clunky
65 | // than hacking some channel into this.
66 | <-time.After(200 * time.Millisecond)
67 | // close the hooks before asserting expectations
68 | // since the SendFiles happen in a goroutine
69 | defer agent.CloseWatches()
70 | defer client.AssertExpectations(t)
71 | }
72 |
73 | func startProcess(assert *assert.Assertions) *os.Process {
74 | script := conf.DEV_CONF_PATH + "/pointless"
75 |
76 | cmd := exec.Command(script)
77 | err := cmd.Start()
78 | assert.Nil(err)
79 |
80 | return cmd.Process
81 | }
82 |
--------------------------------------------------------------------------------
/agent/client.go:
--------------------------------------------------------------------------------
1 | package agent
2 |
3 | import (
4 | "bytes"
5 | "encoding/base64"
6 | "encoding/json"
7 | "errors"
8 | "fmt"
9 | "hash/crc32"
10 | "io/ioutil"
11 | "net/http"
12 | "time"
13 |
14 | _ "crypto/sha512"
15 | //http://bridge.grumpy-troll.org/2014/05/golang-tls-comodo/
16 |
17 | "github.com/appcanary/agent/conf"
18 | "github.com/cenkalti/backoff"
19 | )
20 |
21 | var (
22 | ErrApi = errors.New("api error")
23 | ErrDeprecated = errors.New("api deprecated")
24 | )
25 |
26 | type Client interface {
27 | Heartbeat(string, Watchers) error
28 | SendFile(string, string, []byte) error
29 | SendProcessState(string, []byte) error
30 | CreateServer(*Server) (string, error)
31 | FetchUpgradeablePackages() (map[string]string, error)
32 | }
33 |
34 | type CanaryClient struct {
35 | apiKey string
36 | server *Server
37 | }
38 |
39 | func NewClient(apiKey string, server *Server) *CanaryClient {
40 | client := &CanaryClient{apiKey: apiKey, server: server}
41 | return client
42 | }
43 |
44 | func (client *CanaryClient) Heartbeat(uuid string, files Watchers) error {
45 | log := conf.FetchLog()
46 |
47 | body, err := json.Marshal(map[string]interface{}{
48 | "files": files,
49 | "agent-version": CanaryVersion,
50 | "distro": client.server.Distro,
51 | "release": client.server.Release,
52 | "tags": client.server.Tags,
53 | })
54 |
55 | if err != nil {
56 | return err
57 | }
58 |
59 | // TODO SANITIZE UUID input cos this feels abusable
60 | respBody, err := client.post(conf.ApiHeartbeatPath(uuid), body)
61 |
62 | if err != nil {
63 | return err
64 | }
65 |
66 | type heartbeatResponse struct {
67 | Heartbeat time.Time
68 | }
69 |
70 | var t heartbeatResponse
71 |
72 | // TODO: do something with heartbeat resp
73 | err = json.Unmarshal(respBody, &t)
74 | log.Debug(fmt.Sprintf("Heartbeat: %s", t.Heartbeat))
75 | if err != nil {
76 | return err
77 | }
78 |
79 | return nil
80 | }
81 |
82 | func (client *CanaryClient) SendFile(path string, kind string, contents []byte) error {
83 | // Compute the checksum over the raw contents (not the base64 encoding)
84 | crc := crc32.ChecksumIEEE(contents)
85 | // File needs to be sent base64 encoded
86 | b64buffer := new(bytes.Buffer)
87 | b64enc := base64.NewEncoder(base64.StdEncoding, b64buffer)
88 | b64enc.Write(contents)
89 | b64enc.Close()
90 |
91 | file_json, err := json.Marshal(map[string]interface{}{
92 | "name": "",
93 | "path": path,
94 | "kind": kind,
95 | "contents": string(b64buffer.Bytes()),
96 | "crc": crc,
97 | })
98 |
99 | if err != nil {
100 | return err
101 | }
102 |
103 | _, err = client.put(conf.ApiServerPath(client.server.UUID), file_json)
104 |
105 | return err
106 | }
107 |
108 | func (client *CanaryClient) SendProcessState(match string, body []byte) error {
109 | // match is unused for now - should it get shipped?
110 | _, err := client.put(conf.ApiServerProcsPath(client.server.UUID), body)
111 | return err
112 | }
113 |
114 | func (c *CanaryClient) CreateServer(srv *Server) (string, error) {
115 | body, err := json.Marshal(*srv)
116 |
117 | if err != nil {
118 | return "", err
119 | }
120 |
121 | respBody, err := c.post(conf.ApiServersPath(), body)
122 | if err != nil {
123 | return "", err
124 | }
125 |
126 | var respServer struct {
127 | UUID string `json:"uuid"`
128 | }
129 |
130 | json.Unmarshal(respBody, &respServer)
131 | return respServer.UUID, nil
132 | }
133 |
134 | func (client *CanaryClient) FetchUpgradeablePackages() (map[string]string, error) {
135 | respBody, err := client.get(conf.ApiServerPath(client.server.UUID))
136 |
137 | if err != nil {
138 | return nil, err
139 | }
140 |
141 | var packageList map[string]string
142 | err = json.Unmarshal(respBody, &packageList)
143 |
144 | if err != nil {
145 | return nil, err
146 | }
147 |
148 | return packageList, nil
149 | }
150 |
151 | func (client *CanaryClient) post(rPath string, body []byte) ([]byte, error) {
152 | return client.send("POST", rPath, body)
153 | }
154 |
155 | func (client *CanaryClient) put(rPath string, body []byte) ([]byte, error) {
156 | return client.send("PUT", rPath, body)
157 | }
158 |
159 | func (client *CanaryClient) get(rPath string) ([]byte, error) {
160 | return client.send("GET", rPath, []byte{})
161 | }
162 |
163 | func (c *CanaryClient) send(method string, uri string, body []byte) ([]byte, error) {
164 | log := conf.FetchLog()
165 |
166 | client := &http.Client{}
167 | req, err := http.NewRequest(method, uri, bytes.NewBuffer(body))
168 | if err != nil {
169 | return nil, err
170 | }
171 |
172 | // Ahem, http://stackoverflow.com/questions/17714494/golang-http-request-results-in-eof-errors-when-making-multiple-requests-successi
173 | req.Close = true
174 |
175 | req.Header.Add("Content-Type", "application/json")
176 | req.Header.Add("Authorization", "Token "+c.apiKey)
177 |
178 | var res *http.Response
179 |
180 | // if the request fails for whatever reason, keep
181 | // trying to reach the server
182 | err = backoff.Retry(func() error {
183 | log.Debugf("Request: %s %s", method, uri)
184 | res, err = client.Do(req)
185 | if err != nil {
186 | log.Errorf("Error in request %s", err)
187 | }
188 |
189 | return err
190 | }, backoff.NewExponentialBackOff())
191 |
192 | if err != nil {
193 | log.Debug("Do err: ", err.Error())
194 | return nil, err
195 | }
196 |
197 | defer res.Body.Close()
198 |
199 | if res.StatusCode < 200 || res.StatusCode > 299 {
200 | errorstr := fmt.Sprintf("API Error: %d %s", res.StatusCode, uri)
201 | if res.StatusCode == 401 {
202 | log.Fatal("Please double check your settings: " + errorstr)
203 | } else {
204 | return nil, errors.New(errorstr)
205 | }
206 | }
207 |
208 | respBody, err := ioutil.ReadAll(res.Body)
209 | if err != nil {
210 | return respBody, err
211 | }
212 |
213 | return respBody, nil
214 | }
215 |
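The send path above leans on github.com/cenkalti/backoff: the request is wrapped in backoff.Retry so transient failures are retried with exponentially growing pauses before the error is surfaced. A stripped-down sketch of that pattern on its own (the URL is a placeholder, not an Appcanary endpoint):

package main

import (
	"fmt"
	"net/http"

	"github.com/cenkalti/backoff"
)

func main() {
	var res *http.Response

	err := backoff.Retry(func() error {
		var err error
		res, err = http.Get("https://example.com/") // placeholder URL
		if err != nil {
			fmt.Println("request failed, will retry:", err)
		}
		return err // a non-nil error tells Retry to back off and try again
	}, backoff.NewExponentialBackOff())

	if err != nil {
		fmt.Println("gave up:", err)
		return
	}
	defer res.Body.Close()
	fmt.Println("status:", res.Status)
}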
--------------------------------------------------------------------------------
/agent/client_test.go:
--------------------------------------------------------------------------------
1 | package agent
2 |
3 | import (
4 | "encoding/json"
5 | "io/ioutil"
6 | "net/http"
7 | "net/http/httptest"
8 | "os/exec"
9 | "testing"
10 | "time"
11 |
12 | "github.com/appcanary/agent/conf"
13 | "github.com/appcanary/testify/suite"
14 | )
15 |
16 | type TestJsonRequest map[string]interface{}
17 |
18 | type ClientTestSuite struct {
19 | suite.Suite
20 | apiKey string
21 | serverUUID string
22 | files Watchers
23 | client Client
24 | }
25 |
26 | func TestClient(t *testing.T) {
27 | suite.Run(t, new(ClientTestSuite))
28 | }
29 |
30 | func (t *ClientTestSuite) SetupTest() {
31 | conf.InitEnv("test")
32 | t.apiKey = "my api key"
33 | t.serverUUID = "server uuid"
34 |
35 | dpkgPath := conf.DEV_CONF_PATH + "/dpkg/available"
36 | dpkgFile := NewFileWatcher(dpkgPath, testCallbackNOP)
37 |
38 | gemfilePath := conf.DEV_CONF_PATH + "/Gemfile.lock"
39 | gemfile := NewFileWatcher(gemfilePath, testCallbackNOP)
40 |
41 | t.files = Watchers{dpkgFile, gemfile}
42 |
43 | t.client = NewClient(t.apiKey, &Server{
44 | UUID: t.serverUUID,
45 | Tags: []string{"dogs", "webserver"},
46 | })
47 | }
48 |
49 | func (t *ClientTestSuite) TestHeartbeat() {
50 | env := conf.FetchEnv()
51 | serverInvoked := false
52 | time.Sleep(conf.TEST_POLL_SLEEP)
53 | ts := testServer(t, "POST", "{\"success\": true}", func(r *http.Request, rBody TestJsonRequest) {
54 | serverInvoked = true
55 |
56 | t.Equal("Token "+t.apiKey, r.Header.Get("Authorization"), "heartbeat api key")
57 |
58 | jsonFiles := rBody["files"].([]interface{})
59 |
60 | // does the json we send look roughly like
61 | // it's supposed to?
62 | t.NotNil(jsonFiles)
63 | t.Equal(2, len(jsonFiles))
64 | monitoredFile := jsonFiles[0].(map[string]interface{})
65 |
66 | t.Equal("ubuntu", monitoredFile["kind"])
67 | t.NotNil(monitoredFile["path"])
68 | t.NotEqual("", monitoredFile["path"])
69 | t.NotNil(monitoredFile["updated-at"])
70 | t.NotEqual("", monitoredFile["updated-at"])
71 | t.Equal(true, monitoredFile["being-watched"])
72 |
73 | monitoredFile2 := jsonFiles[1].(map[string]interface{})
74 |
75 | t.Equal("gemfile", monitoredFile2["kind"])
76 | t.NotNil(monitoredFile2["path"])
77 | t.NotEqual("", monitoredFile2["path"])
78 | t.NotNil(monitoredFile2["updated-at"])
79 | t.NotEqual("", monitoredFile2["updated-at"])
80 | t.Equal(true, monitoredFile2["being-watched"])
81 |
82 | if rBody["tags"] == nil {
83 | t.Fail("tags should not be nil")
84 | } else {
85 | jsonTags := rBody["tags"].([]interface{})
86 | t.Equal("dogs", jsonTags[0].(string))
87 | t.Equal("webserver", jsonTags[1].(string))
88 | }
89 | })
90 |
91 | // the client uses BaseUrl to set up queries.
92 | env.BaseUrl = ts.URL
93 |
94 | // actual test execution
95 | t.client.Heartbeat(t.serverUUID, t.files)
96 |
97 | ts.Close()
98 | t.files[0].Stop()
99 | t.True(serverInvoked)
100 | }
101 |
102 | func (t *ClientTestSuite) TestSendProcessState() {
103 | env := conf.FetchEnv()
104 |
105 | serverInvoked := false
106 | ts := testServer(t, "PUT", "OK", func(r *http.Request, rBody TestJsonRequest) {
107 | serverInvoked = true
108 |
109 | t.Equal("Token "+t.apiKey, r.Header.Get("Authorization"), "heartbeat api key")
110 |
111 | // TODO Test what was received
112 | })
113 |
114 | env.BaseUrl = ts.URL
115 | script := conf.DEV_CONF_PATH + "/pointless"
116 |
117 | cmd := exec.Command(script)
118 | err := cmd.Start()
119 | t.Nil(err)
120 |
121 | defer cmd.Process.Kill()
122 |
123 | done := make(chan bool)
124 |
125 | watcher := NewProcessWatcher("pointless", func(w Watcher) {
126 | wt := w.(ProcessWatcher)
127 | jsonBytes := wt.StateJson()
128 | t.NotNil(jsonBytes)
129 |
130 | var pm map[string]interface{}
131 | json.Unmarshal(jsonBytes, &pm)
132 |
133 | server := pm["server"]
134 | t.NotNil(server)
135 |
136 | serverM := server.(map[string]interface{})
137 |
138 | processMap := serverM["system_state"]
139 | t.NotNil(processMap)
140 |
141 | processMapM := processMap.(map[string]interface{})
142 |
143 | processes := processMapM["processes"]
144 | t.NotNil(processes)
145 |
146 | processesS := processes.([]interface{})
147 |
148 | var watchedProc map[string]interface{}
149 | for _, proc := range processesS {
150 | procM := proc.(map[string]interface{})
151 | if int(procM["pid"].(float64)) == cmd.Process.Pid {
152 | watchedProc = procM
153 | break
154 | }
155 | }
156 |
157 | t.NotNil(watchedProc)
158 | t.Equal(false, watchedProc["outdated"])
159 | t.NotNil(watchedProc["libraries"])
160 | t.NotNil(watchedProc["started"])
161 |
162 | // Note this will fail if `dpkg` is unavailable
163 | if len(watchedProc["libraries"].([]interface{})) == 0 {
164 | t.Fail("No libraries were found - could be dpkg is not installed?")
165 | }
166 | done <- true
167 | })
168 |
169 | t.NotNil(watcher.(ProcessWatcher))
170 |
171 | // kick things off
172 | watcher.Start()
173 | defer watcher.Stop()
174 |
175 | <-done // wait
176 | }
177 |
178 | func (t *ClientTestSuite) TestSendFile() {
179 | env := conf.FetchEnv()
180 | testFilePath := "/var/foo/whatever"
181 |
182 | serverInvoked := false
183 | ts := testServer(t, "PUT", "OK", func(r *http.Request, rBody TestJsonRequest) {
184 | serverInvoked = true
185 |
186 | t.Equal("Token "+t.apiKey, r.Header.Get("Authorization"), "heartbeat api key")
187 |
188 | json := rBody
189 |
190 | t.Equal("", json["name"])
191 | t.Equal(testFilePath, json["path"])
192 | t.Equal("gemfile", json["kind"])
193 | t.NotEqual("", json["contents"])
194 |
195 | })
196 |
197 | env.BaseUrl = ts.URL
198 |
199 | contents, _ := t.files[0].(TextWatcher).Contents()
200 | t.client.SendFile(testFilePath, "gemfile", contents)
201 |
202 | ts.Close()
203 | t.True(serverInvoked)
204 | }
205 |
206 | func (t *ClientTestSuite) TestCreateServer() {
207 | env := conf.FetchEnv()
208 |
209 | server := NewServer(&conf.Conf{Tags: []string{"dogs", "webserver"}}, &conf.ServerConf{})
210 |
211 | testUUID := "12345"
212 | jsonResponse := "{\"uuid\":\"" + testUUID + "\"}"
213 | serverInvoked := false
214 |
215 | ts := testServer(t, "POST", jsonResponse, func(r *http.Request, rBody TestJsonRequest) {
216 | serverInvoked = true
217 |
218 | t.Equal("Token "+t.apiKey, r.Header.Get("Authorization"), "heartbeat api key")
219 |
220 | json := rBody
221 |
222 | t.Equal(server.Hostname, json["hostname"])
223 | t.Equal(server.Uname, json["uname"])
224 | t.Equal(server.Ip, json["ip"])
225 | t.Nil(json["uuid"])
226 |
227 | if json["tags"] == nil {
228 | t.Fail("tags should not be nil")
229 | } else {
230 | tags := json["tags"].([]interface{})
231 | t.Equal(server.Tags[0], tags[0].(string))
232 | t.Equal(server.Tags[1], tags[1].(string))
233 | }
234 | })
235 |
236 | env.BaseUrl = ts.URL
237 | responseUUID, _ := t.client.CreateServer(server)
238 | ts.Close()
239 | t.True(serverInvoked)
240 | t.Equal(testUUID, responseUUID)
241 | }
242 |
243 | func (t *ClientTestSuite) TestFetchUpgradeablePackages() {
244 | env := conf.FetchEnv()
245 |
246 | jsonResponse := "{\"libkrb5-3\":\"1.12+dfsg-2ubuntu5.2\",\"isc-dhcp-client\":\"4.2.4-7ubuntu12.4\"}"
247 | serverInvoked := false
248 | ts := testServerSansInput(t, "GET", jsonResponse, func(r *http.Request, rBody TestJsonRequest) {
249 | serverInvoked = true
250 |
251 | t.Equal("Token "+t.apiKey, r.Header.Get("Authorization"), "heartbeat api key")
252 | })
253 |
254 | env.BaseUrl = ts.URL
255 | packageList, _ := t.client.FetchUpgradeablePackages()
256 | ts.Close()
257 |
258 | t.Equal("1.12+dfsg-2ubuntu5.2", packageList["libkrb5-3"])
259 | t.True(serverInvoked)
260 | }
261 |
262 | func testCallbackNOP(foo Watcher) {
263 | // NOP
264 | }
265 |
266 | // Sends an http.ResponseWriter a string and status
267 | func tsrespond(w http.ResponseWriter, status int, v string) {
268 | w.Header().Set("Content-Type", "text/plain")
269 | w.WriteHeader(status)
270 | w.Write([]byte(v))
271 | }
272 |
273 | func testServer(assert *ClientTestSuite, method string, respondWithBody string, callback func(*http.Request, TestJsonRequest)) *httptest.Server {
274 | ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
275 | assert.Equal(method, r.Method, "method")
276 | assert.Equal("application/json", r.Header.Get("Content-Type"), "content type")
277 |
278 | body, _ := ioutil.ReadAll(r.Body)
279 | r.Body.Close()
280 |
281 | var datBody TestJsonRequest
282 | if err := json.Unmarshal(body, &datBody); err != nil {
283 | panic(err)
284 | }
285 |
286 | callback(r, datBody)
287 | tsrespond(w, 200, respondWithBody)
288 | }))
289 |
290 | return ts
291 | }
292 |
293 | func testServerSansInput(assert *ClientTestSuite, method string, respondWithBody string, callback func(*http.Request, TestJsonRequest)) *httptest.Server {
294 | ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
295 | assert.Equal(method, r.Method, "method")
296 | assert.Equal("application/json", r.Header.Get("Content-Type"), "content type")
297 |
298 | body, _ := ioutil.ReadAll(r.Body)
299 | r.Body.Close()
300 |
301 | var datBody TestJsonRequest
302 | if len(body) > 0 {
303 | if err := json.Unmarshal(body, &datBody); err != nil {
304 | panic(err)
305 | }
306 | }
307 |
308 | callback(r, datBody)
309 | tsrespond(w, 200, respondWithBody)
310 | }))
311 |
312 | return ts
313 | }
314 |
315 | // TODO: handle pathological cases, error handling?
316 |
--------------------------------------------------------------------------------
/agent/detect/bindata.go:
--------------------------------------------------------------------------------
1 | // Code generated by go-bindata.
2 | // sources:
3 | // agent/resources/detect_linux.sh
4 | // DO NOT EDIT!
5 |
6 | package detect
7 |
8 | import (
9 | "bytes"
10 | "compress/gzip"
11 | "fmt"
12 | "io"
13 | "io/ioutil"
14 | "os"
15 | "path/filepath"
16 | "strings"
17 | "time"
18 | )
19 |
20 | func bindataRead(data []byte, name string) ([]byte, error) {
21 | gz, err := gzip.NewReader(bytes.NewBuffer(data))
22 | if err != nil {
23 | return nil, fmt.Errorf("Read %q: %v", name, err)
24 | }
25 |
26 | var buf bytes.Buffer
27 | _, err = io.Copy(&buf, gz)
28 | clErr := gz.Close()
29 |
30 | if err != nil {
31 | return nil, fmt.Errorf("Read %q: %v", name, err)
32 | }
33 | if clErr != nil {
34 | return nil, err
35 | }
36 |
37 | return buf.Bytes(), nil
38 | }
39 |
40 | type asset struct {
41 | bytes []byte
42 | info os.FileInfo
43 | }
44 |
45 | type bindataFileInfo struct {
46 | name string
47 | size int64
48 | mode os.FileMode
49 | modTime time.Time
50 | }
51 |
52 | func (fi bindataFileInfo) Name() string {
53 | return fi.name
54 | }
55 | func (fi bindataFileInfo) Size() int64 {
56 | return fi.size
57 | }
58 | func (fi bindataFileInfo) Mode() os.FileMode {
59 | return fi.mode
60 | }
61 | func (fi bindataFileInfo) ModTime() time.Time {
62 | return fi.modTime
63 | }
64 | func (fi bindataFileInfo) IsDir() bool {
65 | return false
66 | }
67 | func (fi bindataFileInfo) Sys() interface{} {
68 | return nil
69 | }
70 |
71 | var _agentResourcesDetect_linuxSh = []byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\xac\x56\xdf\x4f\xdb\x30\x10\x7e\xcf\x5f\x71\x2b\x15\x69\x25\x42\x28\x6c\xf0\x30\xf1\xc0\x44\x27\x55\x9a\x36\x09\xd0\x5e\x10\xaa\x5c\xe7\x42\x3c\x52\xbb\xb3\xdd\x06\xd6\xf5\x7f\xdf\xe5\x47\x9b\x1f\x84\x95\x8e\xbd\x25\xbe\xcf\xdf\x7d\xfe\xee\xce\xf2\xde\x3b\x7f\x6e\xb4\x3f\x11\xd2\x47\xb9\x80\x09\x33\x91\x63\xd0\x82\xf7\x88\x0a\x66\x62\x86\x21\x13\xb1\xe3\xcc\xe5\x83\x54\x89\x1c\x2b\xd3\xeb\xc3\xd2\x01\x40\x1e\x29\xf0\x24\x14\x01\x67\xe5\x38\x33\x2d\xa4\x1d\x2f\x50\x1b\xa1\x64\x01\x8b\x15\x67\x31\x04\xc2\x58\xad\xce\xbb\xcb\xc1\x0a\x34\xc6\xc8\x0c\xd2\xcf\xf1\xaa\xc2\xd3\x5d\xe6\xa0\x95\xdf\x5d\x16\x90\x2c\xfc\x28\x2c\x1c\xa5\xec\x01\x5a\xe4\x76\x1c\x0b\x39\x7f\xcc\xc8\x29\xba\x07\x37\x11\x1a\x04\x9b\x28\xe0\x4a\x5a\x26\x24\xa4\xa7\x58\x30\x2d\xd8\x24\x46\x73\x00\x93\xb9\x05\x1b\xe1\x13\x4c\xd9\x13\x48\x65\x21\x62\x0b\x4c\x57\x40\x49\x34\x90\x20\x48\xc4\xe0\x00\x8c\xca\xf8\xe8\x3f\x56\x2c\x48\x01\x53\x20\xb6\x50\x68\x63\x81\xc9\x6c\x45\x02\x8f\x90\x3f\x80\x08\x53\xdc\xbd\xca\x98\xcb\x64\xe9\x62\xc2\xa4\xc5\x80\xa8\x08\x73\x7b\x0b\x1e\x82\x8f\x96\xfb\xca\x78\xc5\xa1\xe0\xee\xee\x63\xc6\x45\x18\xa0\xac\x73\xcd\x9f\x61\x28\x14\x8a\x67\x1c\xb1\x99\x6c\x27\xa9\x80\x72\x96\x0d\x4d\x8f\x4c\xee\x74\x97\xa3\xcb\x55\xa7\x0f\xfb\xfb\xeb\xff\xef\xc3\xab\xeb\xd1\xb7\xaf\xe3\x6c\xbd\x46\xbb\x29\x1a\x85\xb2\x85\xb2\x72\x95\x4d\x4e\x16\xaa\x95\xbe\xac\x25\x54\x6a\x99\x16\x33\xae\x49\xb9\x1c\x5d\xdf\x5c\x8d\x3e\x8d\x9b\x92\xd6\xeb\x57\xc3\x2f\xc3\x8b\xeb\xe1\x8b\xba\x2a\xfb\x1b\xfa\x9a\x0c\x3b\x8b\x4c\x22\xc1\x23\x20\x33\xc7\x45\xa8\x25\x7f\xaf\x12\x06\x4f\xc0\x6f\xe0\xd4\x6b\x5e\x78\xdc\xaf\xab\xa9\xe3\x74\x15\xf7\x6a\x59\x15\xf3\xd6\xed\x10\xe0\x44\x30\xb9\xd9\x57\xb3\x68\x8f\x7a\x62\x8a\x70\x99\x41\x4c\xde\xf2\x3f\xd0\x18\x81\xbe\x11\x41\xda\xd7\x04\x15\xba\x8d\xa8\xd8\x4f\x06\xc4\x34\x22\x04\xd3\xc5\x7e\xf7\xf4\xf0\xe8\xf0\xcc\xad\x5b\xc0\x99\xcd\x49\x84\x31\x73\xa4\xa3\x45\x48\xc3\xe3\x0d\xe8\x8b\x25\x0f\xe0\x2e\xf3\xd3\x81\x55\xb1\x4a\x50\xf7\xba\x83\x3e\xac\xdc\xe2\xe0\x74\x9e\x7b\x8d\x33\xf0\x7e\x82\xeb\xbb\x6d\x62\x2a\x47\xaa\x1a\x9a\xf9\xe7\x05\xe4\xc8\x54\x58\xd4\xe7\xe9\x6e\x2f\x1c\xb4\x31\xe4\xb5\xc0\x38\x1b\x87\x2d\x2c\x87\x5b\x58\xf2\x69\xda\xb5\xd9\x37\x57\x80\x66\x3c\xc6\xf6\x09\x6e\x88\x0a\x3f\xd4\x85\x81\xdb\x4a\x91\x7b\xec\x7d\x86\x54\xf9\xc6\xea\xee\x20\x73\xb8\x52\x26\x57\xc5\xee\x5b\x94\x87\x18\x50\xe6\xd7\x29\x3f\x69\x57\x5e\xa7\xa8\xab\xcb\x63\x6f\x52\xa8\x31\x88\x98\x6d\x57\x98\x33\x8c\x23\xe2\xad\x36\x6c\x63\xcb\xb6\x86\x2d\xfa\x95\x72\x76\xd6\xa2\x32\xca\x55\x07\xce\xa1\xc3\x51\x5a\x65\x3a\xf5\xc4\x35\x73\x5e\x48\xdb\xc8\xda\x3d\xa1\x64\xdb\xeb\x5a\x7a\x97\x27\x76\x8b\x26\x7f\x59\xa0\xe1\x82\x90\x22\x14\xfc\x3f\x88\x7c\xbf\xa3\xc8\x32\xb9\xfb\x97\x69\xdc\xb5\x30\x67\xfd\x5c\x46\xde\x77\x83\xe6\x28\x37\x45\xe4\xb4\xa4\x03\x35\x11\x19\x7a\x35\x68\xea\x32\xf7\x5f\x07\x7b\x7d\x71\x5d\x4c\xd9\x2f\xc2\x96\x77\x60\xcb\x6c\xb8\xa7\xb5\x5b\xd3\x65\x49\x51\xb1\x57\xe4\x2b\x5e\x02\xe5\xf3\xab\xf9\x14\x72\xfe\x04\x00\x00\xff\xff\x91\x57\x92\xb5\xc1\x09\x00\x00")
72 |
73 | func agentResourcesDetect_linuxShBytes() ([]byte, error) {
74 | return bindataRead(
75 | _agentResourcesDetect_linuxSh,
76 | "agent/resources/detect_linux.sh",
77 | )
78 | }
79 |
80 | func agentResourcesDetect_linuxSh() (*asset, error) {
81 | bytes, err := agentResourcesDetect_linuxShBytes()
82 | if err != nil {
83 | return nil, err
84 | }
85 |
86 | info := bindataFileInfo{name: "agent/resources/detect_linux.sh", size: 2497, mode: os.FileMode(420), modTime: time.Unix(1480694563, 0)}
87 | a := &asset{bytes: bytes, info: info}
88 | return a, nil
89 | }
90 |
91 | // Asset loads and returns the asset for the given name.
92 | // It returns an error if the asset could not be found or
93 | // could not be loaded.
94 | func Asset(name string) ([]byte, error) {
95 | cannonicalName := strings.Replace(name, "\\", "/", -1)
96 | if f, ok := _bindata[cannonicalName]; ok {
97 | a, err := f()
98 | if err != nil {
99 | return nil, fmt.Errorf("Asset %s can't read by error: %v", name, err)
100 | }
101 | return a.bytes, nil
102 | }
103 | return nil, fmt.Errorf("Asset %s not found", name)
104 | }
105 |
106 | // MustAsset is like Asset but panics when Asset would return an error.
107 | // It simplifies safe initialization of global variables.
108 | func MustAsset(name string) []byte {
109 | a, err := Asset(name)
110 | if err != nil {
111 | panic("asset: Asset(" + name + "): " + err.Error())
112 | }
113 |
114 | return a
115 | }
116 |
117 | // AssetInfo loads and returns the asset info for the given name.
118 | // It returns an error if the asset could not be found or
119 | // could not be loaded.
120 | func AssetInfo(name string) (os.FileInfo, error) {
121 | cannonicalName := strings.Replace(name, "\\", "/", -1)
122 | if f, ok := _bindata[cannonicalName]; ok {
123 | a, err := f()
124 | if err != nil {
125 | return nil, fmt.Errorf("AssetInfo %s can't read by error: %v", name, err)
126 | }
127 | return a.info, nil
128 | }
129 | return nil, fmt.Errorf("AssetInfo %s not found", name)
130 | }
131 |
132 | // AssetNames returns the names of the assets.
133 | func AssetNames() []string {
134 | names := make([]string, 0, len(_bindata))
135 | for name := range _bindata {
136 | names = append(names, name)
137 | }
138 | return names
139 | }
140 |
141 | // _bindata is a table, holding each asset generator, mapped to its name.
142 | var _bindata = map[string]func() (*asset, error){
143 | "agent/resources/detect_linux.sh": agentResourcesDetect_linuxSh,
144 | }
145 |
146 | // AssetDir returns the file names below a certain
147 | // directory embedded in the file by go-bindata.
148 | // For example if you run go-bindata on data/... and data contains the
149 | // following hierarchy:
150 | // data/
151 | // foo.txt
152 | // img/
153 | // a.png
154 | // b.png
155 | // then AssetDir("data") would return []string{"foo.txt", "img"}
156 | // AssetDir("data/img") would return []string{"a.png", "b.png"}
157 | // AssetDir("foo.txt") and AssetDir("notexist") would return an error
158 | // AssetDir("") will return []string{"data"}.
159 | func AssetDir(name string) ([]string, error) {
160 | node := _bintree
161 | if len(name) != 0 {
162 | cannonicalName := strings.Replace(name, "\\", "/", -1)
163 | pathList := strings.Split(cannonicalName, "/")
164 | for _, p := range pathList {
165 | node = node.Children[p]
166 | if node == nil {
167 | return nil, fmt.Errorf("Asset %s not found", name)
168 | }
169 | }
170 | }
171 | if node.Func != nil {
172 | return nil, fmt.Errorf("Asset %s not found", name)
173 | }
174 | rv := make([]string, 0, len(node.Children))
175 | for childName := range node.Children {
176 | rv = append(rv, childName)
177 | }
178 | return rv, nil
179 | }
180 |
181 | type bintree struct {
182 | Func func() (*asset, error)
183 | Children map[string]*bintree
184 | }
185 | var _bintree = &bintree{nil, map[string]*bintree{
186 | "agent": &bintree{nil, map[string]*bintree{
187 | "resources": &bintree{nil, map[string]*bintree{
188 | "detect_linux.sh": &bintree{agentResourcesDetect_linuxSh, map[string]*bintree{}},
189 | }},
190 | }},
191 | }}
192 |
193 | // RestoreAsset restores an asset under the given directory
194 | func RestoreAsset(dir, name string) error {
195 | data, err := Asset(name)
196 | if err != nil {
197 | return err
198 | }
199 | info, err := AssetInfo(name)
200 | if err != nil {
201 | return err
202 | }
203 | err = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))
204 | if err != nil {
205 | return err
206 | }
207 | err = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())
208 | if err != nil {
209 | return err
210 | }
211 | err = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())
212 | if err != nil {
213 | return err
214 | }
215 | return nil
216 | }
217 |
218 | // RestoreAssets restores an asset under the given directory recursively
219 | func RestoreAssets(dir, name string) error {
220 | children, err := AssetDir(name)
221 | // File
222 | if err != nil {
223 | return RestoreAsset(dir, name)
224 | }
225 | // Dir
226 | for _, child := range children {
227 | err = RestoreAssets(dir, filepath.Join(name, child))
228 | if err != nil {
229 | return err
230 | }
231 | }
232 | return nil
233 | }
234 |
235 | func _filePath(dir, name string) string {
236 | cannonicalName := strings.Replace(name, "\\", "/", -1)
237 | return filepath.Join(append([]string{dir}, strings.Split(cannonicalName, "/")...)...)
238 | }
239 |
240 |
--------------------------------------------------------------------------------
/agent/detect/detect.go:
--------------------------------------------------------------------------------
1 | package detect
2 |
3 | import (
4 | "bytes"
5 | "errors"
6 | "os/exec"
7 | "strings"
8 | )
9 |
10 | type LinuxOSInfo struct {
11 | Distro string `yaml:"distro,omitempty" toml:"distro"`
12 | Release string `yaml:"release,omitempty" toml:"release"`
13 | }
14 |
15 | func loadScript() []byte {
16 | data, err := Asset("agent/resources/detect_linux.sh")
17 | if err != nil {
18 | panic("Can't find detect_linux.sh - something is *wrong*.")
19 | }
20 | return data
21 | }
22 |
23 | func run(c *exec.Cmd) (string, error) {
24 | if c.Stdout != nil {
25 | return "", errors.New("exec: Stdout already set")
26 | }
27 | if c.Stderr != nil {
28 | return "", errors.New("exec: Stderr already set")
29 | }
30 | var stdout, stderr bytes.Buffer
31 | c.Stdout = &stdout
32 | c.Stderr = &stderr
33 |
34 | err := c.Run()
35 | if err != nil {
36 | return "Detection failed:\n" + string(stderr.Bytes()), err
37 | } else {
38 | return string(stdout.Bytes()), err
39 | }
40 | }
41 |
42 | func parseDetectionString(str string) (*LinuxOSInfo, error) {
43 | if str == "unknown" {
44 | return nil, errors.New("Unknown linux distro")
45 | } else {
46 | str = strings.ToLower(str)
47 | splat := strings.Split(str, "/")
48 | return &LinuxOSInfo{Distro: splat[0], Release: splat[1]}, nil
49 | }
50 | }
51 |
52 | func DetectOS() (*LinuxOSInfo, error) {
53 | script := string(loadScript())
54 |
55 | _, err := exec.LookPath("bash")
56 | if err != nil {
57 | return nil, errors.New("Can't find bash.")
58 | }
59 |
60 | cmd := exec.Command("bash", "-c", script)
61 | output, err := run(cmd)
62 | if err != nil {
63 | return nil, errors.New(output)
64 | }
65 | return parseDetectionString(output)
66 |
67 | }
68 |
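In other words, the embedded script prints a single distro/release token (or "unknown"), which parseDetectionString lowercases and splits on "/". A minimal sketch of calling the exported entry point from another package (output values are illustrative):

package main

import (
	"fmt"

	"github.com/appcanary/agent/agent/detect"
)

func main() {
	info, err := detect.DetectOS()
	if err != nil {
		fmt.Println("detection failed:", err)
		return
	}
	// On an Ubuntu 16.04 host this would print "ubuntu/16.04"
	// (values are illustrative).
	fmt.Printf("%s/%s\n", info.Distro, info.Release)
}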
--------------------------------------------------------------------------------
/agent/mocks.go:
--------------------------------------------------------------------------------
1 | package agent
2 |
3 | import (
4 | "github.com/appcanary/testify/mock"
5 | )
6 |
7 | type MockClient struct {
8 | mock.Mock
9 | }
10 |
11 | func (m *MockClient) Heartbeat(_a0 string, _a1 Watchers) error {
12 | ret := m.Called()
13 |
14 | r0 := ret.Error(0)
15 |
16 | return r0
17 | }
18 |
19 | func (m *MockClient) SendFile(_a0 string, _a1 string, _a2 []byte) error {
20 | ret := m.Called()
21 |
22 | r0 := ret.Error(0)
23 |
24 | return r0
25 | }
26 |
27 | func (m *MockClient) SendProcessState(_a0 string, _a1 []byte) error {
28 | return m.Called().Error(0)
29 | }
30 |
31 | func (m *MockClient) CreateServer(_a0 *Server) (string, error) {
32 | return m.Called().String(0), nil
33 | }
34 |
35 | func (m *MockClient) FetchUpgradeablePackages() (map[string]string, error) {
36 | ret := m.Called()
37 |
38 | var r0 map[string]string
39 | if ret.Get(0) != nil {
40 | r0 = ret.Get(0).(map[string]string)
41 | }
42 | r1 := ret.Error(1)
43 |
44 | return r0, r1
45 | }
46 |
--------------------------------------------------------------------------------
/agent/processwatcher.go:
--------------------------------------------------------------------------------
1 | package agent
2 |
3 | import (
4 | "encoding/json"
5 | "fmt"
6 | "hash/crc32"
7 | "os"
8 | "strings"
9 | "sync"
10 | "time"
11 |
12 | "github.com/appcanary/agent/conf"
13 | "github.com/appcanary/libspector"
14 | )
15 |
16 | type ProcessWatcher interface {
17 | Start()
18 | Stop()
19 | Match() string
20 | StateJson() []byte
21 | }
22 |
23 | // Types
24 |
25 | // Singleton control object
26 | type processWatcher struct {
27 | sync.Mutex
28 | keepPolling bool
29 | UpdatedAt time.Time
30 | OnChange ChangeHandler
31 | pollSleep time.Duration
32 | BeingWatched bool
33 | match string
34 | stateJson []byte
35 | checksum uint32
36 | }
37 |
38 | // process objects with references to systemLibraries
39 | type systemProcesses []watchedProcess
40 |
41 | // a single library entry, indexed by path in processLibraryMap
42 | type systemLibrary struct {
43 | Path string
44 | PackageName string
45 | PackageVersion string
46 | Modified time.Time
47 | }
48 |
49 | // "map" in the colloquial sense
50 | type systemState struct {
51 | processes systemProcesses
52 | libraries processLibraryMap
53 | }
54 |
55 | type processLibraryMap map[string]systemLibrary
56 |
57 | type watchedProcess struct {
58 | ProcessStartedAt time.Time
59 | ProcessLibraries []processLibrary
60 | Outdated bool
61 | Pid int
62 | CommandName string
63 | CommandArgs string
64 | }
65 |
66 | type processLibrary struct {
67 | libraryPath string // point to a systemLibrary
68 | Outdated bool
69 | }
70 |
71 | func (ss *systemState) MarshalJSON() ([]byte, error) {
72 | libraries := make(map[string]interface{}, len(ss.libraries))
73 | for path, lib := range ss.libraries {
74 | libraries[path] = libToMap(lib)
75 | }
76 |
77 | processes := make([]map[string]interface{}, len(ss.processes))
78 | for i, proc := range ss.processes {
79 | processes[i] = map[string]interface{}{
80 | "started": proc.ProcessStartedAt,
81 | "libraries": procLibsToMapArray(proc.ProcessLibraries),
82 | "outdated": proc.Outdated,
83 | "pid": proc.Pid,
84 | "name": proc.CommandName,
85 | "args": proc.CommandArgs,
86 | }
87 | }
88 |
89 | return json.Marshal(map[string]interface{}{
90 | "processes": processes,
91 | "libraries": libraries,
92 | })
93 | }
94 |
95 | func procLibsToMapArray(libs []processLibrary) []map[string]interface{} {
96 | procLibs := make([]map[string]interface{}, len(libs))
97 |
98 | for i, lib := range libs {
99 | procLibs[i] = map[string]interface{}{
100 | "outdated": lib.Outdated,
101 | "library_path": lib.libraryPath,
102 | }
103 | }
104 |
105 | return procLibs
106 | }
107 |
108 | func libToMap(lib systemLibrary) map[string]interface{} {
109 | return map[string]interface{}{
110 | "path": lib.Path,
111 | "modified": lib.Modified,
112 | "package_name": lib.PackageName,
113 | "package_version": lib.PackageVersion,
114 | }
115 | }
116 |
117 | func (pw *processWatcher) acquireState() *systemState {
118 | log := conf.FetchLog()
119 |
120 | lsProcs, err := pw.processes()
121 | if err != nil {
122 | log.Fatalf("Couldn't load processes: %s", err)
123 | }
124 |
125 | ss := systemState{
126 | processes: make(systemProcesses, 0, len(lsProcs)),
127 | libraries: make(processLibraryMap, 0), // ¯\_(ツ)_/¯
128 | }
129 |
130 | rejects := map[string]bool{}
131 |
132 | for _, lsProc := range lsProcs {
133 | started, err := lsProc.Started()
134 | if err != nil {
135 | log.Debugf("PID %d is not running, skipping", lsProc.PID())
136 | continue
137 | }
138 |
139 | commandArgs, err := lsProc.CommandArgs()
140 | if err != nil {
141 | log.Debugf("Can't read command line for PID %d: %v", lsProc.PID(), err)
142 | // fall through, we can live without this (?)
143 | }
144 |
145 | commandName, err := lsProc.CommandName()
146 | if err != nil {
147 | log.Debugf("Can't read command name for PID %d: %v", lsProc.PID(), err)
148 | // fall through, we can live without this (?)
149 | }
150 |
151 | spectorLibs, err := lsProc.Libraries()
152 | if err != nil {
153 | if os.Getuid() != 0 && os.Geteuid() != 0 {
154 | log.Debugf("Cannot examine libs for PID %d, with UID:%d, EUID:%d",
155 | lsProc.PID(), os.Getuid(), os.Geteuid())
156 | continue
157 | }
158 |
159 | if strings.Contains(err.Error(), "42") {
160 | // process went away
161 | log.Debugf("Cannot examine libs for PID %d, process disappeared",
162 | lsProc.PID())
163 | continue
164 | }
165 |
166 | // otherwise barf
167 | log.Fatalf("Couldn't load libs for process %v: %s", lsProc, err)
168 | }
169 |
170 | wp := watchedProcess{
171 | ProcessStartedAt: started,
172 | Pid: lsProc.PID(),
173 | ProcessLibraries: make([]processLibrary, 0, len(spectorLibs)),
174 | Outdated: false,
175 | CommandName: commandName,
176 | CommandArgs: commandArgs,
177 | }
178 |
179 | for _, spectorLib := range spectorLibs {
180 | path := spectorLib.Path()
181 | if rejects[path] {
182 | // logSDebugf("Already rejected %v", path)
183 | continue
184 | }
185 |
186 | if _, ok := ss.libraries[path]; !ok {
187 | sysLib, err := NewSystemLibrary(spectorLib)
188 | if err != nil {
189 | // log.Debugf("error introspecting system lib %s, %v; removing...", path, err)
190 | rejects[path] = true
191 | continue
192 | }
193 |
194 | ss.libraries[path] = sysLib
195 | }
196 |
197 | lib := processLibrary{
198 | libraryPath: path,
199 | Outdated: spectorLib.Outdated(lsProc),
200 | }
201 |
202 | if lib.Outdated && !wp.Outdated {
203 | wp.Outdated = true
204 | }
205 |
206 | wp.ProcessLibraries = append(wp.ProcessLibraries, lib)
207 | }
208 |
209 | ss.processes = append(ss.processes, wp)
210 | }
211 |
212 | return &ss
213 | }
214 |
215 | func NewSystemLibrary(lib libspector.Library) (sysLib systemLibrary, err error) {
216 | path := lib.Path()
217 |
218 | modified, err := lib.Ctime()
219 | if err != nil {
220 | return
221 | }
222 |
223 | pkg, err := lib.Package()
224 | if err != nil {
225 | return
226 | }
227 |
228 | pkgName := pkg.Name()
229 | pkgVersion := pkg.Version()
230 |
231 | sysLib = systemLibrary{
232 | Path: path,
233 | Modified: modified,
234 | PackageName: pkgName,
235 | PackageVersion: pkgVersion,
236 | }
237 |
238 | return
239 | }
240 |
241 | func NewProcessWatcher(match string, callback ChangeHandler) Watcher {
242 | env := conf.FetchEnv()
243 |
244 | watcher := &processWatcher{
245 | match: match,
246 | OnChange: callback,
247 | UpdatedAt: time.Now(),
248 | pollSleep: env.PollSleep,
249 | }
250 |
251 | // Don't scan from here, we just end up with two running at once
252 | return watcher
253 | }
254 |
255 | func NewAllProcessWatcher(callback ChangeHandler) Watcher {
256 | return NewProcessWatcher("*", callback)
257 | }
258 |
259 | func (pw *processWatcher) MarshalJSON() ([]byte, error) {
260 | pw.Lock()
261 | defer pw.Unlock()
262 | return json.Marshal(map[string]interface{}{
263 | "match": pw.Match(),
264 | "updated-at": pw.UpdatedAt,
265 | "being-watched": pw.BeingWatched,
266 | })
267 | }
268 |
269 | func (pw *processWatcher) Start() {
270 | pw.Lock()
271 | pw.keepPolling = true
272 | pw.Unlock()
273 | go pw.listen()
274 | }
275 |
276 | func (pw *processWatcher) Stop() {
277 | pw.Lock()
278 | pw.keepPolling = false
279 | pw.Unlock()
280 | }
281 |
282 | func (pw *processWatcher) Match() string {
283 | return pw.match
284 | }
285 |
286 | func (pw *processWatcher) KeepPolling() bool {
287 | pw.Lock()
288 | defer pw.Unlock()
289 | return pw.keepPolling
290 | }
291 |
292 | func (pw *processWatcher) setStateAttribute() {
293 | log := conf.FetchLog()
294 | state := pw.acquireState()
295 |
296 | json, err := json.Marshal(map[string]interface{}{
297 | "server": map[string]interface{}{
298 | "system_state": state,
299 | },
300 | })
301 |
302 | if err != nil {
303 | log.Fatal(err) // really shouldn't happen
304 | }
305 |
306 | pw.stateJson = json
307 | }
308 |
309 | func (pw *processWatcher) StateJson() []byte {
310 | pw.Lock()
311 | defer pw.Unlock()
312 | if pw.stateJson == nil {
313 | pw.setStateAttribute()
314 | }
315 | return pw.stateJson
316 | }
317 |
318 | func (pw *processWatcher) processes() (procs []libspector.Process, err error) {
319 | if pw.match == "*" {
320 | procs, err = libspector.AllProcesses()
321 | } else {
322 | procs, err = libspector.FindProcess(pw.match)
323 | }
324 | return
325 | }
326 |
327 | func (pw *processWatcher) scan() {
328 | pw.Lock()
329 |
330 | pw.setStateAttribute()
331 |
332 | newChecksum := crc32.ChecksumIEEE(pw.stateJson)
333 | changed := newChecksum != pw.checksum
334 | pw.checksum = newChecksum
335 |
336 | pw.Unlock() // ¯\_(ツ)_/¯
337 |
338 | if changed {
339 | go pw.OnChange(pw)
340 | }
341 | }
342 |
343 | func (pw *processWatcher) listen() {
344 | for pw.KeepPolling() {
345 | pw.scan()
346 | // TODO: make a new var for this, it shouldn't be bound to the other
347 | // watchers' schedules.
348 | time.Sleep(pw.pollSleep)
349 | }
350 | }
351 |
352 | func ShipProcessMap(a *Agent) {
353 | watcher := NewProcessWatcher("*", func(w Watcher) {}).(*processWatcher)
354 | a.OnChange(watcher)
355 | }
356 |
357 | func DumpProcessMap() {
358 | watcher := NewProcessWatcher("*", func(w Watcher) {}).(*processWatcher)
359 | fmt.Printf("%s\n", string(watcher.StateJson()))
360 | }
361 |
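The payload StateJson produces is what handleProcessChange ships via SendProcessState: a JSON document of the form {"server": {"system_state": {"processes": [...], "libraries": {...}}}}. DumpProcessMap is the quickest way to inspect it locally; a minimal sketch:

package main

import "github.com/appcanary/agent/agent"

func main() {
	// Builds a one-off "*" process watcher and prints the current
	// process/library map to stdout. Run as root if you want library
	// details for processes owned by other users.
	agent.DumpProcessMap()
}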
--------------------------------------------------------------------------------
/agent/resources/detect_linux.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | set -xeo pipefail
3 |
4 | unknown_os() {
5 | echo -n unknown
6 | }
7 |
8 | print_version() {
9 | local distro=${1} release=${2}
10 | echo -n ${distro}/${release}
11 | exit 0
12 | }
13 |
14 | detect_linux() {
15 |
16 | # These two contain env variables, but they may not have the ones we need, so
17 | # we load them in first and then check if we got the variables we wanted
18 | if [[ -e /etc/os-release ]]; then
19 | source /etc/os-release
20 | fi
21 | if [[ -e /etc/lsb-release ]]; then
22 | source /etc/lsb-release
23 | fi
24 |
25 | if [[ (-n "${ID}") && (-n "${VERSION_ID}")]]; then
26 | distro=${ID}
27 | release=${VERSION_ID}
28 |
29 | print_version ${distro} ${release}
30 |
31 | elif [[ (-n "${DISTRIB_ID}") && (-n "${DISTRIB_RELEASE}")]]; then
32 | distro=${DISTRIB_ID}
33 | release=${DISTRIB_RELEASE}
34 |
35 | print_version ${distro} ${release}
36 |
37 | elif which lsb_release; then
38 | distro=$(lsb_release -i | cut -f2)
39 | release=$(lsb_release -r | cut -f2)
40 |
41 | print_version ${distro} ${release}
42 |
43 |
44 | elif [[ -e /etc/debian_version ]]; then
45 | # some Debians have jessie/sid in their /etc/debian_version
46 | # while others have '6.0.7'
47 | distro=$(cat /etc/issue | head -1 | awk '{ print tolower($1) }')
48 |
49 | if grep -q '/' /etc/debian_version; then
50 | release=$(cut --delimiter='/' -f1 /etc/debian_version)
51 | else
52 | release=$(cut --delimiter='.' -f1 /etc/debian_version)
53 | fi
54 |
55 | print_version ${distro} ${release}
56 |
57 | elif [[ -e /etc/oracle-release ]]; then
58 | release=$(cut -f5 --delimiter=' ' /etc/oracle-release | awk -F '.' '{ print $1 }')
59 | distro='ol'
60 |
61 | print_version ${distro} ${release}
62 |
63 | elif [[ -e /etc/fedora-release ]]; then
64 | release=$(cut -f3 --delimiter=' ' /etc/fedora-release)
65 | distro='fedora'
66 |
67 | print_version ${distro} ${release}
68 |
69 | elif [[ -e /etc/redhat-release ]]; then
70 | distro_hint=$(cat /etc/redhat-release | awk '{ print tolower($1) }')
71 | if [[ "${distro_hint}" = "centos" ]]; then
72 | release=$(cat /etc/redhat-release | awk '{ print $3 }' | awk -F '.' '{ print $1 }')
73 | distro='centos'
74 | elif [[ "${distro_hint}" = "scientific" ]]; then
75 | release=$(cat /etc/redhat-release | awk '{ print $4 }' | awk -F '.' '{ print $1 }')
76 | distro='scientific'
77 | else
78 | release=$(cat /etc/redhat-release | awk '{ print tolower($7) }' | cut -f1 --delimiter='.')
79 | distro='redhatenterpriseserver'
80 | fi
81 |
82 | print_version ${distro} ${release}
83 |
84 | elif grep -q Amazon /etc/issue; then
85 | release='6'
86 | distro='aws'
87 | print_version ${distro} ${release}
88 | fi
89 | unknown_os
90 | }
91 |
92 | detect_linux
93 |
--------------------------------------------------------------------------------
/agent/server.go:
--------------------------------------------------------------------------------
1 | package agent
2 |
3 | import (
4 | "net"
5 | "os"
6 | "os/exec"
7 |
8 | "github.com/appcanary/agent/agent/detect"
9 | "github.com/appcanary/agent/conf"
10 | )
11 |
12 | type Server struct {
13 | Hostname string `json:"hostname"`
14 | Uname string `json:"uname"`
15 | Ip string `json:"ip"`
16 | Name string `json:"name"`
17 | UUID string `json:"uuid,omitempty"`
18 | Distro string `json:"distro,omitempty"`
19 | Release string `json:"release,omitempty"`
20 | Tags []string `json:"tags,omitempty"`
21 | }
22 |
23 | // Creates a new server and syncs conf if needed
24 | func NewServer(agentConf *conf.Conf, serverConf *conf.ServerConf) *Server {
25 | log := conf.FetchLog()
26 |
27 | var err error
28 | var hostname, uname, thisIP, distro, release string
29 |
30 | hostname, err = os.Hostname()
31 | if err != nil {
32 | hostname = "unknown"
33 | }
34 |
35 | // syscall.Uname is only available on Linux, because on Darwin it isn't a
36 | // syscall (who knew). Furthermore, syscall.Uname returns a struct
37 | // (syscall.Utsname) of [65]int8 -- which are *signed* integers. Instead of
38 | // converting the signed integers into bytes and processing the whole thing
39 | // into a string, we just call `uname -a` for now and upload the
40 | // returned string.
41 | cmdUname, err := exec.Command("uname", "-a").Output()
42 | if err != nil {
43 | uname = "unknown"
44 | } else {
45 | uname = string(cmdUname)
46 | }
47 |
48 | // For now we only collect IPv4 addresses.
49 | // If we don't find a valid one, the IP is left blank.
50 | addrs, err := net.InterfaceAddrs()
51 | if err == nil {
52 | for _, a := range addrs {
53 | // if we can't find a valid IPv4 address, thisIP stays blank
54 | if ipnet, ok := a.(*net.IPNet); ok && !ipnet.IP.IsLoopback() && ipnet.IP.To4() != nil {
55 | thisIP = ipnet.IP.String()
56 | break
57 | }
58 | }
59 | }
60 |
61 | confOSInfo := agentConf.OSInfo()
62 | if confOSInfo != nil {
63 | distro = confOSInfo.Distro
64 | release = confOSInfo.Release
65 |
66 | } else {
67 |
68 | osInfo, err := detect.DetectOS()
69 | if err != nil {
70 | log.Error(err.Error())
71 | distro = "unknown"
72 | release = "unknown"
73 | } else {
74 | distro = osInfo.Distro
75 | release = osInfo.Release
76 | }
77 | }
78 |
79 | return &Server{
80 | Name: agentConf.ServerName,
81 | Hostname: hostname,
82 | Uname: uname,
83 | Ip: thisIP,
84 | UUID: serverConf.UUID,
85 | Distro: distro,
86 | Release: release,
87 | Tags: agentConf.Tags,
88 | }
89 | }
90 |
91 | func (server *Server) IsNew() bool {
92 | return server.UUID == ""
93 | }
94 |
95 | func (server *Server) IsUbuntu() bool {
96 | return server.Distro == "ubuntu"
97 | }
98 |
99 | func (server *Server) IsCentOS() bool {
100 | return server.Distro == "centos"
101 | }
102 |
--------------------------------------------------------------------------------
/agent/server_test.go:
--------------------------------------------------------------------------------
1 | package agent
2 |
3 | import (
4 | "testing"
5 |
6 | "github.com/appcanary/agent/agent/detect"
7 | "github.com/appcanary/agent/conf"
8 | "github.com/appcanary/testify/assert"
9 | )
10 |
11 | func TestServerConf(t *testing.T) {
12 | assert := assert.New(t)
13 |
14 | aconf := &conf.Conf{
15 | LinuxOSInfo: detect.LinuxOSInfo{
16 | Distro: "testDistro",
17 | Release: "testRelease",
18 | },
19 | ServerName: "TestName",
20 | Tags: []string{
21 | "simon",
22 | "dogs",
23 | },
24 | }
25 | server := NewServer(aconf, &conf.ServerConf{})
26 |
27 | assert.Equal("testDistro", server.Distro)
28 | assert.Equal("testRelease", server.Release)
29 | assert.Equal("TestName", server.Name)
30 | assert.Equal([]string{"simon", "dogs"}, server.Tags)
31 |
32 | aconf = &conf.Conf{}
33 | server = NewServer(aconf, &conf.ServerConf{})
34 |
35 | // amusingly, can't test generated values reliably
36 | // because these tests run in unpredictable linuxes
37 | assert.NotEqual("testDistro", server.Distro)
38 | assert.NotEqual("testRelease", server.Release)
39 | assert.Equal("", server.Name)
40 |
41 | }
42 |
--------------------------------------------------------------------------------
/agent/textwatcher.go:
--------------------------------------------------------------------------------
1 | package agent
2 |
3 | import (
4 | "encoding/json"
5 | "io/ioutil"
6 | "os/exec"
7 | "path/filepath"
8 | "strings"
9 | "sync"
10 | "time"
11 |
12 | "github.com/appcanary/agent/conf"
13 |
14 | "hash/crc32"
15 | )
16 |
17 | type TextWatcher interface {
18 | Start()
19 | Stop()
20 | Contents() ([]byte, error)
21 | Path() string
22 | Kind() string
23 | MarshalJSON() ([]byte, error)
24 | }
25 |
26 | type textWatcher struct {
27 | sync.Mutex
28 | keepPolling bool
29 | kind string
30 | path string
31 | UpdatedAt time.Time
32 | BeingWatched bool
33 | OnChange ChangeHandler
34 | Checksum uint32
35 | CmdName string
36 | CmdArgs []string
37 | contents func() ([]byte, error)
38 | pollSleep time.Duration
39 | }
40 |
41 | // File watchers track changes in the contents of a file
42 | func NewFileWatcher(path string, callback ChangeHandler) Watcher {
43 | // TODO the kind attribute we receive in this payload is kind of useless. If
44 | // the agent just looks at the file name in order to set the kind, we can do
45 | // that just fine in the server also. It'd make more sense to set kind to be
46 | // the kind of watcher.
47 | env := conf.FetchEnv()
48 | var kind string
49 | filename := filepath.Base(path)
50 | switch filename {
51 | case "Gemfile.lock":
52 | kind = "gemfile"
53 | case "available":
54 | // TODO: support debian
55 | kind = "ubuntu"
56 | case "status":
57 | kind = "ubuntu"
58 | }
59 |
60 | watcher := &textWatcher{
61 | path: path,
62 | OnChange: callback,
63 | kind: kind,
64 | UpdatedAt: time.Now(),
65 | pollSleep: env.PollSleep,
66 | }
67 | watcher.contents = watcher.FileContents
68 |
69 | // Do a scan off the bat so we get a checksum, and PUT the file
70 | watcher.scan()
71 | return watcher
72 | }
73 |
74 | // Command output watchers track changes in the output of a command
75 | func NewCommandOutputWatcher(process string, callback ChangeHandler) Watcher {
76 | env := conf.FetchEnv()
77 | splat := strings.Split(process, " ")
78 | name := splat[0]
79 | args := splat[1:]
80 |
81 | watcher := &textWatcher{
82 | path: process,
83 | OnChange: callback,
84 | kind: "centos",
85 | UpdatedAt: time.Now(),
86 | pollSleep: env.PollSleep,
87 | CmdName: name,
88 | CmdArgs: args,
89 | }
90 | watcher.contents = watcher.ProcessContents
91 |
92 | watcher.scan()
93 | return watcher
94 | }
95 |
96 | func (tw *textWatcher) MarshalJSON() ([]byte, error) {
97 | tw.Lock()
98 | defer tw.Unlock()
99 | ret, err := json.Marshal(map[string]interface{}{
100 | "path": tw.Path(),
101 | "kind": tw.Kind(),
102 | "updated-at": tw.UpdatedAt,
103 | "being-watched": tw.BeingWatched,
104 | "crc": tw.Checksum})
105 | return ret, err
106 | }
107 |
108 | func (wt *textWatcher) Kind() string {
109 | return wt.kind
110 | }
111 |
112 | func (wt *textWatcher) Path() string {
113 | return wt.path
114 | }
115 |
116 | func (wt *textWatcher) KeepPolling() bool {
117 | wt.Lock()
118 | defer wt.Unlock()
119 | return wt.keepPolling
120 | }
121 |
122 | func (wt *textWatcher) Start() {
123 | // log.Debug("Listening to: %s", wt.Path())
124 | wt.Lock()
125 | wt.keepPolling = true
126 | wt.Unlock()
127 | go wt.listen()
128 | }
129 |
130 | func (wt *textWatcher) Stop() {
131 | // log.Debug("No longer listening to: %s", wt.Path())
132 | wt.Lock()
133 | wt.keepPolling = false
134 | wt.Unlock()
135 | }
136 |
137 | func (wt *textWatcher) GetBeingWatched() bool {
138 | wt.Lock()
139 | defer wt.Unlock()
140 | return wt.BeingWatched
141 | }
142 |
143 | func (wt *textWatcher) SetBeingWatched(bw bool) {
144 | wt.Lock()
145 | wt.BeingWatched = bw
146 | wt.Unlock()
147 | }
148 |
149 | // since on init the checksum never matches, we always trigger an OnChange when we
150 | // boot up
151 | func (wt *textWatcher) scan() {
152 | // log.Debug("wt: Check for %s", wt.Path())
153 | currentCheck := wt.currentChecksum()
154 |
155 | if currentCheck == 0 {
156 | // log.Debug("wt: checksum fail.")
157 | // there was some error reading the file.
158 | // try again later?
159 | wt.SetBeingWatched(false)
160 | return
161 | }
162 |
163 | wt.SetBeingWatched(true)
164 |
165 | if wt.Checksum != currentCheck {
166 | go wt.OnChange(wt)
167 | wt.Checksum = currentCheck
168 | }
169 | }
170 |
171 | func (wt *textWatcher) currentChecksum() uint32 {
172 | file, err := wt.Contents()
173 | if err != nil {
174 | return 0
175 | }
176 |
177 | return crc32.ChecksumIEEE(file)
178 | }
179 |
180 | func (wt *textWatcher) listen() {
181 | for wt.KeepPolling() {
182 | wt.scan()
183 | time.Sleep(wt.pollSleep)
184 | }
185 | }
186 |
187 | func (wt *textWatcher) Contents() ([]byte, error) {
188 | return wt.contents()
189 | }
190 |
191 | func (wt *textWatcher) FileContents() ([]byte, error) {
192 | // log.Debug("####### file contents for %s!", wt.Path())
193 | return ioutil.ReadFile(wt.Path())
194 | }
195 |
196 | func (wt *textWatcher) ProcessContents() ([]byte, error) {
197 | // log.Debug("####### process contents!")
198 | cmd := exec.Command(wt.CmdName, wt.CmdArgs...)
199 | out, err := cmd.Output()
200 |
201 | return out, err
202 | }
203 |
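Both constructors return the generic Watcher; they differ only in where Contents() reads from (a file on disk versus a command's stdout) and in the kind they report. A minimal sketch with an illustrative path and command (real targets come from the agent's watcher config), where the callback just logs the change:

package main

import (
	"fmt"

	"github.com/appcanary/agent/agent"
)

func main() {
	onChange := func(w agent.Watcher) {
		tw := w.(agent.TextWatcher)
		contents, err := tw.Contents()
		if err != nil {
			fmt.Println("read error:", err)
			return
		}
		fmt.Printf("%s (%s) changed: %d bytes\n", tw.Path(), tw.Kind(), len(contents))
	}

	// Illustrative targets only.
	fileWatcher := agent.NewFileWatcher("/var/lib/dpkg/status", onChange)
	cmdWatcher := agent.NewCommandOutputWatcher("rpm -qa", onChange)

	fileWatcher.Start()
	cmdWatcher.Start()
	defer fileWatcher.Stop()
	defer cmdWatcher.Stop()

	// a real caller would now block or poll, as the agent does
}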
--------------------------------------------------------------------------------
/agent/upgrade.go:
--------------------------------------------------------------------------------
1 | package agent
2 |
3 | import (
4 | "bytes"
5 | "fmt"
6 | "os/exec"
7 | "strings"
8 |
9 | "github.com/appcanary/agent/conf"
10 | )
11 |
12 | type UpgradeCommand struct {
13 | Name string
14 | Args []string
15 | }
16 |
17 | type UpgradeSequence []UpgradeCommand
18 |
19 | func buildCentOSUpgrade(packageList map[string]string) UpgradeSequence {
20 | installCmd := "yum"
21 | installArg := []string{"update-to", "--assumeyes"}
22 |
23 | for name := range packageList {
24 | installArg = append(installArg, strings.TrimSuffix(packageList[name], ".rpm"))
25 | }
26 |
27 | return UpgradeSequence{UpgradeCommand{installCmd, installArg}}
28 | }
29 |
30 | func buildDebianUpgrade(packageList map[string]string) UpgradeSequence {
31 | env := conf.FetchEnv()
32 |
33 | updateCmd := "apt-get"
34 | updateArg := []string{"update", "-q"}
35 |
36 | // only upgrade packages that are already installed, silence the confirm
37 | // prompt, and if a package ships a new version of a conf file, install it
38 | // when the existing conf is unchanged from the default;
39 | // otherwise leave the old conf in place
40 | installCmd := "apt-get"
41 | installArg := []string{"install", "--only-upgrade", "--no-install-recommends", "-y", "-q"}
42 |
43 | if !env.FailOnConflict {
44 | installArg = append(installArg, "-o Dpkg::Options::=\"--force-confdef\"", "-o Dpkg::Options::=\"--force-confold\"")
45 | }
46 |
47 | for name := range packageList {
48 | // for now let's just stick to blanket updates
49 | // to the packages. At a glance, it seems in ubuntu land you only
50 | // get access to the most recent version anyways.
51 | // installArg = append(installArg, name+"="+version)
52 | installArg = append(installArg, name)
53 | }
54 |
55 | return UpgradeSequence{UpgradeCommand{updateCmd, updateArg}, UpgradeCommand{installCmd, installArg}}
56 | }
57 |
58 | func executeUpgradeSequence(commands UpgradeSequence) error {
59 | env := conf.FetchEnv()
60 | log := conf.FetchLog()
61 |
62 | if env.DryRun {
63 | log.Info("Running upgrade in dry-run mode...")
64 | }
65 |
66 | for _, command := range commands {
67 | err := runCmd(command)
68 | if err != nil {
69 | return err
70 | }
71 | }
72 | return nil
73 | }
74 |
75 | func runCmd(command UpgradeCommand) error {
76 | log := conf.FetchLog()
77 | env := conf.FetchEnv()
78 |
79 | cmdName := command.Name
80 | args := command.Args
81 |
82 | _, err := exec.LookPath(cmdName)
83 |
84 | if err != nil {
85 | log.Info("Can't find " + cmdName)
86 | return err
87 | }
88 |
89 | cmd := exec.Command(cmdName, args...)
90 |
91 | var output bytes.Buffer
92 | cmd.Stdout = &output
93 | cmd.Stderr = &output
94 |
95 | log.Infof("Running: %s %s", cmdName, strings.Join(args, " "))
96 | if env.DryRun {
97 | return nil
98 | } else {
99 | if err := cmd.Start(); err != nil {
100 | log.Infof("Was unable to start %s. Error: %v", cmdName, err)
101 | return err
102 | }
103 |
104 | err = cmd.Wait()
105 | fmt.Println(output.String())
106 |
107 | return err
108 | }
109 | }
110 |
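Put together, the flow above is: build a distro-specific list of commands, then either log them (dry-run) or execute them in order, stopping at the first failure. A standalone sketch of that shape; the package names and the dryRun flag are made up for illustration and this is not the agent's own code path:

    package main

    import (
        "fmt"
        "os/exec"
        "strings"
    )

    // command mirrors UpgradeCommand, kept local so the sketch is self-contained.
    type command struct {
        name string
        args []string
    }

    func main() {
        packages := map[string]string{"openssl": "1.0.2g", "bash": "4.3"} // illustrative

        // refresh the index first, then upgrade only the named packages
        seq := []command{{"apt-get", []string{"update", "-q"}}}
        install := command{"apt-get", []string{"install", "--only-upgrade", "-y", "-q"}}
        for name := range packages {
            install.args = append(install.args, name)
        }
        seq = append(seq, install)

        dryRun := true
        for _, c := range seq {
            fmt.Println("would run:", c.name, strings.Join(c.args, " "))
            if dryRun {
                continue
            }
            out, err := exec.Command(c.name, c.args...).CombinedOutput()
            fmt.Print(string(out))
            if err != nil {
                fmt.Println("command failed:", err)
                return
            }
        }
    }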
--------------------------------------------------------------------------------
/agent/upgrade_test.go:
--------------------------------------------------------------------------------
1 | package agent
2 |
3 | import (
4 | "testing"
5 |
6 | "github.com/stateio/testify/assert"
7 | )
8 |
9 | func TestBuildDebianUpgrade(t *testing.T) {
10 | assert := assert.New(t)
11 |
12 | packageList := map[string]string{"foobar": "version"}
13 | commands := buildDebianUpgrade(packageList)
14 |
15 | assert.Equal(2, len(commands))
16 | assert.Equal("apt-get", commands[0].Name)
17 | assert.Equal("apt-get", commands[1].Name)
18 |
19 | upgradeArgs := commands[1].Args
20 | lastArg := upgradeArgs[len(upgradeArgs)-1]
21 |
22 | assert.Equal("foobar", lastArg)
23 | }
24 |
25 | func TestBuildCentOSUpgradeWithSuffixedVersion(t *testing.T) {
26 | assert := assert.New(t)
27 |
28 | packageList := map[string]string{"foobar": "foobar-version-with-suffix.rpm"}
29 | commands := buildCentOSUpgrade(packageList)
30 |
31 | assert.Equal(1, len(commands))
32 | assert.Equal("yum", commands[0].Name)
33 |
34 | upgradeArgs := commands[0].Args
35 | lastArg := upgradeArgs[len(upgradeArgs)-1]
36 |
37 | assert.Equal("foobar-version-with-suffix", lastArg)
38 | }
39 |
40 | func TestBuildCentOSUpgradeWithoutSuffixedVersion(t *testing.T) {
41 | assert := assert.New(t)
42 |
43 | packageList := map[string]string{"foobar": "foobar-version-without-suffix"}
44 | commands := buildCentOSUpgrade(packageList)
45 |
46 | assert.Equal(1, len(commands))
47 | assert.Equal("yum", commands[0].Name)
48 |
49 | upgradeArgs := commands[0].Args
50 | lastArg := upgradeArgs[len(upgradeArgs)-1]
51 |
52 | assert.Equal("foobar-version-without-suffix", lastArg)
53 | }
54 |
--------------------------------------------------------------------------------
/agent/watcher.go:
--------------------------------------------------------------------------------
1 | package agent
2 |
3 | type ChangeHandler func(Watcher)
4 |
5 | type Watcher interface {
6 | Start()
7 | Stop()
8 | }
9 |
10 | type Watchers []Watcher
11 |
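Any type with Start and Stop methods satisfies this interface, and a ChangeHandler is just a callback handed the Watcher that fired. A toy implementation, purely illustrative; the real watchers poll files and command output, not a timer:

    package main

    import (
        "fmt"
        "time"
    )

    type ChangeHandler func(Watcher)

    type Watcher interface {
        Start()
        Stop()
    }

    // tickWatcher fires its handler once a second until stopped.
    type tickWatcher struct {
        onChange ChangeHandler
        done     chan struct{}
    }

    func (t *tickWatcher) Start() {
        go func() {
            for {
                select {
                case <-t.done:
                    return
                case <-time.After(time.Second):
                    t.onChange(t)
                }
            }
        }()
    }

    func (t *tickWatcher) Stop() { close(t.done) }

    func main() {
        w := &tickWatcher{
            done:     make(chan struct{}),
            onChange: func(Watcher) { fmt.Println("change observed") },
        }
        w.Start()
        time.Sleep(2500 * time.Millisecond)
        w.Stop()
    }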
--------------------------------------------------------------------------------
/agent/watcher_test.go:
--------------------------------------------------------------------------------
1 | package agent
2 |
3 | import (
4 | "fmt"
5 | "os"
6 | "sync"
7 | "testing"
8 | "time"
9 |
10 | "io/ioutil"
11 |
12 | "github.com/appcanary/agent/conf"
13 | "github.com/appcanary/testify/assert"
14 | )
15 |
16 | // create a tempfile, add a hook, see if hook gets called
17 | // when file changes. TODO: test all other fs events.
18 | func TestWatchFile(t *testing.T) {
19 | assert := assert.New(t)
20 | conf.InitEnv("test")
21 |
22 | file_content := "tst1"
23 | tf, _ := ioutil.TempFile("", "gems.lock")
24 | tf.Write([]byte(file_content))
25 | tf.Close()
26 |
27 | timer := time.Tick(5 * time.Second)
28 | cbInvoked := make(chan bool)
29 | testcb := func(nop Watcher) {
30 | cbInvoked <- true
31 | }
32 |
33 | wfile := NewFileWatcher(tf.Name(), testcb).(TextWatcher)
34 |
35 | wfile.Start()
36 |
37 | // let's make sure the file got written to
38 | read_contents, _ := wfile.Contents()
39 | assert.Equal(file_content, string(read_contents))
40 |
41 | // but really we want to know if the
42 | // callback was ever invoked
43 | select {
44 | case invoked := <-cbInvoked:
45 | assert.True(invoked)
46 |
47 | case <-timer:
48 | assert.True(false)
49 | }
50 |
51 | // solid. on boot it worked. But what
52 | // if we changed the file contents?
53 |
54 | newContents := []byte("HelloWorld\n")
55 | err := ioutil.WriteFile(tf.Name(), newContents, 0777)
56 | assert.Nil(err)
57 |
58 | // let's wait again just in case.
59 | select {
60 | case invoked := <-cbInvoked:
61 | assert.True(invoked)
62 |
63 | case <-timer:
64 | assert.True(false)
65 | }
66 |
67 | wfile.Stop()
68 | }
69 |
70 | func TestWatchProcess(t *testing.T) {
71 | assert := assert.New(t)
72 | conf.InitEnv("test")
73 |
74 | timer := time.Tick(5 * time.Second)
75 | cbInvoked := make(chan bool)
76 | testcb := func(nop Watcher) {
77 | cbInvoked <- true
78 | }
79 |
80 | wfile := NewCommandOutputWatcher("date +%S", testcb).(TextWatcher)
81 |
82 | wfile.Start()
83 |
84 | // let's make sure the command produced some output
85 | read_contents, _ := wfile.Contents()
86 | assert.NotEqual("", string(read_contents))
87 |
88 | // but really we want to know if the
89 | // callback was ever invoked
90 | select {
91 | case invoked := <-cbInvoked:
92 | assert.True(invoked)
93 |
94 | case <-timer:
95 | assert.True(false)
96 | }
97 |
98 | // solid. on boot it worked.
99 | // date changes every second, so we should get
100 | // another callback
101 |
102 | select {
103 | case invoked := <-cbInvoked:
104 | assert.True(invoked)
105 |
106 | case <-timer:
107 | assert.True(false)
108 | }
109 |
110 | wfile.Stop()
111 | }
112 |
113 | func TestWatchFileFailure(t *testing.T) {
114 | assert := assert.New(t)
115 | conf.InitEnv("test")
116 |
117 | file_content := "tst1"
118 | tf, _ := ioutil.TempFile("", "gems.lock")
119 | tf.Write([]byte(file_content))
120 | tf.Close()
121 |
122 | cbInvoked := make(chan bool)
123 | testcb := func(nop Watcher) {
124 | cbInvoked <- true
125 | }
126 |
127 | wfile := NewFileWatcher(tf.Name(), testcb).(*textWatcher)
128 | wfile.Start()
129 | <-cbInvoked
130 |
131 | // File is being watched
132 | time.Sleep(conf.TEST_POLL_SLEEP)
133 | assert.True(wfile.GetBeingWatched())
134 | os.Remove(tf.Name())
135 | time.Sleep(conf.TEST_POLL_SLEEP)
136 | // since the file is gone, we stopped watching it
137 | assert.False(wfile.GetBeingWatched())
138 | wfile.Stop()
139 | }
140 |
141 | // does the callback get fired when the directory
142 | // the file is in gets renamed?
143 | // TODO: replace with tempfiles.
144 | func TestWatchFileRenameDirectory(t *testing.T) {
145 | assert := assert.New(t)
146 | conf.InitEnv("test")
147 |
148 | folder := "/tmp/CANARYTEST"
149 | file_name := folder + "/test1.gems"
150 |
151 | os.Mkdir(folder, 0777)
152 | ioutil.WriteFile(file_name, []byte("tst"), 0644)
153 |
154 | cbInvoked := make(chan bool, 10)
155 |
156 | mutex := &sync.Mutex{}
157 | counter := 0
158 | testcb := func(wfile Watcher) {
159 | mutex.Lock()
160 | counter++
161 | mutex.Unlock()
162 | cbInvoked <- true
163 | }
164 |
165 | wfile := NewFileWatcher(file_name, testcb).(*textWatcher)
166 |
167 | // file gets read on hook add
168 | wfile.Start()
169 | defer wfile.Stop()
170 | <-cbInvoked
171 |
172 | // aight. let's rename the folder it's in.
173 | // let's create a tmp path we can rename to.
174 | folder2 := "/tmp/CANARYTEST2"
175 | os.Rename(folder, folder2)
176 |
177 | // file should now be missing.
178 | time.Sleep(conf.TEST_POLL_SLEEP)
179 |
180 | assert.False(wfile.GetBeingWatched())
181 |
182 | // let's then recreate a new file w/ the same path:
183 | // recreate the old folder
184 | os.Mkdir(folder, 0777)
185 | // write new file
186 | ioutil.WriteFile(file_name, []byte("tst2"), 0644)
187 |
188 | time.Sleep(conf.TEST_POLL_SLEEP)
189 | // this file should be different, thus triggering
190 | // another callback
191 |
192 | mutex.Lock()
193 | assert.Equal(2, counter)
194 | mutex.Unlock()
195 |
196 | os.RemoveAll(folder)
197 | os.RemoveAll(folder2)
198 | }
199 |
200 | func TestWatchFileHookLoop(t *testing.T) {
201 |
202 | assert := assert.New(t)
203 | conf.InitEnv("test")
204 |
205 | file_content := []byte("tst1")
206 | tf, _ := ioutil.TempFile("", "gems.lock")
207 | tf.Write([]byte(file_content))
208 | tf.Close()
209 | file_name := tf.Name()
210 |
211 | cbInvoked := make(chan bool, 10)
212 |
213 | mutex := &sync.Mutex{}
214 | counter := 0
215 | testcb := func(wfile Watcher) {
216 | mutex.Lock()
217 | counter++
218 | mutex.Unlock()
219 | cbInvoked <- true
220 | }
221 |
222 | wfile := NewFileWatcher(file_name, testcb)
223 |
224 | // file gets read on hook add
225 | wfile.Start()
226 | <-cbInvoked
227 |
228 | // file gets read on rewrite
229 | fmt.Println("--> write 2")
230 | file_content = []byte("hello test1")
231 | err := ioutil.WriteFile(file_name, file_content, 0644)
232 | <-cbInvoked
233 |
234 | // we remove and recreate the file,
235 | // triggering a rehook and re-read
236 | fmt.Println("--> removal 1")
237 | os.Remove(file_name)
238 |
239 | fmt.Println("--> write 3")
240 | file_content = []byte("hello test2")
241 | err = ioutil.WriteFile(file_name, file_content, 0644)
242 | assert.Nil(err)
243 | <-cbInvoked
244 |
245 | // we write to the file, triggering
246 | // another re-read
247 | fmt.Println("--> write 4")
248 | file_content = []byte("hello test3")
249 | err = ioutil.WriteFile(file_name, file_content, 0644)
250 | assert.Nil(err)
251 | <-cbInvoked
252 |
253 | // we remove and recreate the file,
254 | // triggering a rehook and yet another re-read
255 | fmt.Println("--> removal 2")
256 | os.Remove(file_name)
257 |
258 | fmt.Println("--> write 5")
259 | file_content = []byte("hello test4")
260 | err = ioutil.WriteFile(file_name, file_content, 0644)
261 | assert.Nil(err)
262 | <-cbInvoked
263 |
264 | fmt.Println("--> write 6")
265 | file_content = []byte("hello test5")
266 | err = ioutil.WriteFile(file_name, file_content, 0644)
267 | assert.Nil(err)
268 | <-cbInvoked
269 |
270 | fmt.Println("cleaning up")
271 | // we wrote the file five times, plus the init read
272 | mutex.Lock()
273 | assert.True(counter >= 6)
274 | mutex.Unlock()
275 |
276 | // cleanup
277 | wfile.Stop()
278 | os.Remove(file_name)
279 | }
280 |
281 | // TODO: create version of the above test where we compare files that are identical in size, and were touched within one second
282 |
--------------------------------------------------------------------------------
/appcanary-hero.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/appcanary/agent/48db7f45c6b2ed2d8d1dede8751e92d61756d984/appcanary-hero.png
--------------------------------------------------------------------------------
/circle.yml:
--------------------------------------------------------------------------------
1 | dependencies:
2 | override:
3 | - go get -t -d -v ./...
4 | - go get -u github.com/jteeuwen/go-bindata/...
5 | - bundle install
6 | test:
7 | override:
8 | - bundle exec rake test
9 | checkout:
10 | post:
11 | - rm -rf ~/.go_workspace/src/github.com/appcanary/agent
12 | - mkdir -p ~/.go_workspace/src/github.com/appcanary/agent
13 | - cp -R ~/agent ~/.go_workspace/src/github.com/appcanary/
14 |
--------------------------------------------------------------------------------
/conf/common.go:
--------------------------------------------------------------------------------
1 | package conf
2 |
3 | import (
4 | "errors"
5 | "os"
6 | "path/filepath"
7 |
8 | "github.com/appcanary/agent/agent/detect"
9 | )
10 |
11 | type ServerConf struct {
12 | UUID string `toml:"uuid" yaml:"uuid"`
13 | }
14 |
15 | type Conf struct {
16 | detect.LinuxOSInfo `yaml:",inline"`
17 | ApiKey string `yaml:"api_key,omitempty" toml:"api_key"`
18 | LogPath string `yaml:"log_path,omitempty" toml:"log_path"`
19 | ServerName string `yaml:"server_name,omitempty" toml:"server_name"`
20 | Watchers []WatcherConf `yaml:"watchers" toml:"files"`
21 | StartupDelay int `yaml:"startup_delay,omitempty" toml:"startup_delay"`
22 | ServerConf *ServerConf `yaml:"-" toml:"-"`
23 | Tags []string `yaml:"tags,omitempty"` // no toml support for this
24 | }
25 |
26 | type WatcherConf struct {
27 | Path string `yaml:"path,omitempty" toml:"path"`
28 | Process string `yaml:"process,omitempty" toml:"inspect_process"`
29 | Command string `yaml:"command,omitempty" toml:"process"`
30 | }
31 |
32 | func NewConf() *Conf {
33 | return &Conf{ServerConf: &ServerConf{}}
34 | }
35 |
36 | func (c *Conf) OSInfo() *detect.LinuxOSInfo {
37 | if c.Distro != "" && c.Release != "" {
38 | return &c.LinuxOSInfo
39 | } else {
40 | return nil
41 | }
42 | }
43 |
44 | func fileExists(fname string) bool {
45 | _, err := os.Stat(fname)
46 | return err == nil
47 | }
48 |
49 | func renameDeprecatedConf(path string) (err error) {
50 | absPath, err := filepath.Abs(path)
51 | if err != nil {
52 | return err
53 | }
54 |
55 | err = os.Rename(absPath, absPath+".deprecated")
56 | if err != nil {
57 | return err
58 | }
59 |
60 | return
61 | }
62 |
63 | func convertOldConf() (*Conf, error) {
64 | env := FetchEnv()
65 | log := FetchLog()
66 | var confFile, varFile string
67 |
68 | // load the TOML
69 | if env.Prod { // we only get this far if locations are default
70 | confFile = OLD_DEFAULT_CONF_FILE
71 | varFile = OLD_DEFAULT_VAR_FILE
72 | } else { // this should only happen in test
73 | confFile = OLD_DEV_CONF_FILE
74 | varFile = OLD_DEV_VAR_FILE
75 | }
76 |
77 | if fileExists(confFile) {
78 | log.Info("Old configuration file detected, converting to new format")
79 | } else {
80 | // One more thing to try: on RPM systems the conf file may have been moved to .rpmsave during the upgrade process
81 | confFile = confFile + ".rpmsave"
82 | varFile = varFile + ".rpmsave"
83 | if fileExists(confFile) {
84 | log.Info("Old configuration file detected, converting to new format")
85 | } else {
86 | // we know things are set to default AND the default yml file is missing
87 | // AND the old file is missing... well there's nothing for us to do here
88 | return nil, errors.New("We can't find any configuration files! Please consult https://appcanary.com/servers/new for more instructions.")
89 | }
90 | }
91 |
92 | c, err := NewTomlConfFromEnv(confFile, varFile)
93 | if err != nil {
94 | return nil, err
95 | }
96 |
97 | // now move the old files out of the way and dump a new YAML version
98 |
99 | if err := renameDeprecatedConf(confFile); err != nil {
100 | log.Warningf("Couldn't rename old agent config: %v", err)
101 | } else {
102 | log.Infof("Renamed %s to %s.deprecated", confFile, confFile)
103 | }
104 |
105 | if err := renameDeprecatedConf(varFile); err != nil {
106 | log.Warningf("Couldn't rename old server config: %v", err)
107 | } else {
108 | log.Infof("Renamed %s to %s.deprecated", varFile, varFile)
109 | }
110 |
111 | var newConfFile, newVarFile string
112 | if env.Prod {
113 | newConfFile = DEFAULT_CONF_FILE
114 | newVarFile = DEFAULT_VAR_FILE
115 | } else {
116 | newConfFile = DEV_CONF_FILE
117 | newVarFile = DEV_VAR_FILE
118 | }
119 |
120 | // dump the new YAML files
121 | c.FullSave(newConfFile, newVarFile)
122 |
123 | log.Infof("New configuration file saved to: %s", newConfFile)
124 |
125 | return c, nil
126 | }
127 |
128 | func confFilesSetToDefault(env *Env) bool {
129 | if env.Prod {
130 | return env.ConfFile == DEFAULT_CONF_FILE && env.VarFile == DEFAULT_VAR_FILE
131 | } else {
132 | return env.ConfFile == DEV_CONF_FILE && env.VarFile == DEV_VAR_FILE
133 | }
134 | }
135 |
136 | // we can't function without configuration,
137 | // so at some point some callee further down the stack
138 | // will Fatal() if it can't find what it needs
139 | func NewConfFromEnv() (*Conf, error) {
140 | env := FetchEnv()
141 |
142 | // if conf files were supplied via cli flags,
143 | // i.e. not the default setting,
144 | // they should be in yaml
145 |
146 | // therefore,
147 | // if we have a default file location
148 | // but the file does not exist,
149 | // try looking for the old files and convert them
150 |
151 | if confFilesSetToDefault(env) && !fileExists(env.ConfFile) {
152 | return convertOldConf()
153 | } else {
154 | return NewYamlConfFromEnv()
155 | }
156 | }
157 |
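From a caller's point of view, the fallback logic above collapses into a single call. A short usage sketch; it assumes the import path used throughout this repository and the "test" fixtures under test/data:

    package main

    import (
        "fmt"

        "github.com/appcanary/agent/conf"
    )

    func main() {
        // point the env at the test/data fixtures shipped with the repo
        conf.InitEnv("test")

        // NewConfFromEnv reads the YAML config, or converts the old TOML
        // files when the default YAML location is empty
        c, err := conf.NewConfFromEnv()
        if err != nil {
            fmt.Println("no usable configuration:", err)
            return
        }
        fmt.Printf("server %q with %d watchers\n", c.ServerName, len(c.Watchers))
    }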
--------------------------------------------------------------------------------
/conf/conf_test.go:
--------------------------------------------------------------------------------
1 | package conf
2 |
3 | import (
4 | "os"
5 | "strings"
6 | "testing"
7 |
8 | "github.com/appcanary/testify/assert"
9 | )
10 |
11 | func TestConf(t *testing.T) {
12 | assert := assert.New(t)
13 | InitEnv("test")
14 |
15 | conf, err := NewConfFromEnv()
16 | assert.Nil(err)
17 |
18 | assert.Equal("APIKEY", conf.ApiKey)
19 | assert.Equal("deployment1", conf.ServerName)
20 | assert.Equal("testDistro", conf.Distro)
21 | assert.Equal("testRelease", conf.Release)
22 |
23 | assert.Equal(4, len(conf.Watchers), "len of files")
24 |
25 | dpkg := conf.Watchers[0]
26 | assert.Equal("/var/lib/dpkg/available", dpkg.Path, "file path")
27 |
28 | gemfile := conf.Watchers[1]
29 | assert.Equal("/path/to/Gemfile.lock", gemfile.Path, "file path")
30 |
31 | tar_h := conf.Watchers[2]
32 | assert.Equal("fakecmdhere", tar_h.Command, "command path")
33 |
34 | inspectProcess := conf.Watchers[3]
35 | assert.Equal("*", inspectProcess.Process, "inspect process pattern")
36 |
37 | assert.Equal("123456", conf.ServerConf.UUID)
38 | }
39 |
40 | func TestConfUpgrade(t *testing.T) {
41 | assert := assert.New(t)
42 | InitEnv("test")
43 |
44 | // everything is configured to default, but file is missing
45 | assert.Nil(os.Rename(DEV_CONF_FILE, DEV_CONF_FILE+".bak"))
46 | assert.Nil(os.Rename(DEV_VAR_FILE, DEV_VAR_FILE+".bak"))
47 | assert.False(fileExists(DEV_CONF_FILE))
48 |
49 | // ensure we have the old dev conf file it'll fall back on,
50 | // convert and rename
51 | assert.True(fileExists(OLD_DEV_CONF_FILE))
52 |
53 | // now do the conversion
54 | conf, err := NewConfFromEnv()
55 | assert.Nil(err)
56 |
57 | // check that the configuration is ok
58 | assert.Equal("APIKEY", conf.ApiKey)
59 | assert.Equal("deployment1", conf.ServerName)
60 | assert.Equal("testDistro", conf.Distro)
61 | assert.Equal("testRelease", conf.Release)
62 |
63 | // ensure old ones got copied to .deprecated and new yaml files exist
64 | assert.False(fileExists(OLD_DEV_CONF_FILE))
65 | assert.True(fileExists(OLD_DEV_CONF_FILE + ".deprecated"))
66 | assert.True(fileExists(OLD_DEV_VAR_FILE + ".deprecated"))
67 |
68 | assert.True(fileExists(DEV_CONF_FILE))
69 |
70 | // great. Now let's ensure this new file is readable.
71 | // let's make sure we're not reading the old file
72 | assert.False(fileExists(OLD_DEV_CONF_FILE))
73 |
74 | conf, err = NewConfFromEnv()
75 | assert.Nil(err)
76 | assert.Equal("APIKEY", conf.ApiKey)
77 | assert.Equal("deployment1", conf.ServerName)
78 | assert.Equal("testDistro", conf.Distro)
79 | assert.Equal("testRelease", conf.Release)
80 |
81 | // now we clean up the renamed test files
82 |
83 | assert.Nil(os.Rename(DEV_CONF_FILE+".bak", DEV_CONF_FILE))
84 | assert.Nil(os.Rename(DEV_VAR_FILE+".bak", DEV_VAR_FILE))
85 |
86 | assert.Nil(os.Rename(OLD_DEV_CONF_FILE+".deprecated", OLD_DEV_CONF_FILE))
87 | assert.Nil(os.Rename(OLD_DEV_VAR_FILE+".deprecated", OLD_DEV_VAR_FILE))
88 | }
89 |
90 | func TestCustomConfPathTOMLConf(t *testing.T) {
91 | assert := assert.New(t)
92 | InitEnv("test")
93 |
94 | // link files to a non-standard location
95 | assert.Nil(os.Link(OLD_DEV_CONF_FILE, "/tmp/agent.conf"))
96 | assert.Nil(os.Link(OLD_DEV_VAR_FILE, "/tmp/server.conf"))
97 |
98 | // set the new values in the environment
99 | env.ConfFile = "/tmp/agent.conf"
100 | env.VarFile = "/tmp/server.conf"
101 |
102 | // attempt to load the config
103 | conf, err := NewConfFromEnv()
104 |
105 | // there should be an error
106 | assert.NotNil(err)
107 | assert.True(strings.Contains(err.Error(), "Is this file valid YAML?"))
108 |
109 | // there should not be a configuration
110 | assert.Nil(conf)
111 |
112 | // ditch the links
113 | assert.Nil(os.Remove("/tmp/agent.conf"))
114 | assert.Nil(os.Remove("/tmp/server.conf"))
115 | }
116 |
117 | func TestCustomConfPathYAMLConf(t *testing.T) {
118 | assert := assert.New(t)
119 | InitEnv("test")
120 |
121 | // link files to a non-standard location
122 | assert.Nil(os.Link(DEV_CONF_FILE, "/tmp/agent.yml"))
123 | assert.Nil(os.Link(DEV_VAR_FILE, "/tmp/server.yml"))
124 |
125 | // set the new values in the environment
126 | env.ConfFile = "/tmp/agent.yml"
127 | env.VarFile = "/tmp/server.yml"
128 |
129 | // attempt to load the configuration
130 | conf, err := NewConfFromEnv()
131 |
132 | // there should not be an error
133 | assert.Nil(err)
134 |
135 | // there SHOULD be a conf with some things in it
136 | assert.NotNil(conf)
137 | assert.Equal("deployment1", conf.ServerName)
138 | assert.Equal("APIKEY", conf.ApiKey)
139 | assert.Equal("testDistro", conf.Distro)
140 | assert.Equal("testRelease", conf.Release)
141 | assert.Equal("123456", conf.ServerConf.UUID)
142 |
143 | // ditch the links
144 | assert.Nil(os.Remove("/tmp/agent.yml"))
145 | assert.Nil(os.Remove("/tmp/server.yml"))
146 | }
147 |
--------------------------------------------------------------------------------
/conf/consts.go:
--------------------------------------------------------------------------------
1 | package conf
2 |
3 | import "time"
4 |
5 | // consts can't be the result of function calls, so these live as vars
6 | var DEV_CONF_PATH string
7 | var DEV_CONF_FILE string
8 | var OLD_DEV_CONF_FILE string
9 |
10 | var DEV_VAR_PATH string
11 | var DEV_VAR_FILE string
12 | var OLD_DEV_VAR_FILE string
13 |
14 | // env vars
15 | const (
16 | PROD_URL = "https://www.appcanary.com"
17 | DEV_URL = "http://localhost:4000"
18 |
19 | DEFAULT_CONF_PATH = "/etc/appcanary/"
20 | DEFAULT_CONF_FILE_BASE = DEFAULT_CONF_PATH + "agent"
21 | DEFAULT_VAR_PATH = "/var/db/appcanary/"
22 | DEFAULT_VAR_FILE_BASE = DEFAULT_VAR_PATH + "server"
23 | DEFAULT_CONF_FILE = DEFAULT_CONF_FILE_BASE + ".yml"
24 | DEFAULT_VAR_FILE = DEFAULT_VAR_FILE_BASE + ".yml"
25 | OLD_DEFAULT_CONF_FILE = DEFAULT_CONF_FILE_BASE + ".conf"
26 | OLD_DEFAULT_VAR_FILE = DEFAULT_VAR_FILE_BASE + ".conf"
27 |
28 | DEFAULT_HEARTBEAT_DURATION = 1 * time.Hour
29 | DEV_HEARTBEAT_DURATION = 10 * time.Second
30 |
31 | DEFAULT_SYNC_ALL_DURATION = 24 * time.Hour
32 | DEV_SYNC_ALL_DURATION = 30 * time.Second
33 |
34 | DEFAULT_LOG_FILE = "/var/log/appcanary.log"
35 | )
36 |
37 | // api endpoints
38 | const (
39 | API_VERSION = "/api/v1/agent/"
40 | API_HEARTBEAT = API_VERSION + "heartbeat"
41 | API_SERVERS = API_VERSION + "servers"
42 | )
43 |
44 | // file polling
45 | const (
46 | DEFAULT_POLL_SLEEP = 5 * time.Minute
47 | // test env.PollSleep is 1 second;
48 | // TEST_POLL_SLEEP is doubled to give the fs time to flush
49 | DEV_POLL_SLEEP = time.Second
50 | TEST_POLL_SLEEP = (time.Second + (150 * time.Millisecond)) * 2
51 | )
52 |
53 | // trolol
54 | const (
55 | DEV_LOGO = `
56 | ********** ******** ******** **********
57 | /////**/// /**///// **////// /////**///
58 | /** /** /** /**
59 | /** /******* /********* /**
60 | /** /**//// ////////** /**
61 | /** /** /** /**
62 | /** /******** ******** /**
63 | // //////// //////// //
64 | `
65 | PROD_LOGO = `
66 |
67 |
68 | __ _____ _____ ___ __ ___ __ _ __ __ __
69 | /'__` + "`" + `\ /\ '__` + "`" + `\/\ '__` + "`" + `\ /'___\ /'__` + "`" + `\ /' _ ` + "`" + `\ /'__` + "`" + `\ /\` + "`" + `'__\/\ \/\ \
70 | /\ \L\.\_\ \ \L\ \ \ \L\ \/\ \__//\ \L\.\_/\ \/\ \/\ \L\.\_\ \ \/ \ \ \_\ \
71 | \ \__/.\_\\ \ ,__/\ \ ,__/\ \____\ \__/.\_\ \_\ \_\ \__/.\_\\ \_\ \/` + "`" + `____ \
72 | \/__/\/_/ \ \ \/ \ \ \/ \/____/\/__/\/_/\/_/\/_/\/__/\/_/ \/_/ ` + "`" + `/___/> \
73 | \ \_\ \ \_\ /\___/
74 | \/_/ \/_/ \/__/
75 |
76 |
77 | `
78 | )
79 |
--------------------------------------------------------------------------------
/conf/env.go:
--------------------------------------------------------------------------------
1 | package conf
2 |
3 | import (
4 | "os"
5 | "path/filepath"
6 | "time"
7 |
8 | logging "github.com/op/go-logging"
9 | )
10 |
11 | var log = logging.MustGetLogger("canary-agent")
12 |
13 | type Env struct {
14 | Env string
15 | Prod bool
16 | DryRun bool
17 | FailOnConflict bool
18 | Logo string
19 | BaseUrl string
20 | ConfFile string
21 | VarFile string
22 | LogFile string
23 | LogFileHandle *os.File
24 | HeartbeatDuration time.Duration
25 | SyncAllDuration time.Duration
26 | PollSleep time.Duration
27 | }
28 |
29 | var env = &Env{
30 | Prod: true,
31 | DryRun: false,
32 | FailOnConflict: false,
33 | Logo: PROD_LOGO,
34 | BaseUrl: PROD_URL,
35 | ConfFile: DEFAULT_CONF_FILE,
36 | VarFile: DEFAULT_VAR_FILE,
37 | LogFile: DEFAULT_LOG_FILE,
38 | HeartbeatDuration: DEFAULT_HEARTBEAT_DURATION,
39 | SyncAllDuration: DEFAULT_SYNC_ALL_DURATION,
40 | PollSleep: DEFAULT_POLL_SLEEP}
41 |
42 | func FetchEnv() *Env {
43 | return env
44 | }
45 |
46 | func FetchLog() *logging.Logger {
47 | return log
48 | }
49 |
50 | func InitEnv(envStr string) {
51 | env.Env = envStr
52 | if envStr == "test" || envStr == "debug" {
53 | env.Prod = false
54 | }
55 |
56 | // to be overridden by cli options
57 | if !env.Prod {
58 | // ###### resolve path
59 | // filepath.Abs was resolving to a different folder
60 | // depending on whether it was run from main or a test
61 | DEV_CONF_PATH, _ = filepath.Abs("test/data")
62 |
63 | if _, err := os.Stat(DEV_CONF_PATH); err != nil {
64 | DEV_CONF_PATH, _ = filepath.Abs("../test/data")
65 | }
66 |
67 | DEV_CONF_FILE = filepath.Join(DEV_CONF_PATH, "agent.yml")
68 | OLD_DEV_CONF_FILE = filepath.Join(DEV_CONF_PATH, "old_toml_test.conf")
69 |
70 | DEV_VAR_FILE = filepath.Join(DEV_CONF_PATH, "server.yml")
71 | OLD_DEV_VAR_FILE = filepath.Join(DEV_CONF_PATH, "old_toml_server.conf")
72 |
73 | // set dev vals
74 |
75 | env.BaseUrl = DEV_URL
76 |
77 | env.Logo = DEV_LOGO
78 |
79 | env.ConfFile = DEV_CONF_FILE
80 |
81 | env.VarFile = DEV_VAR_FILE
82 |
83 | env.HeartbeatDuration = DEV_HEARTBEAT_DURATION
84 | env.SyncAllDuration = DEV_SYNC_ALL_DURATION
85 |
86 | env.PollSleep = DEV_POLL_SLEEP
87 |
88 | }
89 | }
90 |
91 | func InitLogging() {
92 | // TODO: SetLevel must come before SetBackend
93 | format := logging.MustStringFormatter("%{time} %{pid} %{shortfile}] %{message}")
94 | stdoutBackend := logging.NewBackendFormatter(logging.NewLogBackend(os.Stdout, "", 0), format)
95 | if env.Prod {
96 | logging.SetLevel(logging.INFO, "canary-agent")
97 |
98 | conf, err := NewConfFromEnv()
99 | if err != nil {
100 | log.Fatal(err)
101 | }
102 |
103 | var logPath string
104 | if conf.LogPath != "" {
105 | logPath = conf.LogPath
106 | } else {
107 | logPath = env.LogFile
108 | }
109 |
110 | env.LogFileHandle, err = os.OpenFile(logPath, os.O_CREATE|os.O_RDWR|os.O_APPEND, 0666)
111 | if err != nil {
112 | log.Error("Can't open log file", err) //INCEPTION
113 | os.Exit(1)
114 | } else {
115 | fileBackend := logging.NewBackendFormatter(logging.NewLogBackend(env.LogFileHandle, "", 0), logging.GlogFormatter)
116 | logging.SetBackend(fileBackend, stdoutBackend)
117 | }
118 | } else {
119 | logging.SetLevel(logging.DEBUG, "canary-agent")
120 | logging.SetBackend(stdoutBackend)
121 | }
122 | }
123 |
124 | func ApiHeartbeatPath(ident string) string {
125 | return ApiPath(API_HEARTBEAT) + "/" + ident
126 | }
127 |
128 | func ApiServersPath() string {
129 | return ApiPath(API_SERVERS)
130 | }
131 |
132 | func ApiServerPath(ident string) string {
133 | return ApiServersPath() + "/" + ident
134 | }
135 |
136 | func ApiServerProcsPath(ident string) string {
137 | return ApiServerPath(ident) + "/processes"
138 | }
139 |
140 | func ApiPath(resource string) string {
141 | return env.BaseUrl + resource
142 | }
143 |
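The Api*Path helpers at the bottom simply join BaseUrl with the endpoint constants from consts.go, so ApiServerProcsPath("abc") against the production BaseUrl yields https://www.appcanary.com/api/v1/agent/servers/abc/processes. A tiny sketch with a made-up ident:

    package main

    import (
        "fmt"

        "github.com/appcanary/agent/conf"
    )

    func main() {
        conf.InitEnv("test") // test env uses the localhost:4000 BaseUrl

        fmt.Println(conf.ApiHeartbeatPath("abc"))   // .../api/v1/agent/heartbeat/abc
        fmt.Println(conf.ApiServerProcsPath("abc")) // .../api/v1/agent/servers/abc/processes
    }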
--------------------------------------------------------------------------------
/conf/toml.go:
--------------------------------------------------------------------------------
1 | package conf
2 |
3 | import (
4 | "errors"
5 | "fmt"
6 | "os"
7 |
8 | "github.com/BurntSushi/toml"
9 | )
10 |
11 | func NewTomlConfFromEnv(confFile, varFile string) (*Conf, error) {
12 | conf := NewConf()
13 | log := FetchLog()
14 | env := FetchEnv()
15 |
16 | _, err := toml.DecodeFile(confFile, &conf)
17 | if err != nil {
18 | log.Error(err)
19 | return nil, fmt.Errorf("Can't seem to read %s. Does the file exist? Please consult https://appcanary.com/servers/new for more instructions.", env.ConfFile)
20 | }
21 |
22 | if len(conf.Watchers) == 0 {
23 | return nil, errors.New("No files to monitor! Please consult https://appcanary.com/servers/new for more instructions.")
24 | }
25 |
26 | if _, err := os.Stat(varFile); err == nil {
27 | _, err := toml.DecodeFile(varFile, &conf.ServerConf)
28 | if err != nil {
29 | return nil, err
30 | }
31 |
32 | log.Debug("Found and read TOML server configuration")
33 | }
34 |
35 | return conf, nil
36 | }
37 |
--------------------------------------------------------------------------------
/conf/yaml.go:
--------------------------------------------------------------------------------
1 | package conf
2 |
3 | import (
4 | "errors"
5 | "fmt"
6 | "io/ioutil"
7 |
8 | yaml "gopkg.in/yaml.v2"
9 | )
10 |
11 | func save(fileName string, data []byte) {
12 | log := FetchLog()
13 |
14 | err := ioutil.WriteFile(fileName, data, 0644)
15 | if err != nil {
16 | log.Fatal(err)
17 | }
18 | }
19 |
20 | func saveServerConf(c *Conf, varFile string) {
21 | log := FetchLog()
22 |
23 | yml, err := yaml.Marshal(c.ServerConf)
24 | if err != nil {
25 | log.Fatal(err)
26 | }
27 |
28 | save(varFile, yml)
29 | log.Debug("Saved server info.")
30 | }
31 |
32 | func (c *Conf) Save() {
33 | saveServerConf(c, env.VarFile)
34 | }
35 |
36 | // Saves the whole structure in two files
37 | func (c *Conf) FullSave(confFile, varFile string) {
38 | log := FetchLog()
39 |
40 | yml, err := yaml.Marshal(c)
41 | if err != nil {
42 | log.Fatal(err)
43 | }
44 |
45 | save(confFile, yml)
46 | saveServerConf(c, varFile)
47 | log.Debug("Saved all the config files.")
48 | }
49 |
50 | func NewYamlConfFromEnv() (*Conf, error) {
51 | conf := NewConf()
52 | log := FetchLog()
53 | env := FetchEnv()
54 |
55 | // read file contents
56 | data, err := ioutil.ReadFile(env.ConfFile)
57 | if err != nil {
58 | log.Error(err)
59 | return nil, fmt.Errorf("Can't seem to read %s. Does the file exist? Please consult https://appcanary.com/servers/new for more instructions.", env.ConfFile)
60 | }
61 |
62 | // parse the YAML
63 | err = yaml.Unmarshal(data, conf)
64 | if err != nil {
65 | log.Error(err)
66 | return nil, fmt.Errorf("Can't seem to parse %s. Is this file valid YAML? Please consult https://appcanary.com/servers/new for more instructions.", env.ConfFile)
67 | }
68 |
69 | // bail if there's nothing configured
70 | if len(conf.Watchers) == 0 {
71 | return nil, errors.New("No watchers configured! Please consult https://appcanary.com/servers/new for more instructions.")
72 | }
73 |
74 | // load the server conf (probably) from /var/db if there is one
75 | tryLoadingVarFile(conf)
76 |
77 | return conf, nil
78 | }
79 |
80 | func tryLoadingVarFile(conf *Conf) {
81 | env := FetchEnv()
82 | log := FetchLog()
83 |
84 | if !fileExists(env.VarFile) {
85 | log.Debugf("%s does not exist", env.VarFile)
86 | return
87 | }
88 |
89 | data, err := ioutil.ReadFile(env.VarFile)
90 | if err != nil {
91 | log.Error(err)
92 | return
93 | }
94 |
95 | err = yaml.Unmarshal(data, &conf.ServerConf)
96 | if err != nil {
97 | log.Error(err)
98 | return
99 | }
100 |
101 | log.Debug("Found and read server configuration.")
102 | }
103 |
--------------------------------------------------------------------------------
/conf/yaml_test.go:
--------------------------------------------------------------------------------
1 | package conf
2 |
3 | import (
4 | "os"
5 | "testing"
6 |
7 | "github.com/stateio/testify/assert"
8 | )
9 |
10 | func TestYamlConf(t *testing.T) {
11 | assert := assert.New(t)
12 |
13 | env.ConfFile = "../test/data/test3.yml"
14 | env.VarFile = "../test/data/test_server3.yml"
15 | conf, err := NewYamlConfFromEnv()
16 | assert.Nil(err)
17 |
18 | assert.Equal("APIKEY", conf.ApiKey)
19 | assert.Equal("deployment1", conf.ServerName)
20 | assert.Equal("testDistro", conf.Distro)
21 | assert.Equal("testRelease", conf.Release)
22 |
23 | assert.Equal(3, len(conf.Watchers), "number of watchers")
24 |
25 | dpkg := conf.Watchers[0]
26 | assert.Equal("/var/lib/dpkg/available", dpkg.Path, "file path")
27 |
28 | gemfile := conf.Watchers[1]
29 | assert.Equal("/path/to/Gemfile.lock", gemfile.Path, "file path")
30 |
31 | cmd := conf.Watchers[2]
32 | assert.Equal("fakecmdhere", cmd.Command, "command path")
33 |
34 | assert.Equal("123456", conf.ServerConf.UUID)
35 | }
36 |
37 | func TestTomlYamlConversion(t *testing.T) {
38 | assert := assert.New(t)
39 | // need to populate DEV_CONF vars
40 | InitEnv("test")
41 |
42 | oldConfFile := OLD_DEV_CONF_FILE
43 | oldVarFile := OLD_DEV_VAR_FILE
44 |
45 | conf, err := NewTomlConfFromEnv(oldConfFile, oldVarFile)
46 | assert.Nil(err)
47 |
48 | // check a few bits
49 | assert.Equal("APIKEY", conf.ApiKey)
50 | assert.Equal("deployment1", conf.ServerName)
51 | assert.Equal("testDistro", conf.Distro)
52 | assert.Equal("testRelease", conf.Release)
53 |
54 | assert.Equal("123456", conf.ServerConf.UUID)
55 |
56 | // now save it all as something yaml
57 | newConfFile := "/tmp/newagentconf.yml"
58 | newVarFile := "/tmp/newserverconf.yml"
59 | conf.FullSave(newConfFile, newVarFile)
60 |
61 | if _, err := os.Stat(env.ConfFile); err != nil {
62 | assert.Error(err)
63 | }
64 |
65 | if _, err := os.Stat(env.VarFile); err != nil {
66 | assert.Error(err)
67 | }
68 |
69 | // let's see what's inside
70 | env.ConfFile = newConfFile
71 | env.VarFile = newVarFile
72 | conf, err = NewYamlConfFromEnv()
73 | assert.Nil(err)
74 |
75 | assert.Equal("APIKEY", conf.ApiKey)
76 | assert.Equal("deployment1", conf.ServerName)
77 | assert.Equal("testDistro", conf.Distro)
78 | assert.Equal("testRelease", conf.Release)
79 |
80 | assert.Equal(4, len(conf.Watchers), "number of watchers")
81 |
82 | dpkg := conf.Watchers[0]
83 | assert.Equal("/var/lib/dpkg/available", dpkg.Path, "file path")
84 |
85 | gemfile := conf.Watchers[1]
86 | assert.Equal("/path/to/Gemfile.lock", gemfile.Path, "file path")
87 |
88 | cmd := conf.Watchers[2]
89 | assert.Equal("fakecmdhere", cmd.Command, "command path")
90 |
91 | process := conf.Watchers[3]
92 | assert.Equal("*", process.Process, "inspect process")
93 |
94 | assert.Equal("123456", conf.ServerConf.UUID)
95 | }
96 |
--------------------------------------------------------------------------------
/dist/.gitignore:
--------------------------------------------------------------------------------
1 | *
2 | !.gitignore
--------------------------------------------------------------------------------
/examples/agent.conf:
--------------------------------------------------------------------------------
1 | server_name = "deployment1"
2 |
3 | api_key = "p28tdt94c6fu8l3hscq2gq16uef6fncrud4s6smkfh7qfk1sam7"
4 | #api_key = "APIKEY"
5 | #api_key = "17hbup347no9o9ri1cf7gdvudpk0trkfh3nn8un2id3kjj7568uf"
6 | log_level = "info"
7 | log_path = "./canary.log"
8 |
9 | [[files]]
10 | path = "/Users/maxim/c/projects/agent/test/data/dpkg/available"
11 |
12 | [[files]]
13 | path = "/Users/maxim/c/projects/gemcanary-nest/Gemfile.lock"
14 | #path = "/Users/phillmv/code/state/canary-agent/test/data/Gemfile.lock"
15 |
16 |
--------------------------------------------------------------------------------
/main.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "flag"
5 | "fmt"
6 | "os"
7 | "time"
8 |
9 | "github.com/appcanary/agent/agent"
10 | "github.com/appcanary/agent/agent/detect"
11 | "github.com/appcanary/agent/conf"
12 | )
13 |
14 | var CanaryVersion string
15 | var defaultFlags *flag.FlagSet
16 |
17 | type CommandToPerform int
18 |
19 | const (
20 | PerformAgentLoop CommandToPerform = iota
21 | PerformUpgrade
22 | PerformDisplayVersion
23 | PerformDetectOS
24 | PerformProcessInspection
25 | PerformProcessInspectionJsonDump
26 | )
27 |
28 | func usage() {
29 | fmt.Fprintf(os.Stderr, "Usage: appcanary [COMMAND] [OPTIONS]\nOptions:\n")
30 |
31 | defaultFlags.PrintDefaults()
32 | fmt.Fprintf(os.Stderr, "\nCommands:\n"+
33 | "\t[none]\t\t\tStart the agent\n"+
34 | "\tupgrade\t\t\tUpgrade system packages to nearest safe version (Ubuntu only)\n"+
35 | "\tinspect-processes\tSend your process library information to Appcanary\n"+
36 | "\tdetect-os\t\tDetect current operating system\n")
37 | }
38 |
39 | func parseFlags(argRange int, env *conf.Env) {
40 | var displayVersionFlagged bool
41 | // httptest, used in client.test, sets a usage flag
42 | // that leaks when you use the 'global' FlagSet.
43 | defaultFlags = flag.NewFlagSet("Default", flag.ExitOnError)
44 | defaultFlags.Usage = usage
45 | defaultFlags.StringVar(&env.ConfFile, "conf", env.ConfFile, "Set the config file")
46 | defaultFlags.StringVar(&env.VarFile, "server", env.VarFile, "Set the server file")
47 | defaultFlags.StringVar(&env.LogFile, "log", env.LogFile, "Set the log file (will not override if set in config file)")
48 | defaultFlags.BoolVar(&env.DryRun, "dry-run", false, "Only print, and do not execute, potentially destructive commands")
49 | // -version is handled in parseArguments, but is set here for the usage print out
50 | defaultFlags.BoolVar(&displayVersionFlagged, "version", false, "Display version information")
51 |
52 | defaultFlags.BoolVar(&env.FailOnConflict, "fail-on-conflict", false, "Should upgrade encounter a conflict with configuration files, abort (default: old configuration files are kept, or updated if not modified)")
53 |
54 | if !env.Prod {
55 | defaultFlags.StringVar(&env.BaseUrl, "url", env.BaseUrl, "Set the endpoint")
56 | }
57 |
58 | defaultFlags.Parse(os.Args[argRange:])
59 | }
60 |
61 | func parseArguments(env *conf.Env) CommandToPerform {
62 | var performCmd CommandToPerform
63 |
64 | if len(os.Args) < 2 {
65 | return PerformAgentLoop
66 | }
67 |
68 | // if first arg is a command,
69 | // flags will follow in os.Args[2:]
70 | // else in os.Args[1:]
71 | argRange := 2
72 | switch os.Args[1] {
73 | case "upgrade":
74 | performCmd = PerformUpgrade
75 | case "detect-os":
76 | performCmd = PerformDetectOS
77 | case "inspect-processes":
78 | performCmd = PerformProcessInspection
79 | case "inspect-processes-json":
80 | performCmd = PerformProcessInspectionJsonDump
81 | case "-version":
82 | performCmd = PerformDisplayVersion
83 | case "--version":
84 | performCmd = PerformDisplayVersion
85 | default:
86 | argRange = 1
87 | performCmd = PerformAgentLoop
88 | }
89 |
90 | parseFlags(argRange, env)
91 | return performCmd
92 | }
93 |
94 | func runDisplayVersion() {
95 | fmt.Println(CanaryVersion)
96 | os.Exit(0)
97 | }
98 |
99 | func runDetectOS() {
100 | guess, err := detect.DetectOS()
101 | if err == nil {
102 | fmt.Printf("%s/%s\n", guess.Distro, guess.Release)
103 | } else {
104 | fmt.Printf("%v\n", err.Error())
105 | }
106 | os.Exit(0)
107 | }
108 |
109 | func initialize(env *conf.Env) *agent.Agent {
110 | // let's get started eh
111 | // start the logger
112 | conf.InitLogging()
113 | log := conf.FetchLog()
114 |
115 | fmt.Println(env.Logo)
116 |
117 | // slurp env, instantiate agent
118 | config, err := conf.NewConfFromEnv()
119 | if err != nil {
120 | log.Fatal(err)
121 | }
122 |
123 | if config.ApiKey == "" {
124 | log.Fatal("There's no API key set. Get yours from https://appcanary.com/settings and set it in /etc/appcanary/agent.yml")
125 | }
126 |
127 | // If the config sets a startup delay, we wait to boot up here
128 | if config.StartupDelay != 0 {
129 | delay := time.Duration(config.StartupDelay) * time.Second
130 | // one-shot delay; a plain Sleep avoids leaking a ticker
131 | time.Sleep(delay)
132 | }
133 |
134 | a := agent.NewAgent(CanaryVersion, config)
135 | a.DoneChannel = make(chan os.Signal, 1)
136 |
137 | // we probably can't reliably fingerprint servers,
138 | // so instead we assign a uuid by registering
139 | if a.FirstRun() {
140 | log.Debug("Found no server config. Let's register!")
141 |
142 | for err := a.RegisterServer(); err != nil; {
143 | // we don't need to wait here because the exponential
144 | // backoff library handles it; by the time we hit this
145 | // point we've been trying for about, what, an hour?
146 | log.Infof("Register server error: %s", err)
147 | err = a.RegisterServer()
148 | }
149 |
150 | }
151 |
152 | // Now that we're registered,
153 | // let's init our watchers. We auto sync on watcher init.
154 | a.BuildAndSyncWatchers()
155 | return a
156 | }
157 |
158 | func runProcessInspection(a *agent.Agent) {
159 | log := conf.FetchLog()
160 | agent.ShipProcessMap(a)
161 | log.Info("Process inspection sent. Check https://appcanary.com")
162 | os.Exit(0)
163 | }
164 |
165 | func runProcessInspectionDump() {
166 | agent.DumpProcessMap()
167 | os.Exit(0)
168 | }
169 |
170 | func runUpgrade(a *agent.Agent) {
171 | log := conf.FetchLog()
172 | log.Info("Running upgrade...")
173 | a.PerformUpgrade()
174 | os.Exit(0)
175 | }
176 |
177 | func runAgentLoop(env *conf.Env, a *agent.Agent) {
178 | log := conf.FetchLog()
179 | // Add hooks to files, and push them over
180 | // whenever they change
181 | a.StartPolling()
182 |
183 | // send a heartbeat every ~60min, forever
184 | go func() {
185 | tick := time.Tick(env.HeartbeatDuration)
186 |
187 | for {
188 | err := a.Heartbeat()
189 | if err != nil {
190 | log.Infof("<3 error: %s", err)
191 | }
192 | <-tick
193 | }
194 | }()
195 |
196 | go func() {
197 | tick := time.Tick(env.SyncAllDuration)
198 |
199 | for {
200 | <-tick
201 | a.SyncAllFiles()
202 | }
203 | }()
204 |
205 | defer a.CloseWatches()
206 |
207 | // wait for the right signal?
208 | // signal.Notify(done, os.Interrupt, os.Kill)
209 |
210 | // block forever
211 | <-a.DoneChannel
212 | }
213 |
214 | func checkYourPrivilege() {
215 | if os.Getuid() != 0 && os.Geteuid() != 0 {
216 | fmt.Println("Cannot run unprivileged - must be root (UID=0)")
217 | os.Exit(13)
218 | }
219 | }
220 |
221 | func main() {
222 | conf.InitEnv(os.Getenv("CANARY_ENV"))
223 | env := conf.FetchEnv()
224 |
225 | // parse the args
226 | switch parseArguments(env) {
227 |
228 | case PerformDisplayVersion:
229 | runDisplayVersion()
230 |
231 | case PerformDetectOS:
232 | runDetectOS()
233 |
234 | case PerformProcessInspection:
235 | checkYourPrivilege()
236 | a := initialize(env)
237 | runProcessInspection(a)
238 |
239 | case PerformProcessInspectionJsonDump:
240 | checkYourPrivilege()
241 | conf.InitLogging()
242 | runProcessInspectionDump()
243 |
244 | case PerformUpgrade:
245 | a := initialize(env)
246 | runUpgrade(a)
247 |
248 | case PerformAgentLoop:
249 | a := initialize(env)
250 | runAgentLoop(env, a)
251 | }
252 |
253 | // Close the logfile when we exit
254 | if env.LogFileHandle != nil {
255 | defer env.LogFileHandle.Close()
256 | }
257 |
258 | }
259 |
--------------------------------------------------------------------------------
/package/packager.rb:
--------------------------------------------------------------------------------
1 | # path is relative to root agent folder
2 | load "package/packager_models.rb"
3 | load "package/packager_mgmt.rb"
4 | load "package/packager_recipes.rb"
5 |
--------------------------------------------------------------------------------
/package/packager_mgmt.rb:
--------------------------------------------------------------------------------
1 | class Packager
2 | CONFIG_FILES = {"config/etc/appcanary/agent.yml" => "/etc/appcanary/agent.yml.sample",
3 | "config/var/db/appcanary/server.yml" => "/var/db/appcanary/server.yml.sample"}
4 | DIRECTORIES = ["/etc/appcanary/", "/var/db/appcanary/"]
5 | ARCHS = ["amd64", "i386"]
6 |
7 | class << self
8 | attr_accessor :distro, :releases, :package_type, :version, :skip_docker
9 | end
10 |
11 | attr_accessor :distro, :releases, :package_type, :version, :skip_docker
12 | def initialize(version)
13 | self.version = version
14 |
15 | self.distro = self.class.distro
16 | self.releases = self.class.releases
17 | self.package_type = self.class.package_type
18 | self.skip_docker = self.class.skip_docker
19 | end
20 |
21 | def build_packages
22 | releases.map do |rel, service|
23 | ARCHS.map do |arch|
24 | pre_pkg = PrePackage.new(distro, rel, service, package_type, arch, version, self.class::CONFIG_FILES, DIRECTORIES, skip_docker)
25 | PackageBuilder.new(pre_pkg).build!
26 | end
27 | end.flatten
28 | end
29 | end
30 |
31 | class PackageBuilder
32 | VENDOR = "Appcanary"
33 | NAME = "appcanary"
34 | LICENSE = "GPLv3"
35 |
36 | attr_accessor :package
37 | def initialize(package)
38 | self.package = package
39 | end
40 |
41 | def execute(str)
42 | puts str
43 | system str
44 | puts "\n"
45 | end
46 |
47 | def build!
48 | p = self.package
49 | build_cmd = [
50 | "bundle exec fpm -f", # force
51 | "-s dir", # input type
52 | "-t #{p.package_type}", # output type
53 | "-n #{NAME}", # package name
54 | "-p #{p.release_path}", # where to output
55 | "-v #{p.version}",
56 | "-a #{p.arch}",
57 | "--rpm-os linux", # target OS, ignored if not rpm
58 | "-C #{p.files_path}", # use this directory to look for files
59 | "#{p.dir_args}", # use the following directories when building
60 | "#{p.post_install_files}", # after install, use this file
61 | "--license #{LICENSE} --vendor #{VENDOR}",
62 | "./ #{p.bin_file}", # where should the binary be copied to?
63 | "#{p.config_files_path}" # find the config files to install
64 | ].join(" ")
65 |
66 | execute build_cmd
67 | Package.new(p.distro, p.release, p.version, p.release_path, p.skip_docker)
68 | end
69 | end
70 |
71 | class PackagePublisher
72 | attr_accessor :user, :repo
73 | def initialize(user, repo)
74 | self.user = user
75 | self.repo = repo
76 | end
77 |
78 | def execute(str)
79 | puts str
80 | system str
81 | puts "\n"
82 | end
83 |
84 | def publish!(pkg)
85 | execute %{bundle exec package_cloud push #{user}/#{repo}/#{pc_distro_name(pkg.distro)}/#{pkg.release} #{pkg.path}}
86 | end
87 |
88 | def pc_distro_name(dname)
89 | case dname
90 | when "centos"
91 | "el"
92 | when "amazon"
93 | "el"
94 | else
95 | dname
96 | end
97 | end
98 | end
99 |
--------------------------------------------------------------------------------
/package/packager_models.rb:
--------------------------------------------------------------------------------
1 | class Package
2 | attr_accessor :distro, :release, :version, :path, :skip_docker
3 | def initialize(distro, release, version, path, skip_docker)
4 | self.distro = distro
5 | self.release = release
6 | self.version = version
7 | self.path = path
8 | self.skip_docker = skip_docker
9 | end
10 | end
11 |
12 | class PrePackage
13 | SERVICES = [:upstart, :systemd, :systemv]
14 | attr_accessor :distro, :release, :service, :package_type, :arch, :version, :config_files, :directories, :skip_docker
15 | def initialize(distro, release, service, package_type, arch, version, config_files, directories, skip_docker)
16 | self.distro = distro
17 | self.release = release
18 | self.service = service
19 | if !SERVICES.include? service
20 | raise "Service must be one of #{SERVICES}"
21 | end
22 | self.package_type = package_type
23 | self.arch = arch
24 | self.version = version
25 | self.config_files = config_files
26 | self.directories = directories
27 | self.skip_docker = skip_docker
28 | end
29 |
30 | def dir_args
31 | directories.map { |f| "--directories #{f}"}.join(" ")
32 | end
33 |
34 | def config_files_path
35 | config_files.map {|k, v| "../../../#{k}=#{v}" }.join(" ")
36 | end
37 |
38 | def full_distro_name
39 | "#{distro}_#{release}"
40 | end
41 |
42 | def release_path
43 | "releases/appcanary_#{version}_#{arch}_#{full_distro_name}.#{package_type}"
44 | end
45 |
46 | # also, remember to document things. why is this
47 | # four layers deep?
48 | def bin_path
49 | "../../../../dist/#{version}/linux_#{arch}/appcanary"
50 | end
51 |
52 | def bin_file
53 | "#{bin_path}=/usr/sbin/appcanary"
54 | end
55 |
56 |
57 | def post_install_files
58 | "--after-install ./#{package_dir}/post-install.sh"
59 | end
60 |
61 | def after_remove_files
62 | "--after-remove ./#{package_dir}/post-remove.sh"
63 | end
64 |
65 | def after_upgrade_files
66 | "--after-upgrade ./#{package_dir}/post-upgrade.sh"
67 | end
68 |
69 | def files_path
70 | "#{package_dir}/files"
71 | end
72 |
73 | def package_dir
74 | "package_files/#{package_type}/#{service}/"
75 | end
76 | end
77 |
--------------------------------------------------------------------------------
/package/packager_recipes.rb:
--------------------------------------------------------------------------------
1 | class UbuntuRecipe < Packager
2 | self.distro = "ubuntu"
3 | self.releases = {"precise" => :upstart,
4 | "trusty" => :upstart,
5 | "utopic" => :upstart,
6 | "vivid" => :systemd,
7 | "wily" => :systemd,
8 | "xenial" => :systemd,
9 | "yakkety" => :systemd,
10 | "zesty" => :systemd}
11 | self.package_type = "deb"
12 | CONFIG_FILES = {"config/etc/appcanary/dpkg.agent.yml" => "/etc/appcanary/agent.yml.sample",
13 | "config/var/db/appcanary/server.yml" => "/var/db/appcanary/server.yml.sample"}
14 | end
15 |
16 | class CentosRecipe < Packager
17 | self.distro = "centos"
18 | self.releases = {"5" => :systemv,
19 | "6" => :systemv,
20 | "7" => :systemd}
21 |
22 | CONFIG_FILES = {"config/etc/appcanary/rpm.agent.yml" => "/etc/appcanary/agent.yml.sample",
23 | "config/var/db/appcanary/server.yml" => "/var/db/appcanary/server.yml.sample"}
24 | self.package_type = "rpm"
25 | end
26 |
27 |
28 | class DebianRecipe < Packager
29 | self.distro = "debian"
30 | self.releases = {"jessie" => :systemd,
31 | "wheezy" => :systemv,
32 | "squeeze" => :systemv}
33 | self.package_type = "deb"
34 | CONFIG_FILES = {"config/etc/appcanary/dpkg.agent.yml" => "/etc/appcanary/agent.yml.sample",
35 | "config/var/db/appcanary/server.yml" => "/var/db/appcanary/server.yml.sample"}
36 | end
37 |
38 | class MintRecipe < Packager
39 | self.distro = "linuxmint"
40 | self.releases = {"rosa" => :upstart,
41 | "rafaela" => :upstart,
42 | "rebecca" => :upstart,
43 | "qiana" => :upstart}
44 | self.package_type = "deb"
45 | self.skip_docker = true
46 | end
47 |
48 | class FedoraRecipe < Packager
49 | self.distro = "fedora"
50 | self.releases = {"23" => :systemd,
51 | "24" => :systemd}
52 | self.package_type = "rpm"
53 | self.skip_docker = true
54 | end
55 |
--------------------------------------------------------------------------------
/package/prune.rb:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env ruby
2 | # usage: ./package/prune user/repo
3 | #
4 | require 'json'
5 | require 'pp'
6 | require 'rest-client'
7 | require 'time'
8 |
9 | API_TOKEN = JSON.parse(`cat ~/.packagecloud`)["token"]
10 | USER_REPO = ARGV[0]
11 |
12 | @base_url = "https://#{API_TOKEN}:@packagecloud.io/api/v1/repos/#{USER_REPO}"
13 |
14 | package_url = "/packages.json"
15 |
16 | url = @base_url + package_url
17 |
18 | all_pkg = RestClient.get(url);
19 | pkgs = JSON.parse(all_pkg)
20 |
21 | # prune everything
22 | # pkgs.each do |p|
23 | # distro_version = p["distro_version"]
24 | # filename = p["filename"]
25 | # yank_url = "/#{distro_version}/#{filename}"
26 | # url = base_url + yank_url
27 | # puts "yanking #{url}"
28 | #
29 | # result = RestClient.delete(url)
30 | # if result == {}
31 | # puts "successfully yanked #{filename}!"
32 | # end
33 | # end
34 |
35 |
36 | def yank_url(p)
37 | distro_version = p["distro_version"]
38 | filename = p["filename"]
39 | yank_url = "/#{distro_version}/#{filename}"
40 | @base_url + yank_url
41 | end
42 |
43 | pkgs.select { |p| p["version"] < "0.0.2" }.map { |p| RestClient.delete yank_url(p) }
44 |
--------------------------------------------------------------------------------
/package_files/config/etc/appcanary/agent.yml:
--------------------------------------------------------------------------------
1 | # Get your api key at https://www.appcanary.com/settings
2 | api_key: ""
3 |
4 | # Name your server (optional)
5 | #server_name: ""
6 |
7 | # add your gemfiles by uncommenting these lines:
8 |
9 | #watchers:
10 | #- path: "/var/www/someapp/current/Gemfile.lock"
11 |
12 |
--------------------------------------------------------------------------------
/package_files/config/etc/appcanary/dpkg.agent.yml:
--------------------------------------------------------------------------------
1 | # Get your api key at https://www.appcanary.com/settings
2 | api_key: ""
3 |
4 | # Name your server (optional)
5 | #server_name: ""
6 |
7 | # ubuntu packages
8 | # add a gemfile by uncommenting the bottom line:
9 | watchers:
10 | - path: "/var/lib/dpkg/status"
11 | # - path: "/var/www/someapp/current/Gemfile.lock"
12 |
--------------------------------------------------------------------------------
/package_files/config/etc/appcanary/rpm.agent.yml:
--------------------------------------------------------------------------------
1 | # Get your api key at https://www.appcanary.com/settings
2 | api_key: ""
3 |
4 | # Name your server (optional)
5 | #server_name: ""
6 |
7 | # centos packages
8 | # add a gemfile by uncommenting the bottom line:
9 | watchers:
10 | - command: "rpm -qa"
11 | # - path: "/var/www/someapp/current/Gemfile.lock"
12 |
13 |
--------------------------------------------------------------------------------
/package_files/config/var/db/appcanary/server.yml:
--------------------------------------------------------------------------------
1 | #left empty
--------------------------------------------------------------------------------
/package_files/deb/systemd/files/etc/logrotate.d/appcanary:
--------------------------------------------------------------------------------
1 | /var/log/appcanary.log
2 | {
3 | rotate 4
4 | weekly
5 | missingok
6 | compress
7 | notifempty
8 | create 0644 appcanary appcanary
9 | postrotate
10 | systemctl restart appcanary >/dev/null 2>&1 || true
11 | endscript
12 | }
13 |
--------------------------------------------------------------------------------
/package_files/deb/systemd/files/etc/systemd/system/appcanary.service:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=appcanary agent
3 | After=network.target
4 |
5 | [Service]
6 | Type=simple
7 | ExecStart=/usr/sbin/appcanary
8 |
9 | [Install]
10 | WantedBy=multi-user.target
11 |
--------------------------------------------------------------------------------
/package_files/deb/systemd/post-install.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | USER="appcanary"
4 |
5 | systemctl enable appcanary
6 | if ! id -u $USER > /dev/null 2>&1; then
7 | useradd -r -d /var/db/appcanary -s /sbin/nologin -c "AppCanary Agent" $USER
8 | fi
9 | touch /var/log/appcanary.log
10 | chown ${USER}:${USER} /var/log/appcanary.log
11 |
--------------------------------------------------------------------------------
/package_files/deb/systemd/post-remove.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | userdel appcanary
3 |
--------------------------------------------------------------------------------
/package_files/deb/systemd/post-upgrade.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | systemctl restart appcanary
3 |
--------------------------------------------------------------------------------
/package_files/deb/systemv/files/etc/default/appcanary:
--------------------------------------------------------------------------------
1 | # defaults for appcanary
2 | # sourced by /etc/init.d/appcanary
3 |
4 | # Change to 0 to disable daemon
5 | ENABLE_DAEMON=1
6 |
--------------------------------------------------------------------------------
/package_files/deb/systemv/files/etc/init.d/appcanary:
--------------------------------------------------------------------------------
1 | #! /bin/sh
2 | ### BEGIN INIT INFO
3 | # Provides: appcanary
4 | # Required-Start: $local_fs $remote_fs $syslog $network
5 | # Required-Stop: $local_fs $remote_fs $syslog $network
6 | # Default-Start: 2 3 4 5
7 | # Default-Stop: 0 1 6
8 | # Short-Description: Start or stop the appcanary agent
9 | # Description: Enable the service provided by the appcanary agent
10 | ### END INIT INFO
11 |
12 | # Author: Michael MacLeod
13 |
14 | # PATH should only include /usr/* if it runs after the mountnfs.sh script
15 | PATH=/sbin:/usr/sbin:/bin:/usr/bin
16 | DESC="appCanary agent"
17 | NAME=appcanary
18 | DAEMON=/usr/sbin/$NAME
19 | DAEMON_ARGS=""
20 | PIDFILE=/var/run/$NAME.pid
21 | SCRIPTNAME=/etc/init.d/$NAME
22 |
23 | # Exit if the package is not installed
24 | [ -x "$DAEMON" ] || exit 0
25 |
26 | # Read configuration variable file if it is present
27 | [ -r /etc/default/$NAME ] && . /etc/default/$NAME
28 |
29 | # Load the VERBOSE setting and other rcS variables
30 | . /lib/init/vars.sh
31 |
32 | # Define LSB log_* functions.
33 | # Depend on lsb-base (>= 3.2-14) to ensure that this file is present
34 | # and status_of_proc is working.
35 | . /lib/lsb/init-functions
36 |
37 | #
38 | # Function that starts the daemon/service
39 | #
40 | do_start()
41 | {
42 | # Return
43 | # 0 if daemon has been started
44 | # 1 if daemon was already running
45 | # 2 if daemon could not be started
46 | start-stop-daemon --start --quiet --pidfile $PIDFILE --exec $DAEMON --background --test > /dev/null \
47 | || return 1
48 | start-stop-daemon --start --quiet --pidfile $PIDFILE --exec $DAEMON --background -- \
49 | $DAEMON_ARGS \
50 | || return 2
51 | # Add code here, if necessary, that waits for the process to be ready
52 | # to handle requests from services started subsequently which depend
53 | # on this one. As a last resort, sleep for some time.
54 | }
55 |
56 | #
57 | # Function that stops the daemon/service
58 | #
59 | do_stop()
60 | {
61 | # Return
62 | # 0 if daemon has been stopped
63 | # 1 if daemon was already stopped
64 | # 2 if daemon could not be stopped
65 | # other if a failure occurred
66 | start-stop-daemon --stop --quiet --retry=TERM/30/KILL/5 --pidfile $PIDFILE --name $NAME
67 | RETVAL="$?"
68 | [ "$RETVAL" = 2 ] && return 2
69 | # Wait for children to finish too if this is a daemon that forks
70 | # and if the daemon is only ever run from this initscript.
71 | # If the above conditions are not satisfied then add some other code
72 | # that waits for the process to drop all resources that could be
73 | # needed by services started subsequently. A last resort is to
74 | # sleep for some time.
75 | start-stop-daemon --stop --quiet --oknodo --retry=0/30/KILL/5 --exec $DAEMON
76 | [ "$?" = 2 ] && return 2
77 | # Many daemons don't delete their pidfiles when they exit.
78 | rm -f $PIDFILE
79 | return "$RETVAL"
80 | }
81 |
82 | #
83 | # Function that sends a SIGHUP to the daemon/service
84 | #
85 | do_reload() {
86 | #
87 | # If the daemon can reload its configuration without
88 | # restarting (for example, when it is sent a SIGHUP),
89 | # then implement that here.
90 | #
91 | start-stop-daemon --stop --signal 1 --quiet --pidfile $PIDFILE --name $NAME
92 | return 0
93 | }
94 |
95 | case "$1" in
96 | start)
97 | [ "$VERBOSE" != no ] && log_daemon_msg "Starting $DESC" "$NAME"
98 | do_start
99 | case "$?" in
100 | 0|1) [ "$VERBOSE" != no ] && log_end_msg 0 ;;
101 | 2) [ "$VERBOSE" != no ] && log_end_msg 1 ;;
102 | esac
103 | ;;
104 | stop)
105 | [ "$VERBOSE" != no ] && log_daemon_msg "Stopping $DESC" "$NAME"
106 | do_stop
107 | case "$?" in
108 | 0|1) [ "$VERBOSE" != no ] && log_end_msg 0 ;;
109 | 2) [ "$VERBOSE" != no ] && log_end_msg 1 ;;
110 | esac
111 | ;;
112 | status)
113 | status_of_proc "$DAEMON" "$NAME" && exit 0 || exit $?
114 | ;;
115 | #reload|force-reload)
116 | #
117 | # If do_reload() is not implemented then leave this commented out
118 | # and leave 'force-reload' as an alias for 'restart'.
119 | #
120 | #log_daemon_msg "Reloading $DESC" "$NAME"
121 | #do_reload
122 | #log_end_msg $?
123 | #;;
124 | restart|force-reload)
125 | #
126 | # If the "reload" option is implemented then remove the
127 | # 'force-reload' alias
128 | #
129 | log_daemon_msg "Restarting $DESC" "$NAME"
130 | do_stop
131 | case "$?" in
132 | 0|1)
133 | do_start
134 | case "$?" in
135 | 0) log_end_msg 0 ;;
136 | 1) log_end_msg 1 ;; # Old process is still running
137 | *) log_end_msg 1 ;; # Failed to start
138 | esac
139 | ;;
140 | *)
141 | # Failed to stop
142 | log_end_msg 1
143 | ;;
144 | esac
145 | ;;
146 | *)
147 | #echo "Usage: $SCRIPTNAME {start|stop|restart|reload|force-reload}" >&2
148 | echo "Usage: $SCRIPTNAME {start|stop|status|restart|force-reload}" >&2
149 | exit 3
150 | ;;
151 | esac
152 |
153 | :
154 |
--------------------------------------------------------------------------------
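On a sysvinit Debian/Ubuntu host this script is driven through the usual tooling; post-install.sh registers it with update-rc.d, and a quick manual check (illustrative) is:
    $ update-rc.d appcanary defaults
    $ service appcanary start
    $ service appcanary status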
/package_files/deb/systemv/files/etc/logrotate.d/appcanary:
--------------------------------------------------------------------------------
1 | /var/log/appcanary.log
2 | {
3 | rotate 4
4 | weekly
5 | missingok
6 | compress
7 | notifempty
8 | create 0644 appcanary appcanary
9 | postrotate
10 | service appcanary restart >/dev/null 2>&1 || true
11 | endscript
12 | }
13 |
--------------------------------------------------------------------------------
/package_files/deb/systemv/post-install.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | USER="appcanary"
4 |
5 | update-rc.d appcanary defaults
6 | if ! id -u $USER > /dev/null 2>&1; then
7 | useradd -r -d /var/db/appcanary -s /sbin/nologin -c "AppCanary Agent" $USER
8 | fi
9 | touch /var/log/appcanary.log
10 | chown ${USER}:${USER} /var/log/appcanary.log
11 |
--------------------------------------------------------------------------------
/package_files/deb/systemv/post-remove.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | userdel appcanary
3 |
--------------------------------------------------------------------------------
/package_files/deb/systemv/post-upgrade.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | service appcanary restart
3 |
--------------------------------------------------------------------------------
/package_files/deb/upstart/files/etc/init/appcanary.conf:
--------------------------------------------------------------------------------
1 | start on filesystem
2 | stop on runlevel [06]
3 |
4 | respawn
5 | respawn limit 10 5
6 | console log
7 |
8 | script
9 | exec appcanary
10 | end script
11 |
--------------------------------------------------------------------------------
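The upstart variant needs no registration step: dropping the job file into /etc/init is enough, and it can be exercised with the standard upstart tools (illustrative):
    $ initctl reload-configuration
    $ initctl start appcanary
    $ initctl status appcanary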
/package_files/deb/upstart/files/etc/logrotate.d/appcanary:
--------------------------------------------------------------------------------
1 | /var/log/appcanary.log
2 | {
3 | rotate 4
4 | weekly
5 | missingok
6 | compress
7 | notifempty
8 | create 0644 appcanary appcanary
9 | postrotate
10 | service appcanary restart >/dev/null 2>&1 || true
11 | endscript
12 | }
13 |
--------------------------------------------------------------------------------
/package_files/deb/upstart/post-install.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | USER="appcanary"
4 |
5 | if ! id -u $USER > /dev/null 2>&1; then
6 | useradd -r -d /var/db/appcanary -s /sbin/nologin -c "AppCanary Agent" $USER
7 | fi
8 | touch /var/log/appcanary.log
9 | chown ${USER}:${USER} /var/log/appcanary.log
10 |
--------------------------------------------------------------------------------
/package_files/deb/upstart/post-remove.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | userdel appcanary
3 |
--------------------------------------------------------------------------------
/package_files/deb/upstart/post-upgrade.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | service appcanary restart
3 |
--------------------------------------------------------------------------------
/package_files/rpm/systemd/files/etc/logrotate.d/appcanary:
--------------------------------------------------------------------------------
1 | /var/log/appcanary.log
2 | {
3 | rotate 4
4 | weekly
5 | missingok
6 | compress
7 | notifempty
8 | create 0644 appcanary appcanary
9 | postrotate
10 | systemctl restart appcanary >/dev/null 2>&1 || true
11 | endscript
12 | }
13 |
--------------------------------------------------------------------------------
/package_files/rpm/systemd/files/etc/systemd/system/appcanary.service:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=appcanary agent
3 | After=network.target
4 |
5 | [Service]
6 | Type=simple
7 | ExecStart=/usr/sbin/appcanary
8 |
9 | [Install]
10 | WantedBy=multi-user.target
11 |
--------------------------------------------------------------------------------
/package_files/rpm/systemd/post-install.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | USER="appcanary"
4 |
5 | systemctl enable appcanary
6 | if ! id -u $USER > /dev/null 2>&1; then
7 | useradd -r -d /var/db/appcanary -s /sbin/nologin -c "AppCanary Agent" $USER
8 | fi
9 | touch /var/log/appcanary.log
10 | chown ${USER}:${USER} /var/log/appcanary.log
11 |
--------------------------------------------------------------------------------
/package_files/rpm/systemd/post-remove.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | userdel appcanary
3 |
--------------------------------------------------------------------------------
/package_files/rpm/systemd/post-upgrade.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | systemctl restart appcanary
3 |
--------------------------------------------------------------------------------
/package_files/rpm/systemv/files/etc/logrotate.d/appcanary:
--------------------------------------------------------------------------------
1 | /var/log/appcanary.log
2 | {
3 | rotate 4
4 | weekly
5 | missingok
6 | compress
7 | notifempty
8 | create 0644 appcanary appcanary
9 | postrotate
10 | service appcanary restart >/dev/null 2>&1 || true
11 | endscript
12 | }
13 |
--------------------------------------------------------------------------------
/package_files/rpm/systemv/files/etc/rc.d/init.d/appcanary:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #
3 | # appcanary Startup script for appcanary.
4 | #
5 | # chkconfig: 2345 12 88
6 | # description: The appcanary agent
7 | ### BEGIN INIT INFO
8 | # Provides: appcanary
9 | # Required-Start: $local_fs
10 | # Required-Stop: $local_fs
11 | # Default-Start: 2 3 4 5
12 | # Default-Stop: 0 1 6
13 | # Short-Description: appcanary
14 | # Description: appcanary agent
15 | ### END INIT INFO
16 |
17 | # Source function library.
18 | . /etc/init.d/functions
19 |
20 | RETVAL=0
21 |
22 | proc=appcanary
23 | user=root
24 | exec=/usr/sbin/appcanary
25 | piddir=/var/run/appcanary
26 | pidfile=${piddir}/appcanary.pid
27 | lockfile=/var/lock/subsys/appcanary
28 |
29 | start() {
30 | [ -x $exec ] || exit 5
31 |
32 | mkdir -p /var/db/appcanary ${piddir}
33 | chown -R $user /var/db/appcanary $piddir
34 |
35 | echo -n $"Starting appcanary: "
36 | daemon --check $proc \
37 | --user=$user \
38 | --pidfile=$pidfile \
39 | " { nohup $exec < /dev/null > /dev/null 2>&1 & } ; echo \$! >| $pidfile "
40 | RETVAL=$?
41 | if [ $RETVAL = 0 ]; then
42 | touch ${lockfile}
43 | echo_success
44 | else
45 | echo_failure
46 | fi
47 | echo
48 | return $RETVAL
49 | }
50 |
51 | stop() {
52 | echo -n $"Shutting down appcanary: "
53 | killproc -p $pidfile $exec
54 | RETVAL=$?
55 | if [ $RETVAL = 0 ]; then
56 | rm -f ${lockfile} ${pidfile}
57 | echo_success
58 | else
59 | echo_failure
60 | fi
61 | echo
62 | return $RETVAL
63 | }
64 |
65 | case "$1" in
66 | start)
67 | start
68 | ;;
69 | stop)
70 | stop
71 | ;;
72 | status)
73 | status $proc
74 | ;;
75 | restart)
76 | stop
77 | start
78 | ;;
79 | reload)
80 | exit 3
81 | ;;
82 | condrestart)
83 | status $proc >/dev/null 2>&1 || exit 0
84 | stop; start
85 | ;;
86 | *)
87 | echo $"Usage: $0 {start|stop|status|reload|restart|condrestart}"
88 | exit 1
89 | ;;
90 | esac
91 | exit $?
92 |
--------------------------------------------------------------------------------
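The RPM sysvinit flavour is registered with chkconfig (post-install.sh below does exactly that); a manual check on a CentOS-style host might be:
    $ chkconfig appcanary on
    $ service appcanary start
    $ service appcanary status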
/package_files/rpm/systemv/post-install.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | USER="appcanary"
4 |
5 | chkconfig appcanary on
6 | if ! id -u $USER > /dev/null 2>&1; then
7 | useradd -r -d /var/db/appcanary -s /sbin/nologin -c "AppCanary Agent" $USER
8 | fi
9 | touch /var/log/appcanary.log
10 | chown ${USER}:${USER} /var/log/appcanary.log
11 |
--------------------------------------------------------------------------------
/package_files/rpm/systemv/post-remove.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | userdel appcanary
3 |
--------------------------------------------------------------------------------
/package_files/rpm/systemv/post-upgrade.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | service appcanary restart
3 |
--------------------------------------------------------------------------------
/readme.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/appcanary/agent/48db7f45c6b2ed2d8d1dede8751e92d61756d984/readme.gif
--------------------------------------------------------------------------------
/releases/.gitignore:
--------------------------------------------------------------------------------
1 | *
2 | !.gitignore
--------------------------------------------------------------------------------
/test/data/.gitignore:
--------------------------------------------------------------------------------
1 | test.yml
2 | test_server.yml
3 | tmptest.yml
4 | tmptest_server.yml
--------------------------------------------------------------------------------
/test/data/Gemfile.lock:
--------------------------------------------------------------------------------
1 | GEM
2 | remote: https://rubygems.org/
3 | specs:
4 | actionmailer (4.1.7)
5 | actionpack (= 4.1.7)
6 | actionview (= 4.1.7)
7 | mail (~> 2.5, >= 2.5.4)
8 | actionpack (4.1.7)
9 | actionview (= 4.1.7)
10 | activesupport (= 4.1.7)
11 | rack (~> 1.5.2)
12 | rack-test (~> 0.6.2)
13 | actionview (4.1.7)
14 | activesupport (= 4.1.7)
15 | builder (~> 3.1)
16 | erubis (~> 2.7.0)
17 | activemodel (4.1.7)
18 | activesupport (= 4.1.7)
19 | builder (~> 3.1)
20 | activerecord (4.1.7)
21 | activemodel (= 4.1.7)
22 | activesupport (= 4.1.7)
23 | arel (~> 5.0.0)
24 | activesupport (4.1.7)
25 | i18n (~> 0.6, >= 0.6.9)
26 | json (~> 1.7, >= 1.7.7)
27 | minitest (~> 5.1)
28 | thread_safe (~> 0.1)
29 | tzinfo (~> 1.1)
30 | arel (5.0.1.20140414130214)
31 | builder (3.2.2)
32 | erubis (2.7.0)
33 | hike (1.2.3)
34 | i18n (0.6.11)
35 | json (1.8.1)
36 | mail (2.6.3)
37 | mime-types (>= 1.16, < 3)
38 | mime-types (2.4.3)
39 | minitest (5.4.3)
40 | multi_json (1.10.1)
41 | rack (1.5.2)
42 | rack-test (0.6.2)
43 | rack (>= 1.0)
44 | rails (4.1.7)
45 | actionmailer (= 4.1.7)
46 | actionpack (= 4.1.7)
47 | actionview (= 4.1.7)
48 | activemodel (= 4.1.7)
49 | activerecord (= 4.1.7)
50 | activesupport (= 4.1.7)
51 | bundler (>= 1.3.0, < 2.0)
52 | railties (= 4.1.7)
53 | sprockets-rails (~> 2.0)
54 | railties (4.1.7)
55 | actionpack (= 4.1.7)
56 | activesupport (= 4.1.7)
57 | rake (>= 0.8.7)
58 | thor (>= 0.18.1, < 2.0)
59 | rake (10.3.2)
60 | sprockets (2.12.3)
61 | hike (~> 1.2)
62 | multi_json (~> 1.0)
63 | rack (~> 1.0)
64 | tilt (~> 1.1, != 1.3.0)
65 | sprockets-rails (2.2.0)
66 | actionpack (>= 3.0)
67 | activesupport (>= 3.0)
68 | sprockets (>= 2.8, < 4.0)
69 | thor (0.19.1)
70 | thread_safe (0.3.4)
71 | tilt (1.4.1)
72 | tzinfo (1.2.2)
73 | thread_safe (~> 0.1)
74 |
75 | PLATFORMS
76 | ruby
77 |
78 | DEPENDENCIES
79 | rails
80 |
--------------------------------------------------------------------------------
/test/data/agent.yml:
--------------------------------------------------------------------------------
1 | server_name: "deployment1"
2 |
3 | api_key: "APIKEY"
4 | log_level: "info"
5 | distro: "testDistro"
6 | release: "testRelease"
7 | tags:
8 | - dogs
9 | - webserver
10 |
11 | watchers:
12 | - path: "/var/lib/dpkg/available"
13 | - path: "/path/to/Gemfile.lock"
14 | - command: "fakecmdhere"
15 | - process: "*"
16 |
--------------------------------------------------------------------------------
/test/data/lsb-release:
--------------------------------------------------------------------------------
1 | DISTRIB_ID=Ubuntu
2 | DISTRIB_RELEASE=14.04
3 | DISTRIB_CODENAME=trusty
4 | DISTRIB_DESCRIPTION="Ubuntu 14.04.2 LTS"
5 |
--------------------------------------------------------------------------------
/test/data/old_toml_server.conf:
--------------------------------------------------------------------------------
1 | uuid = "123456"
2 |
--------------------------------------------------------------------------------
/test/data/old_toml_test.conf:
--------------------------------------------------------------------------------
1 | server_name = "deployment1"
2 |
3 | api_key = "APIKEY"
4 | log_level = "info"
5 | distro = "testDistro"
6 | release = "testRelease"
7 |
8 | [[files]]
9 | path = "/var/lib/dpkg/available"
10 |
11 | [[files]]
12 | path = "/path/to/Gemfile.lock"
13 |
14 | [[files]]
15 | process = "fakecmdhere"
16 |
17 | [[files]]
18 | inspect_process = "*"
--------------------------------------------------------------------------------
/test/data/os-release:
--------------------------------------------------------------------------------
1 | NAME="CentOS Linux"
2 | VERSION="7 (Core)"
3 | ID="centos"
4 | ID_LIKE="rhel fedora"
5 | VERSION_ID="7"
6 | PRETTY_NAME="CentOS Linux 7 (Core)"
7 | ANSI_COLOR="0;31"
8 | CPE_NAME="cpe:/o:centos:centos:7"
9 | HOME_URL="https://www.centos.org/"
10 | BUG_REPORT_URL="https://bugs.centos.org/"
11 |
12 | CENTOS_MANTISBT_PROJECT="CentOS-7"
13 | CENTOS_MANTISBT_PROJECT_VERSION="7"
14 | REDHAT_SUPPORT_PRODUCT="centos"
15 | REDHAT_SUPPORT_PRODUCT_VERSION="7"
16 |
--------------------------------------------------------------------------------
/test/data/pointless:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 |
3 | while :
4 | do
5 | echo keeping on keeping on
6 | sleep 1
7 | done
8 |
--------------------------------------------------------------------------------
/test/data/redhat-release:
--------------------------------------------------------------------------------
1 | CentOS release 6.7 (Final)
2 |
--------------------------------------------------------------------------------
/test/data/server.yml:
--------------------------------------------------------------------------------
1 | uuid: "123456"
2 |
--------------------------------------------------------------------------------
/test/data/spector.conf:
--------------------------------------------------------------------------------
1 | server_name = "spectortest"
2 |
3 | api_key = "APIKEY"
4 | distro = "ubuntu"
5 | release = "16.04"
6 |
7 | [[files]]
8 | match = "dhclient"
--------------------------------------------------------------------------------
/test/data/test2.conf:
--------------------------------------------------------------------------------
1 | server_name = "deployment1"
2 |
3 | #api_key = "ths1vfqrfvm5otqau35blqofo9hrb9ik7i1qiggeicnfrehtjao"
4 | #api_key = "1bb6dmsosdo5u7gjojinaaok8l9pef5o65kkqjg3mm3qn9dasfjs"
5 | #api_key = "p3lgmr93dmkd3q4dj5m5br837aikd72g4cmrv81hod2iik4oh54"
6 | distro="ubuntu"
7 | release="14.04"
8 |
9 | [[files]]
10 | path = "/Users/phillmv/code/c/agent/test/data/Gemfile.lock"
11 |
12 | [[files]]
13 | path = "/Users/phillmv/code/c/agent/test/data/dpkg/status"
14 | #process = "tar -h"
15 |
--------------------------------------------------------------------------------
/test/data/test3.yml:
--------------------------------------------------------------------------------
1 | server_name: deployment1
2 |
3 | api_key: APIKEY
4 | log_level: info
5 |
6 | distro: testDistro
7 | release: testRelease
8 |
9 | watchers:
10 | - path: /var/lib/dpkg/available
11 | - path: /path/to/Gemfile.lock
12 | - command: fakecmdhere
13 |
--------------------------------------------------------------------------------
/test/data/test_server3.yml:
--------------------------------------------------------------------------------
1 | uuid: "123456"
2 |
--------------------------------------------------------------------------------
/test/dump.json:
--------------------------------------------------------------------------------
1 | {"libkrb5-3":"1.12+dfsg-2ubuntu5.2","isc-dhcp-client":"4.2.4-7ubuntu12.4"}
2 |
--------------------------------------------------------------------------------
/test/pkg/Rakefile:
--------------------------------------------------------------------------------
1 | # docker package integration test suite/runner
2 | #
3 | # We support a lot of different linuxes. We needed
4 | # an automated way to verify that the packages at the very
5 | # least "unpack" correctly on all the different platforms.
6 | #
7 | # The following, given a distro, release name and package
8 | # file will generate a Dockerfile, fetch the docker image,
9 | # build an image with the provided package, install it,
10 | # then verify that all of the files we care about got
11 | # put in the right place.
12 |
13 | require 'erb'
14 | VERBOSE = (ENV["CANARY_ENV"] == "test")
15 |
16 | def shell(cmd)
17 | if VERBOSE
18 | puts "-> #{cmd}"
19 | system(cmd)
20 | else
21 | `#{cmd}`
22 | end
23 | end
24 |
25 | # takes a collection of Recipes and orchestrates
26 | # the test across all of them.
27 | class DTestRunner
28 | attr_accessor :built_packages
29 | def initialize(bp)
30 | @integration_test_path = File.dirname(__FILE__)
31 | @ct_runs = 0
32 | @ct_succ = 0
33 | @ct_fail = 0
34 |
35 | self.built_packages = bp
36 | end
37 |
38 | def run!
39 | @built_packages.each do |pkg|
40 | next if pkg.skip_docker
41 |
42 | # my personal computer is amd64, test only that version
43 | if pkg.path =~ /amd64/
44 | docker = Dockerer.new(pkg.distro, pkg.release, @integration_test_path, pkg.path)
45 | @ct_runs += 1
46 |
47 | if docker.invoke!
48 | @ct_succ += 1
49 | else
50 | @ct_fail += 1
51 | end
52 | end
53 | end
54 |
55 | if @ct_fail != 0
56 | puts "\n\n\nFAIL: #{@ct_fail} tests failed."
57 | puts "#####################################"
58 | exit 1
59 | elsif @ct_succ == @ct_runs
60 | puts "\n\n\nOK: All integration tests succeeded"
61 | puts "#########################################"
62 | end
63 | end
64 | end
65 |
66 | # builds and 'provisions' the docker image by
67 | # preparing the appropriate folder, constructing
68 | # the test shell script and the dockerfile
69 | # then starting the image.
70 | class Dockerer
71 | attr_accessor :docker_name, :distro_path, :ac_docker_name, :package,
72 | :test_script, :dockerfile
73 |
74 | def initialize(distro_name, release, test_path, package)
75 | self.docker_name = "appcanary/#{distro_name}_#{release}"
76 | self.distro_path = File.join(test_path, "#{distro_name}/#{release}")
77 | self.ac_docker_name = "appcanary:#{distro_name}_#{release}"
78 | self.package = package
79 |
80 | self.test_script = TestSh.new(docker_name)
81 | self.dockerfile = Dockerfile.new(docker_name)
82 | end
83 |
84 | def invoke!
85 | puts "=> Running test for: #{docker_name}"
86 | clean_up!
87 | build!
88 | test!
89 | end
90 |
91 | def test!
92 | puts "# executing test.sh..."
93 | docker = "cd #{@distro_path} && docker run #{@ac_docker_name} /root/test.sh"
94 | cmd = "#{docker} 2>&1"
95 | output = `#{cmd}`
96 |
97 | if VERBOSE
98 | puts "-> #{cmd}"
99 | puts output
100 | end
101 |
102 | if @test_script.success?(output)
103 | puts "OK\n\n"
104 | self.clean_up!
105 | return true
106 | else
107 | puts "FAIL\n\n"
108 | return false
109 | end
110 | end
111 |
112 | def build!
113 | puts "# building #{@docker_name}..."
114 |
115 | if @package.empty?
116 | puts "Hey, we can't find a release. Did you build the packages?"
117 | exit
118 | end
119 |
120 | release_path = @package
121 |
122 | puts "# copying #{@package}"
123 | shell "cp #{release_path} #{@distro_path}/latest.pkg"
124 |
125 | puts "# generating test file"
126 | File.write(File.join(@distro_path, "test.sh"), @test_script.render)
127 | shell "chmod a+x #{File.join(@distro_path, "test.sh")}"
128 |
129 | puts "# generating dockerfile"
130 | File.write(File.join(@distro_path, "Dockerfile"), @dockerfile.render)
131 |
132 | puts "# building docker..."
133 | shell "cd #{@distro_path} && docker pull #{@docker_name}"
134 | shell "cd #{@distro_path} && docker build -t #{@ac_docker_name} ."
135 | end
136 |
137 | def clean_up!
138 | `mkdir -p #{@distro_path}`
139 |
140 | `rm -f #{@distro_path}/latest.pkg`
141 | `rm -f #{@distro_path}/Dockerfile`
142 | `rm -f #{@distro_path}/test.sh`
143 | end
144 |
145 | end
146 |
147 | # generates a Dockerfile that refreshes the
148 | # package index and copies in the provided
149 | # package and the test script
150 | class Dockerfile
151 | TEMPLATE = <<-docker
152 | FROM <%= container_name %>
153 | RUN <%= update_cmd %>
154 | COPY ./test.sh /root/test.sh
155 | COPY latest.pkg /root/latest.pkg
156 | docker
157 |
158 | attr_accessor :docker_name
159 | def initialize(dn)
160 | self.docker_name = dn
161 | end
162 |
163 | def render
164 | container_name = @docker_name
165 | update_cmd = 'exit 1'
166 |
167 | if container_name =~ /centos/
168 | update_cmd = "yum clean all"
169 | else
170 | update_cmd = "apt-get update"
171 | end
172 |
173 | ERB.new(TEMPLATE).result(binding)
174 | end
175 | end
176 |
177 | # test script
178 | # installs the package and then verifies
179 | # that stuff got put in the right place.
180 | # we check to see whether the test 'ran' by
181 | # logging the output and counting the OKAC strings
182 | class TestSh
183 | TEMPLATE = <<-test
184 | #!/usr/bin/env bash
185 | cd /root
186 | <%= install_cmd %>
187 | echo '#########'
188 | echo Does the binary work?
189 | appcanary -version && echo OKAC VERSION
190 |
191 | echo '#########'
192 | echo Did we get the files in?
193 | cat /var/log/appcanary.log && echo OKAC LOG
194 | cat /var/db/appcanary/server.yml && echo OKAC SERVERCONF
195 | cat /etc/appcanary/agent.yml && echo OKAC CONFCONF
196 |
197 | echo and logrotate?
198 | cat /etc/logrotate.d/appcanary && echo OKAC LOGROTATE
199 | stat -c %U /var/log/appcanary.log | grep appcanary && echo OKAC LOGFILE
200 |
201 | echo '#########'
202 | echo What happens when we remove it?
203 |
204 | <%= remove_cmd %> && cat /etc/appcanary/agent.yml 2>&1 && echo OKAC kept
205 | test
206 |
207 | attr_accessor :docker_name
208 | def initialize(dn)
209 | self.docker_name = dn
210 | end
211 |
212 | def render
213 | install_cmd = "exit 1"
214 | if @docker_name =~ /centos/
215 | install_cmd = "rpm --nosignature -i latest.pkg"
216 | # only 80% sure this does what we need
217 | # to test that upgrades don't clobber files
218 | remove_cmd = "echo foo > /etc/appcanary/agent.yml && rpm --nosignature -iv --replacepkgs latest.pkg && (if [ $(cat /etc/appcanary/agent.yml) = 'foo' ]; then exit 0; else exit 1; fi )"
219 | else
220 | install_cmd = "dpkg -i latest.pkg"
221 | remove_cmd = "apt-get -y remove appcanary"
222 | end
223 |
224 | ERB.new(TEMPLATE).result(binding)
225 | end
226 |
227 | def success?(output)
228 | output.scan("OKAC").count == 7
229 | end
230 | end
231 |
232 | def usage
233 | puts "usage: rake integration:single distro=centos release=5 package=releases/package_name_here.rpm"
234 | exit 1
235 | end
236 |
237 | namespace :integration do
238 |
239 | desc "run the integration test on a single distro, release and package"
240 | task :single do
241 | if ENV["DOCKER_HOST"].nil?
242 | puts "Sorry partner, you need boot2docker running."
243 | end
244 |
245 | @integration_test_path = File.dirname(__FILE__)
246 |
247 | if ENV["distro"] && ENV["release"] && ENV["package"]
248 | @distro = ENV["distro"]
249 | @release = ENV["release"]
250 | @package = ENV["package"]
251 |
252 | docker = Dockerer.new(@distro, @release, @integration_test_path, @package)
253 | docker.invoke!
254 | else
255 | usage
256 | end
257 | end
258 |
259 | desc "build every package and then run the integration test on all of them"
260 | task :test => :package do
261 | if ENV["DOCKER_HOST"].nil?
262 | puts "Sorry partner, you need boot2docker running."
263 | end
264 |
265 | puts "\n\n\n#################################"
266 | puts "Running docker integration tests."
267 | puts "#################################\n\n\n"
268 | DTestRunner.new(@built_packages).run!
269 | end
270 | end
271 |
272 |
273 |
274 |
--------------------------------------------------------------------------------
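The usage() string above documents the single-distro entry point; run from the repository root (where releases/ lives), a typical invocation looks like the following, with the package path being the placeholder from usage() rather than a real artifact:
    $ rake integration:single distro=centos release=5 package=releases/package_name_here.rpm
    $ rake integration:test     # builds every package first, then tests each one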
/test/test_detect.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | set -xeo pipefail
3 |
4 | BASEPATH="$( cd "$( dirname "${BASH_SOURCE[0]}" )"/.. && pwd )"
5 |
6 | LSB_RELEASE="test\/data\/lsb-release"
7 | OS_RELEASE="test\/data\/os-release"
8 | RH_RELEASE="test\/data\/redhat-release"
9 |
10 | DETECT_PATH="agent/resources/detect_linux.sh"
11 |
12 | test_detect() {
13 | local replace=${1} with=${2} assert=${3} run_check=""
14 |
15 | run_check=$(cat ${DETECT_PATH} | sed -e "s/${replace}/${with}/" | bash)
16 | if [[ ${run_check} != ${assert} ]]; then
17 | echo "FAIL on ${assert} was ${run_check}"
18 | exit 1
19 | else
20 | echo "OK ${assert}"
21 | fi
22 | }
23 |
24 | main() {
25 | test_detect "\/etc\/lsb-release" ${LSB_RELEASE} "Ubuntu/14.04"
26 | test_detect "\/etc\/os-release" ${OS_RELEASE} "centos/7"
27 | test_detect "\/etc\/redhat-release" ${RH_RELEASE} "centos/6"
28 | }
29 |
30 | main
31 |
--------------------------------------------------------------------------------
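DETECT_PATH and the fixture paths above are relative, so the script is meant to be invoked from the repository root:
    $ bash test/test_detect.sh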
/test/test_server.rb:
--------------------------------------------------------------------------------
1 | require 'sinatra'
2 | require 'json'
3 | require 'pry'
4 | require 'base64'
5 |
6 | set :bind, '0.0.0.0'
7 | def print_bod(body)
8 | bod = JSON.load(body.read)
9 | puts "#" * 10
10 | if bod && bod["contents"]
11 | bod["contents"] = Base64.decode64(bod["contents"])
12 | end
13 | puts bod
14 | puts "#" * 10
15 | end
16 |
17 | get '/' do
18 | "Hello world"
19 | end
20 |
21 | post '/api/v1/agent/heartbeat/:id' do
22 | print_bod(request.body)
23 | {success: true}.to_json
24 | end
25 |
26 | post '/api/v1/agent/servers' do
27 | print_bod(request.body)
28 | {uuid:"12345"}.to_json
29 | end
30 |
31 | put '/api/v1/agent/servers/:id' do
32 | print_bod(request.body)
33 | "OK"
34 | end
35 |
36 | get '/api/v1/agent/servers/:id' do
37 | print_bod(request.body)
38 | content_type :json
39 | File.read("dump.json")
40 | end
41 |
--------------------------------------------------------------------------------
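test_server.rb is a classic-style Sinatra app, so it can be started directly with ruby once sinatra, pry and json are available (e.g. via the repo's Gemfile); it reads dump.json relative to the working directory, so start it from test/. The port and curl call below are illustrative:
    $ cd test && ruby test_server.rb -p 4567
    $ curl http://localhost:4567/    # answers "Hello world"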
/test/var/.gitkeep:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/appcanary/agent/48db7f45c6b2ed2d8d1dede8751e92d61756d984/test/var/.gitkeep
--------------------------------------------------------------------------------