├── .gitignore ├── LICENSE.md ├── README.md ├── changelog.txt ├── coverage.sh ├── denyhosts-server.1 ├── denyhosts-server.conf.example ├── denyhosts-server.init.example ├── denyhosts-server.service.example ├── denyhosts_server ├── __init__.py ├── config.py ├── controllers.py ├── database.py ├── debug_views.py ├── main.py ├── models.py ├── peering.py ├── peering_views.py ├── stats.py ├── utils.py └── views.py ├── scripts └── denyhosts-server ├── setup.cfg ├── setup.py ├── static ├── css │ └── bootstrap.css ├── graph │ └── README └── js │ ├── bootstrap.js │ └── jquery.js ├── template └── stats.html └── tests ├── README ├── __init__.py ├── base.py ├── compare_peers.py ├── fill_database.py ├── peer0.conf ├── peer0.key ├── peer1.conf ├── peer1.key ├── peer2.conf ├── peer2.key ├── peer_unknown.key ├── sim_clients.py ├── test-master.conf ├── test.conf ├── test.py ├── test_concurrency.py ├── test_get_new_hosts.py ├── test_models.py ├── test_peering.py ├── test_purge_methods.py └── test_stats.py /.gitignore: -------------------------------------------------------------------------------- 1 | *.pyc 2 | *.sqlite 3 | .*.swp 4 | log.txt 5 | *.log 6 | *.conf 7 | *.key 8 | _trial_temp/ 9 | build/ 10 | dist/ 11 | *.egg-info/ 12 | MANIFEST 13 | .coverage 14 | htmlcov 15 | static/graph/*.svg 16 | static/graph/*.png 17 | static/css/*.min.css 18 | static/js/*.min.js 19 | -------------------------------------------------------------------------------- /LICENSE.md: -------------------------------------------------------------------------------- 1 | GNU AFFERO GENERAL PUBLIC LICENSE 2 | Version 3, 19 November 2007 3 | 4 | Copyright (C) 2007 Free Software Foundation, Inc. 5 | Everyone is permitted to copy and distribute verbatim copies 6 | of this license document, but changing it is not allowed. 
7 | 8 | Preamble 9 | 10 | The GNU Affero General Public License is a free, copyleft license for 11 | software and other kinds of works, specifically designed to ensure 12 | cooperation with the community in the case of network server software. 13 | 14 | The licenses for most software and other practical works are designed 15 | to take away your freedom to share and change the works. By contrast, 16 | our General Public Licenses are intended to guarantee your freedom to 17 | share and change all versions of a program--to make sure it remains free 18 | software for all its users. 19 | 20 | When we speak of free software, we are referring to freedom, not 21 | price. Our General Public Licenses are designed to make sure that you 22 | have the freedom to distribute copies of free software (and charge for 23 | them if you wish), that you receive source code or can get it if you 24 | want it, that you can change the software or use pieces of it in new 25 | free programs, and that you know you can do these things. 26 | 27 | Developers that use our General Public Licenses protect your rights 28 | with two steps: (1) assert copyright on the software, and (2) offer 29 | you this License which gives you legal permission to copy, distribute 30 | and/or modify the software. 31 | 32 | A secondary benefit of defending all users' freedom is that 33 | improvements made in alternate versions of the program, if they 34 | receive widespread use, become available for other developers to 35 | incorporate. Many developers of free software are heartened and 36 | encouraged by the resulting cooperation. However, in the case of 37 | software used on network servers, this result may fail to come about. 38 | The GNU General Public License permits making a modified version and 39 | letting the public access it on a server without ever releasing its 40 | source code to the public. 
41 | 42 | The GNU Affero General Public License is designed specifically to 43 | ensure that, in such cases, the modified source code becomes available 44 | to the community. It requires the operator of a network server to 45 | provide the source code of the modified version running there to the 46 | users of that server. Therefore, public use of a modified version, on 47 | a publicly accessible server, gives the public access to the source 48 | code of the modified version. 49 | 50 | An older license, called the Affero General Public License and 51 | published by Affero, was designed to accomplish similar goals. This is 52 | a different license, not a version of the Affero GPL, but Affero has 53 | released a new version of the Affero GPL which permits relicensing under 54 | this license. 55 | 56 | The precise terms and conditions for copying, distribution and 57 | modification follow. 58 | 59 | TERMS AND CONDITIONS 60 | 61 | 0. Definitions. 62 | 63 | "This License" refers to version 3 of the GNU Affero General Public License. 64 | 65 | "Copyright" also means copyright-like laws that apply to other kinds of 66 | works, such as semiconductor masks. 67 | 68 | "The Program" refers to any copyrightable work licensed under this 69 | License. Each licensee is addressed as "you". "Licensees" and 70 | "recipients" may be individuals or organizations. 71 | 72 | To "modify" a work means to copy from or adapt all or part of the work 73 | in a fashion requiring copyright permission, other than the making of an 74 | exact copy. The resulting work is called a "modified version" of the 75 | earlier work or a work "based on" the earlier work. 76 | 77 | A "covered work" means either the unmodified Program or a work based 78 | on the Program. 
79 | 80 | To "propagate" a work means to do anything with it that, without 81 | permission, would make you directly or secondarily liable for 82 | infringement under applicable copyright law, except executing it on a 83 | computer or modifying a private copy. Propagation includes copying, 84 | distribution (with or without modification), making available to the 85 | public, and in some countries other activities as well. 86 | 87 | To "convey" a work means any kind of propagation that enables other 88 | parties to make or receive copies. Mere interaction with a user through 89 | a computer network, with no transfer of a copy, is not conveying. 90 | 91 | An interactive user interface displays "Appropriate Legal Notices" 92 | to the extent that it includes a convenient and prominently visible 93 | feature that (1) displays an appropriate copyright notice, and (2) 94 | tells the user that there is no warranty for the work (except to the 95 | extent that warranties are provided), that licensees may convey the 96 | work under this License, and how to view a copy of this License. If 97 | the interface presents a list of user commands or options, such as a 98 | menu, a prominent item in the list meets this criterion. 99 | 100 | 1. Source Code. 101 | 102 | The "source code" for a work means the preferred form of the work 103 | for making modifications to it. "Object code" means any non-source 104 | form of a work. 105 | 106 | A "Standard Interface" means an interface that either is an official 107 | standard defined by a recognized standards body, or, in the case of 108 | interfaces specified for a particular programming language, one that 109 | is widely used among developers working in that language. 
110 | 111 | The "System Libraries" of an executable work include anything, other 112 | than the work as a whole, that (a) is included in the normal form of 113 | packaging a Major Component, but which is not part of that Major 114 | Component, and (b) serves only to enable use of the work with that 115 | Major Component, or to implement a Standard Interface for which an 116 | implementation is available to the public in source code form. A 117 | "Major Component", in this context, means a major essential component 118 | (kernel, window system, and so on) of the specific operating system 119 | (if any) on which the executable work runs, or a compiler used to 120 | produce the work, or an object code interpreter used to run it. 121 | 122 | The "Corresponding Source" for a work in object code form means all 123 | the source code needed to generate, install, and (for an executable 124 | work) run the object code and to modify the work, including scripts to 125 | control those activities. However, it does not include the work's 126 | System Libraries, or general-purpose tools or generally available free 127 | programs which are used unmodified in performing those activities but 128 | which are not part of the work. For example, Corresponding Source 129 | includes interface definition files associated with source files for 130 | the work, and the source code for shared libraries and dynamically 131 | linked subprograms that the work is specifically designed to require, 132 | such as by intimate data communication or control flow between those 133 | subprograms and other parts of the work. 134 | 135 | The Corresponding Source need not include anything that users 136 | can regenerate automatically from other parts of the Corresponding 137 | Source. 138 | 139 | The Corresponding Source for a work in source code form is that 140 | same work. 141 | 142 | 2. Basic Permissions. 
143 | 144 | All rights granted under this License are granted for the term of 145 | copyright on the Program, and are irrevocable provided the stated 146 | conditions are met. This License explicitly affirms your unlimited 147 | permission to run the unmodified Program. The output from running a 148 | covered work is covered by this License only if the output, given its 149 | content, constitutes a covered work. This License acknowledges your 150 | rights of fair use or other equivalent, as provided by copyright law. 151 | 152 | You may make, run and propagate covered works that you do not 153 | convey, without conditions so long as your license otherwise remains 154 | in force. You may convey covered works to others for the sole purpose 155 | of having them make modifications exclusively for you, or provide you 156 | with facilities for running those works, provided that you comply with 157 | the terms of this License in conveying all material for which you do 158 | not control copyright. Those thus making or running the covered works 159 | for you must do so exclusively on your behalf, under your direction 160 | and control, on terms that prohibit them from making any copies of 161 | your copyrighted material outside their relationship with you. 162 | 163 | Conveying under any other circumstances is permitted solely under 164 | the conditions stated below. Sublicensing is not allowed; section 10 165 | makes it unnecessary. 166 | 167 | 3. Protecting Users' Legal Rights From Anti-Circumvention Law. 168 | 169 | No covered work shall be deemed part of an effective technological 170 | measure under any applicable law fulfilling obligations under article 171 | 11 of the WIPO copyright treaty adopted on 20 December 1996, or 172 | similar laws prohibiting or restricting circumvention of such 173 | measures. 
174 | 175 | When you convey a covered work, you waive any legal power to forbid 176 | circumvention of technological measures to the extent such circumvention 177 | is effected by exercising rights under this License with respect to 178 | the covered work, and you disclaim any intention to limit operation or 179 | modification of the work as a means of enforcing, against the work's 180 | users, your or third parties' legal rights to forbid circumvention of 181 | technological measures. 182 | 183 | 4. Conveying Verbatim Copies. 184 | 185 | You may convey verbatim copies of the Program's source code as you 186 | receive it, in any medium, provided that you conspicuously and 187 | appropriately publish on each copy an appropriate copyright notice; 188 | keep intact all notices stating that this License and any 189 | non-permissive terms added in accord with section 7 apply to the code; 190 | keep intact all notices of the absence of any warranty; and give all 191 | recipients a copy of this License along with the Program. 192 | 193 | You may charge any price or no price for each copy that you convey, 194 | and you may offer support or warranty protection for a fee. 195 | 196 | 5. Conveying Modified Source Versions. 197 | 198 | You may convey a work based on the Program, or the modifications to 199 | produce it from the Program, in the form of source code under the 200 | terms of section 4, provided that you also meet all of these conditions: 201 | 202 | a) The work must carry prominent notices stating that you modified 203 | it, and giving a relevant date. 204 | 205 | b) The work must carry prominent notices stating that it is 206 | released under this License and any conditions added under section 207 | 7. This requirement modifies the requirement in section 4 to 208 | "keep intact all notices". 209 | 210 | c) You must license the entire work, as a whole, under this 211 | License to anyone who comes into possession of a copy. 
This 212 | License will therefore apply, along with any applicable section 7 213 | additional terms, to the whole of the work, and all its parts, 214 | regardless of how they are packaged. This License gives no 215 | permission to license the work in any other way, but it does not 216 | invalidate such permission if you have separately received it. 217 | 218 | d) If the work has interactive user interfaces, each must display 219 | Appropriate Legal Notices; however, if the Program has interactive 220 | interfaces that do not display Appropriate Legal Notices, your 221 | work need not make them do so. 222 | 223 | A compilation of a covered work with other separate and independent 224 | works, which are not by their nature extensions of the covered work, 225 | and which are not combined with it such as to form a larger program, 226 | in or on a volume of a storage or distribution medium, is called an 227 | "aggregate" if the compilation and its resulting copyright are not 228 | used to limit the access or legal rights of the compilation's users 229 | beyond what the individual works permit. Inclusion of a covered work 230 | in an aggregate does not cause this License to apply to the other 231 | parts of the aggregate. 232 | 233 | 6. Conveying Non-Source Forms. 234 | 235 | You may convey a covered work in object code form under the terms 236 | of sections 4 and 5, provided that you also convey the 237 | machine-readable Corresponding Source under the terms of this License, 238 | in one of these ways: 239 | 240 | a) Convey the object code in, or embodied in, a physical product 241 | (including a physical distribution medium), accompanied by the 242 | Corresponding Source fixed on a durable physical medium 243 | customarily used for software interchange. 
244 | 245 | b) Convey the object code in, or embodied in, a physical product 246 | (including a physical distribution medium), accompanied by a 247 | written offer, valid for at least three years and valid for as 248 | long as you offer spare parts or customer support for that product 249 | model, to give anyone who possesses the object code either (1) a 250 | copy of the Corresponding Source for all the software in the 251 | product that is covered by this License, on a durable physical 252 | medium customarily used for software interchange, for a price no 253 | more than your reasonable cost of physically performing this 254 | conveying of source, or (2) access to copy the 255 | Corresponding Source from a network server at no charge. 256 | 257 | c) Convey individual copies of the object code with a copy of the 258 | written offer to provide the Corresponding Source. This 259 | alternative is allowed only occasionally and noncommercially, and 260 | only if you received the object code with such an offer, in accord 261 | with subsection 6b. 262 | 263 | d) Convey the object code by offering access from a designated 264 | place (gratis or for a charge), and offer equivalent access to the 265 | Corresponding Source in the same way through the same place at no 266 | further charge. You need not require recipients to copy the 267 | Corresponding Source along with the object code. If the place to 268 | copy the object code is a network server, the Corresponding Source 269 | may be on a different server (operated by you or a third party) 270 | that supports equivalent copying facilities, provided you maintain 271 | clear directions next to the object code saying where to find the 272 | Corresponding Source. Regardless of what server hosts the 273 | Corresponding Source, you remain obligated to ensure that it is 274 | available for as long as needed to satisfy these requirements. 
275 | 276 | e) Convey the object code using peer-to-peer transmission, provided 277 | you inform other peers where the object code and Corresponding 278 | Source of the work are being offered to the general public at no 279 | charge under subsection 6d. 280 | 281 | A separable portion of the object code, whose source code is excluded 282 | from the Corresponding Source as a System Library, need not be 283 | included in conveying the object code work. 284 | 285 | A "User Product" is either (1) a "consumer product", which means any 286 | tangible personal property which is normally used for personal, family, 287 | or household purposes, or (2) anything designed or sold for incorporation 288 | into a dwelling. In determining whether a product is a consumer product, 289 | doubtful cases shall be resolved in favor of coverage. For a particular 290 | product received by a particular user, "normally used" refers to a 291 | typical or common use of that class of product, regardless of the status 292 | of the particular user or of the way in which the particular user 293 | actually uses, or expects or is expected to use, the product. A product 294 | is a consumer product regardless of whether the product has substantial 295 | commercial, industrial or non-consumer uses, unless such uses represent 296 | the only significant mode of use of the product. 297 | 298 | "Installation Information" for a User Product means any methods, 299 | procedures, authorization keys, or other information required to install 300 | and execute modified versions of a covered work in that User Product from 301 | a modified version of its Corresponding Source. The information must 302 | suffice to ensure that the continued functioning of the modified object 303 | code is in no case prevented or interfered with solely because 304 | modification has been made. 
305 | 306 | If you convey an object code work under this section in, or with, or 307 | specifically for use in, a User Product, and the conveying occurs as 308 | part of a transaction in which the right of possession and use of the 309 | User Product is transferred to the recipient in perpetuity or for a 310 | fixed term (regardless of how the transaction is characterized), the 311 | Corresponding Source conveyed under this section must be accompanied 312 | by the Installation Information. But this requirement does not apply 313 | if neither you nor any third party retains the ability to install 314 | modified object code on the User Product (for example, the work has 315 | been installed in ROM). 316 | 317 | The requirement to provide Installation Information does not include a 318 | requirement to continue to provide support service, warranty, or updates 319 | for a work that has been modified or installed by the recipient, or for 320 | the User Product in which it has been modified or installed. Access to a 321 | network may be denied when the modification itself materially and 322 | adversely affects the operation of the network or violates the rules and 323 | protocols for communication across the network. 324 | 325 | Corresponding Source conveyed, and Installation Information provided, 326 | in accord with this section must be in a format that is publicly 327 | documented (and with an implementation available to the public in 328 | source code form), and must require no special password or key for 329 | unpacking, reading or copying. 330 | 331 | 7. Additional Terms. 332 | 333 | "Additional permissions" are terms that supplement the terms of this 334 | License by making exceptions from one or more of its conditions. 335 | Additional permissions that are applicable to the entire Program shall 336 | be treated as though they were included in this License, to the extent 337 | that they are valid under applicable law. 
If additional permissions 338 | apply only to part of the Program, that part may be used separately 339 | under those permissions, but the entire Program remains governed by 340 | this License without regard to the additional permissions. 341 | 342 | When you convey a copy of a covered work, you may at your option 343 | remove any additional permissions from that copy, or from any part of 344 | it. (Additional permissions may be written to require their own 345 | removal in certain cases when you modify the work.) You may place 346 | additional permissions on material, added by you to a covered work, 347 | for which you have or can give appropriate copyright permission. 348 | 349 | Notwithstanding any other provision of this License, for material you 350 | add to a covered work, you may (if authorized by the copyright holders of 351 | that material) supplement the terms of this License with terms: 352 | 353 | a) Disclaiming warranty or limiting liability differently from the 354 | terms of sections 15 and 16 of this License; or 355 | 356 | b) Requiring preservation of specified reasonable legal notices or 357 | author attributions in that material or in the Appropriate Legal 358 | Notices displayed by works containing it; or 359 | 360 | c) Prohibiting misrepresentation of the origin of that material, or 361 | requiring that modified versions of such material be marked in 362 | reasonable ways as different from the original version; or 363 | 364 | d) Limiting the use for publicity purposes of names of licensors or 365 | authors of the material; or 366 | 367 | e) Declining to grant rights under trademark law for use of some 368 | trade names, trademarks, or service marks; or 369 | 370 | f) Requiring indemnification of licensors and authors of that 371 | material by anyone who conveys the material (or modified versions of 372 | it) with contractual assumptions of liability to the recipient, for 373 | any liability that these contractual assumptions directly impose on 
374 | those licensors and authors. 375 | 376 | All other non-permissive additional terms are considered "further 377 | restrictions" within the meaning of section 10. If the Program as you 378 | received it, or any part of it, contains a notice stating that it is 379 | governed by this License along with a term that is a further 380 | restriction, you may remove that term. If a license document contains 381 | a further restriction but permits relicensing or conveying under this 382 | License, you may add to a covered work material governed by the terms 383 | of that license document, provided that the further restriction does 384 | not survive such relicensing or conveying. 385 | 386 | If you add terms to a covered work in accord with this section, you 387 | must place, in the relevant source files, a statement of the 388 | additional terms that apply to those files, or a notice indicating 389 | where to find the applicable terms. 390 | 391 | Additional terms, permissive or non-permissive, may be stated in the 392 | form of a separately written license, or stated as exceptions; 393 | the above requirements apply either way. 394 | 395 | 8. Termination. 396 | 397 | You may not propagate or modify a covered work except as expressly 398 | provided under this License. Any attempt otherwise to propagate or 399 | modify it is void, and will automatically terminate your rights under 400 | this License (including any patent licenses granted under the third 401 | paragraph of section 11). 402 | 403 | However, if you cease all violation of this License, then your 404 | license from a particular copyright holder is reinstated (a) 405 | provisionally, unless and until the copyright holder explicitly and 406 | finally terminates your license, and (b) permanently, if the copyright 407 | holder fails to notify you of the violation by some reasonable means 408 | prior to 60 days after the cessation. 
409 | 410 | Moreover, your license from a particular copyright holder is 411 | reinstated permanently if the copyright holder notifies you of the 412 | violation by some reasonable means, this is the first time you have 413 | received notice of violation of this License (for any work) from that 414 | copyright holder, and you cure the violation prior to 30 days after 415 | your receipt of the notice. 416 | 417 | Termination of your rights under this section does not terminate the 418 | licenses of parties who have received copies or rights from you under 419 | this License. If your rights have been terminated and not permanently 420 | reinstated, you do not qualify to receive new licenses for the same 421 | material under section 10. 422 | 423 | 9. Acceptance Not Required for Having Copies. 424 | 425 | You are not required to accept this License in order to receive or 426 | run a copy of the Program. Ancillary propagation of a covered work 427 | occurring solely as a consequence of using peer-to-peer transmission 428 | to receive a copy likewise does not require acceptance. However, 429 | nothing other than this License grants you permission to propagate or 430 | modify any covered work. These actions infringe copyright if you do 431 | not accept this License. Therefore, by modifying or propagating a 432 | covered work, you indicate your acceptance of this License to do so. 433 | 434 | 10. Automatic Licensing of Downstream Recipients. 435 | 436 | Each time you convey a covered work, the recipient automatically 437 | receives a license from the original licensors, to run, modify and 438 | propagate that work, subject to this License. You are not responsible 439 | for enforcing compliance by third parties with this License. 440 | 441 | An "entity transaction" is a transaction transferring control of an 442 | organization, or substantially all assets of one, or subdividing an 443 | organization, or merging organizations. 
If propagation of a covered 444 | work results from an entity transaction, each party to that 445 | transaction who receives a copy of the work also receives whatever 446 | licenses to the work the party's predecessor in interest had or could 447 | give under the previous paragraph, plus a right to possession of the 448 | Corresponding Source of the work from the predecessor in interest, if 449 | the predecessor has it or can get it with reasonable efforts. 450 | 451 | You may not impose any further restrictions on the exercise of the 452 | rights granted or affirmed under this License. For example, you may 453 | not impose a license fee, royalty, or other charge for exercise of 454 | rights granted under this License, and you may not initiate litigation 455 | (including a cross-claim or counterclaim in a lawsuit) alleging that 456 | any patent claim is infringed by making, using, selling, offering for 457 | sale, or importing the Program or any portion of it. 458 | 459 | 11. Patents. 460 | 461 | A "contributor" is a copyright holder who authorizes use under this 462 | License of the Program or a work on which the Program is based. The 463 | work thus licensed is called the contributor's "contributor version". 464 | 465 | A contributor's "essential patent claims" are all patent claims 466 | owned or controlled by the contributor, whether already acquired or 467 | hereafter acquired, that would be infringed by some manner, permitted 468 | by this License, of making, using, or selling its contributor version, 469 | but do not include claims that would be infringed only as a 470 | consequence of further modification of the contributor version. For 471 | purposes of this definition, "control" includes the right to grant 472 | patent sublicenses in a manner consistent with the requirements of 473 | this License. 
474 | 475 | Each contributor grants you a non-exclusive, worldwide, royalty-free 476 | patent license under the contributor's essential patent claims, to 477 | make, use, sell, offer for sale, import and otherwise run, modify and 478 | propagate the contents of its contributor version. 479 | 480 | In the following three paragraphs, a "patent license" is any express 481 | agreement or commitment, however denominated, not to enforce a patent 482 | (such as an express permission to practice a patent or covenant not to 483 | sue for patent infringement). To "grant" such a patent license to a 484 | party means to make such an agreement or commitment not to enforce a 485 | patent against the party. 486 | 487 | If you convey a covered work, knowingly relying on a patent license, 488 | and the Corresponding Source of the work is not available for anyone 489 | to copy, free of charge and under the terms of this License, through a 490 | publicly available network server or other readily accessible means, 491 | then you must either (1) cause the Corresponding Source to be so 492 | available, or (2) arrange to deprive yourself of the benefit of the 493 | patent license for this particular work, or (3) arrange, in a manner 494 | consistent with the requirements of this License, to extend the patent 495 | license to downstream recipients. "Knowingly relying" means you have 496 | actual knowledge that, but for the patent license, your conveying the 497 | covered work in a country, or your recipient's use of the covered work 498 | in a country, would infringe one or more identifiable patents in that 499 | country that you have reason to believe are valid. 
500 | 501 | If, pursuant to or in connection with a single transaction or 502 | arrangement, you convey, or propagate by procuring conveyance of, a 503 | covered work, and grant a patent license to some of the parties 504 | receiving the covered work authorizing them to use, propagate, modify 505 | or convey a specific copy of the covered work, then the patent license 506 | you grant is automatically extended to all recipients of the covered 507 | work and works based on it. 508 | 509 | A patent license is "discriminatory" if it does not include within 510 | the scope of its coverage, prohibits the exercise of, or is 511 | conditioned on the non-exercise of one or more of the rights that are 512 | specifically granted under this License. You may not convey a covered 513 | work if you are a party to an arrangement with a third party that is 514 | in the business of distributing software, under which you make payment 515 | to the third party based on the extent of your activity of conveying 516 | the work, and under which the third party grants, to any of the 517 | parties who would receive the covered work from you, a discriminatory 518 | patent license (a) in connection with copies of the covered work 519 | conveyed by you (or copies made from those copies), or (b) primarily 520 | for and in connection with specific products or compilations that 521 | contain the covered work, unless you entered into that arrangement, 522 | or that patent license was granted, prior to 28 March 2007. 523 | 524 | Nothing in this License shall be construed as excluding or limiting 525 | any implied license or other defenses to infringement that may 526 | otherwise be available to you under applicable patent law. 527 | 528 | 12. No Surrender of Others' Freedom. 529 | 530 | If conditions are imposed on you (whether by court order, agreement or 531 | otherwise) that contradict the conditions of this License, they do not 532 | excuse you from the conditions of this License. 
If you cannot convey a 533 | covered work so as to satisfy simultaneously your obligations under this 534 | License and any other pertinent obligations, then as a consequence you may 535 | not convey it at all. For example, if you agree to terms that obligate you 536 | to collect a royalty for further conveying from those to whom you convey 537 | the Program, the only way you could satisfy both those terms and this 538 | License would be to refrain entirely from conveying the Program. 539 | 540 | 13. Remote Network Interaction; Use with the GNU General Public License. 541 | 542 | Notwithstanding any other provision of this License, if you modify the 543 | Program, your modified version must prominently offer all users 544 | interacting with it remotely through a computer network (if your version 545 | supports such interaction) an opportunity to receive the Corresponding 546 | Source of your version by providing access to the Corresponding Source 547 | from a network server at no charge, through some standard or customary 548 | means of facilitating copying of software. This Corresponding Source 549 | shall include the Corresponding Source for any work covered by version 3 550 | of the GNU General Public License that is incorporated pursuant to the 551 | following paragraph. 552 | 553 | Notwithstanding any other provision of this License, you have 554 | permission to link or combine any covered work with a work licensed 555 | under version 3 of the GNU General Public License into a single 556 | combined work, and to convey the resulting work. The terms of this 557 | License will continue to apply to the part which is the covered work, 558 | but the work with which it is combined will remain governed by version 559 | 3 of the GNU General Public License. 560 | 561 | 14. Revised Versions of this License. 562 | 563 | The Free Software Foundation may publish revised and/or new versions of 564 | the GNU Affero General Public License from time to time. 
Such new versions 565 | will be similar in spirit to the present version, but may differ in detail to 566 | address new problems or concerns. 567 | 568 | Each version is given a distinguishing version number. If the 569 | Program specifies that a certain numbered version of the GNU Affero General 570 | Public License "or any later version" applies to it, you have the 571 | option of following the terms and conditions either of that numbered 572 | version or of any later version published by the Free Software 573 | Foundation. If the Program does not specify a version number of the 574 | GNU Affero General Public License, you may choose any version ever published 575 | by the Free Software Foundation. 576 | 577 | If the Program specifies that a proxy can decide which future 578 | versions of the GNU Affero General Public License can be used, that proxy's 579 | public statement of acceptance of a version permanently authorizes you 580 | to choose that version for the Program. 581 | 582 | Later license versions may give you additional or different 583 | permissions. However, no additional obligations are imposed on any 584 | author or copyright holder as a result of your choosing to follow a 585 | later version. 586 | 587 | 15. Disclaimer of Warranty. 588 | 589 | THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY 590 | APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT 591 | HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY 592 | OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, 593 | THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 594 | PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM 595 | IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF 596 | ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 597 | 598 | 16. Limitation of Liability. 
599 | 600 | IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING 601 | WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS 602 | THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY 603 | GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE 604 | USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF 605 | DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD 606 | PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), 607 | EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF 608 | SUCH DAMAGES. 609 | 610 | 17. Interpretation of Sections 15 and 16. 611 | 612 | If the disclaimer of warranty and limitation of liability provided 613 | above cannot be given local legal effect according to their terms, 614 | reviewing courts shall apply local law that most closely approximates 615 | an absolute waiver of all civil liability in connection with the 616 | Program, unless a warranty or assumption of liability accompanies a 617 | copy of the Program in return for a fee. 618 | 619 | END OF TERMS AND CONDITIONS 620 | 621 | How to Apply These Terms to Your New Programs 622 | 623 | If you develop a new program, and you want it to be of the greatest 624 | possible use to the public, the best way to achieve this is to make it 625 | free software which everyone can redistribute and change under these terms. 626 | 627 | To do so, attach the following notices to the program. It is safest 628 | to attach them to the start of each source file to most effectively 629 | state the exclusion of warranty; and each file should have at least 630 | the "copyright" line and a pointer to where the full notice is found. 
631 | 632 | 633 | Copyright (C) 634 | 635 | This program is free software: you can redistribute it and/or modify 636 | it under the terms of the GNU Affero General Public License as published 637 | by the Free Software Foundation, either version 3 of the License, or 638 | (at your option) any later version. 639 | 640 | This program is distributed in the hope that it will be useful, 641 | but WITHOUT ANY WARRANTY; without even the implied warranty of 642 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 643 | GNU Affero General Public License for more details. 644 | 645 | You should have received a copy of the GNU Affero General Public License 646 | along with this program. If not, see . 647 | 648 | Also add information on how to contact you by electronic and paper mail. 649 | 650 | If your software can interact with users remotely through a computer 651 | network, you should also make sure that it provides a way for users to 652 | get its source. For example, if your program is a web application, its 653 | interface could display a "Source" link that leads users to an archive 654 | of the code. There are many ways you could offer source, and different 655 | solutions will be better for different programs; see section 13 for the 656 | specific requirements. 657 | 658 | You should also get your employer (if you work as a programmer) or school, 659 | if any, to sign a "copyright disclaimer" for the program, if necessary. 660 | For more information on this, and how to apply and follow the GNU AGPL, see 661 | . 662 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # README for denyhosts-server 2 | 3 | `denyhosts-server` is a server that allows `denyhosts` clients to share blocked IP 4 | addresses. 
It is intended to be a drop-in replacement for the service at 5 | `xmlrpc.denyhosts.net` that up to now has been provided by the original author 6 | of `denyhosts`. 7 | 8 | ## Features 9 | - Drop-in replacement for legacy `denyhosts` sync server 10 | - Supports sqlite and mysql databases 11 | - Can run as non-privileged user 12 | - Supports database evolution for easy upgrades 13 | - Robust design which supports hundreds of connections per second 14 | - Supports bootstrapping from legacy server 15 | - Synchronisation algorithm that has safeguards against database poisoning 16 | - Fully configurable 17 | - Dynamically generated statistics web page 18 | - Peering mode: multiple servers can shared their information for load-balancing 19 | and to prevent a single point of failure 20 | 21 | ## Prerequisites 22 | - MySQL database is preferred for large sites. For testing purposes sqlite is 23 | also supported 24 | - Python 2.7 with setuptools 25 | - The other Python libraries are installed automatically by the setup.py script. 26 | The GeoIP library needs the libgeoip development headers. On a Debian system, 27 | install them by running `apt-get install libgeoip-dev`. To install the 28 | free GeoIP database, run `apt-get install geoip-database`. 29 | Note: To install the Python GeoIP library on FreeBSD, edit your 30 | `~/.pydistutils.cfg` to contain the following: 31 | ``` 32 | [build_ext] 33 | include_dirs=/usr/local/include 34 | library_dirs=/usr/local/lib 35 | ``` 36 | - `denyhosts-server` is developed and tested on a Debian GNU/Linux system. It should 37 | work on any Linux system with Python. Microsoft Windows is not a supported 38 | platform, although it should work without major modifications. 39 | - On most installations the sqlite3 Python library comes with Python 2.7. If 40 | not, you need to install it manually, possibly with using pip: 41 | `pip install pysqlite` or, on Debian/Ubuntu, `apt-get install python-pysqlite2`. 
42 | - If you use a MySQL database, you need to install the appropriate Python 43 | library. possibly by running `pip install MySQL-python`. On Debian/Ubuntu, 44 | use `apt-get install python-mysqldb`. 45 | - If you're on a Debian, and possible also Ubuntu system, you'll make your life 46 | easier when you install the some packages: 47 | apt-get install python-dev python-pip python-setuptools libgeoip-dev \ 48 | geoip-database libpng-dev libxft-dev python-matplotlib python-twisted \ 49 | libsodium-dev libffi-dev yui-compressor 50 | 51 | ## Installation 52 | Run the following command: `sudo setup.py develop` to download the needed 53 | Python libraries. Then run `sudo setup.py minify_js minify_css install` to 54 | minify the used JavaScript and CSS libraries, install the Python scripts 55 | onto your system (usually in `/usr/local/lib/python2.7/dist-packages`) 56 | and the Python script `/usr/local/bin/denyhosts-server`. 57 | 58 | ## Configuration 59 | Create the database and a database user with full rights to it. Copy the 60 | `denyhosts-server.conf.example` file to `/etc/denyhosts-server.conf` and edit it. 61 | Fill in the database parameters, the location of the log file (which should be 62 | writable by the system user that will be running denyhosts-server) and 63 | other settings you wish to change. `graph_dir` in the `stats` sections is 64 | another location that should be writable by `denyhosts-server`. 65 | 66 | Prepare the database for first use with the command `denyhosts-server 67 | --recreate-database`. This will create the tables needed by denyhosts-server. 68 | 69 | ## Running denyhosts-server 70 | Simply run `denyhosts-server`. Unless there are unexpected errors, this will give no 71 | output and the server will just keep running. 
Configure your DenyHosts clients 72 | to use the new synchronisation server by setting 73 | ``` 74 | SYNC_SERVER=http://your.host.name:9911 75 | ``` 76 | Once the server is running, you can watch the statistics page at 77 | `http://your.host.name:9911`. 78 | 79 | These URLs look the same, but the xmlrpc library from the `DenyHosts` 80 | client will actually connect to `http://your.host.name:9911/RPC2`. The port 81 | numbers are configurable. The statistics page can be on the same port as 82 | the RPC server, or on another. 83 | 84 | The statistics page is updated by default every 10 minutes (configurable with 85 | the `update_frequency` settings). The graphs on the statistics page will only 86 | be generated when there is at least one report in the database. 87 | 88 | ## Signals 89 | When `denyhosts-server` receives the `SIGHUP` signal, it will re-read the 90 | configuration file. Changes to the database configuration are ignored. 91 | 92 | ## Updates 93 | Installing the new version of `denyhosts-server` with `./setup.py install`. 94 | Edit the configuration file at `/etc/denyhosts-server.conf` to configure any new 95 | feature added since the last release. Check `changelog.txt` for new 96 | configuration items. 97 | 98 | Stop denyhosts-server, update the database tables by running `denyhosts-server --evolve-database` and 99 | restart denyhosts-server. 100 | 101 | ## Maintenance 102 | Old reports will be automatically purged by the configurable maintenance job. 103 | See the configuration file for configuration options. To clean all reports by 104 | clients, use the `--purge-reported-addresses` command line option. To clean all 105 | reports retrieved from the legacy sync server, use the 106 | `--purge-legacy-addresses` command line option. To purge a specific IP address 107 | from both the reported and the legacy host lists, use the `--purge-ip` command 108 | line option. 109 | 110 | Note: Use these options with care. 
Do not use them while `denyhosts-server` is 111 | running, since this may cause database inconsistencies. Use the `--force` 112 | command line options to skip the safety prompt when using the purge options. 113 | 114 | ## Peering 115 | If you wish to configure peering between multiple servers, do the following. 116 | In the configuration file look at the `[peering]` section. Set the 117 | `key_file` option and restart `denyhosts-server`. It will create a new key file 118 | if it doesn't exist yet. If you look at the contents of the key file, note that 119 | it contains two keys. The `pub` (for "public") key should be used in the 120 | configuration files of the peers. 121 | 122 | For each peer, add two lines to the `[peering]` section. If you name the peer 123 | `PEERNAME`, add the peer's url as `peer_PEERNAME_url` and add its public key 124 | (a 32-byte random number, represented as a hexadecimal string) as 125 | `peer_PEERNAME_key`. 126 | 127 | Make sure that the peering configuration of all participating peers is consistent, 128 | i.e., every peer should know the URLs and keys of all other peers. You can use the 129 | `--check-peers` command line option to perform this consistency check. It will 130 | ask all configured peers for their list of peers, and compare them with the ones 131 | it knows. 132 | 133 | All peer-to-peer communication is fully encrypted and authenticated, to prevent 134 | denial of service attacks. Peers will share reports by clients directly when 135 | received. If a peer is offline for a while, it will NOT receive the reports 136 | it missed. 137 | 138 | When you add a new peer, you can bootstrap its database from one of the existing 139 | peers using the `--bootstrap-from-peer`` option. Give the URL of the peer you 140 | want to bootstrap from as parameter. The bootstrap option is not very well 141 | optimized since it is not anticipated to be used very often. 
142 | 143 | ## Links 144 | - [`denyhosts-server` project site](https://github.com/janpascal/denyhosts_sync) 145 | - [`denyhosts` project site](https://github.com/denyhosts/denyhosts) 146 | - [Information on synchronisation algorithm](https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=622697) 147 | - [Original, seemingly abandoned `DenyHosts` project](http://www.denyhosts.net) 148 | 149 | ## Copyright and license 150 | 151 | ### denyhosts-server 152 | Copyright (C) 2015-2017 Jan-Pascal van Best 153 | 154 | This program is free software: you can redistribute it and/or modify 155 | it under the terms of the GNU Affero General Public License as published 156 | by the Free Software Foundation, either version 3 of the License, or 157 | (at your option) any later version. 158 | 159 | This program is distributed in the hope that it will be useful, 160 | but WITHOUT ANY WARRANTY; without even the implied warranty of 161 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 162 | GNU Affero General Public License for more details. 163 | 164 | You should have received a copy of the GNU Affero General Public License 165 | along with this program. If not, see . 166 | 167 | ### Synchronisation algorithm 168 | The synchronisation algorithm implemented in denyhosts-server is based 169 | on an article by Anne Bezemer, published as Debian bug#622697 and 170 | archived at [Debian bug#622697](https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=622697) 171 | The article is Copyright (C) 2011 J.A. Bezemer 172 | and licensed "either GPL >=3 or AGPL >=3, at your option". 
173 | -------------------------------------------------------------------------------- /changelog.txt: -------------------------------------------------------------------------------- 1 | Release 2.2.3 (2017-07-10) 2 | - Generate graph even if no data, with banner saying 'not enough data' 3 | 4 | Release 2.2.2 (2017-05-03) 5 | - Fix unit test 6 | - Fix crash when generating stats in certain cases 7 | 8 | Release 2.2.1 (2017-05-02) 9 | - Fix for for version of python-twister>=16.0 10 | - Update documentation 11 | 12 | Release 2.2.0 (2016-06-15) 13 | - Implemented bootstrapping of database from another peer 14 | 15 | Release 2.1.0 (2016-06-10) 16 | - Implemented peering between multiple servers. See README.md for details 17 | and configuration 18 | 19 | Release 2.0.0 (2015-10-01) 20 | - Renamed to denyhosts-server to avoid confusion with Debian dh_* commands 21 | - Add percentage to country graph 22 | 23 | Release 1.4.1 (2015-10-01) 24 | - Add status command to init script 25 | - Add Description header to initscript 26 | - Remove ancient IE compatibility cruft 27 | - Put un-minified js and css in package source, minify using 28 | python setup.py minify_js minify_css 29 | 30 | Release 1.4.0 (2015-09-19) 31 | - Add historical statistics that survive a database purge, using 32 | the 'history' table 33 | - Add graph showing the distribution of attacks over countries of origin 34 | - Several small fixes 35 | - Remove hostname column from stats when reverse dns lookup is disabled 36 | 37 | Release 1.3.0 (2015-09-08) 38 | - Improve performance, removed some blocking code from the statistics update 39 | - Better graphs, replaced pygal graphing library with matplotlib 40 | - Fix crash when statistics page was requested before statistics are ready 41 | - Show ip address instead of 'disabled' when hostname resolving is disabled 42 | - Stability fixes 43 | - Add trend lines to graphs 44 | - Add historical graph, showing all reports in the database 45 | - Calculate graphs for 
stats page in separate thread 46 | 47 | Release 1.2.0 (2015-07-31) 48 | - Fix Content-Type headers of SVG files 49 | - Fix size of SVG graphs for Internet Explorer 50 | - Fix error with generating stats when database is empty 51 | - Improve logging of exceptions 52 | - Provide more information in the README for installing the GeoIP library 53 | 54 | Release 1.1.1 (2015-07-24) 55 | - Bugfix release, include actual JavaScript and CSS files 56 | 57 | Release 1.1 (2015-07-24) 58 | - Moved XMLRPC location to the more standard http://your.host.name:9911/RPC2 59 | - Added server statistics page at http://your.host.name:9911 60 | When updating, please review the [stats] section of the configuration file. 61 | - Write periodical basic database state to log file 62 | - Improved error handling 63 | - Do not install the configuration file by default, provide an example file instead 64 | - Provide example systemd service file and init script 65 | 66 | Release 1.0 (2015-07-15) 67 | - Unit tests added 68 | - Fix and/or document dependencies on ipaddr and other Python libraries 69 | - Add database evolution to repair bug in database maintenance, which could leave crackers without reports 70 | - Add separate legacy_expiry_days config setting 71 | - Added --purge-legacy-addresses, --purge-reported-addresses and --purge-ip command line options 72 | - Change default setting not to use legacy sync server 73 | - Sending SIGHUP now causes dh_syncserver to re-read the configuration file 74 | - Updated and clarified licensing of Anne Bezemer's algorithm 75 | 76 | Release 0.9 (2015-07-06) 77 | - Improve report merging algorithm 78 | - Use better default for parameters for sync with legacy server 79 | 80 | Release 0.4 (2015-07-04) 81 | - Make maintenance job quicker and less memory intensive 82 | - Make debug xmlrpc functions a configuration option 83 | 84 | Release 0.3 (2015-07-02) 85 | - Added README.md 86 | - Improved setup script 87 | - Fix creating initial database 88 | - Fix default 
log file and add cp_max to default config file 89 | - Add database schema version check at daemon startup 90 | - Exit dh_syncserver from --recreate and --evolve-database in case of error 91 | - Check for supported database type in config file 92 | - Database optimisation 93 | - Clean up default config file 94 | - Support MySQLdb database 95 | - Added --recreate-database and --evolve-database command line options 96 | - Make log level configurable 97 | - Fix concurrency issues 98 | - Define defaults for config file options 99 | - Add automatic periodical legacy sync job, to fetch hosts from legacy sync server 100 | - Stability fixes 101 | - Rename config file to dh_syncserver.conf and install in /etc/ 102 | - Added setup.py script; move main.py to dh_syncserver script 103 | - Moved code to dh_syncserver namespace to make room for tests 104 | - Use hosts_added parameter of get_new_hosts to filter out hosts just sent by client 105 | - Make tcp port to listen on configurable 106 | - Max number number of crackers reported to denyhosts configurable (default 50) 107 | - Implemented maintenance job 108 | - Check IP addresses for local addresses, RFC1918, multicast, loopback, etc 109 | - Refuse timestamps from future; return Fault reply on illegal input 110 | - Check xmlrpc parameters for validity 111 | - Added copyright info 112 | - Mostly implemented synchronisation algorithm from Debian #622697 113 | -------------------------------------------------------------------------------- /coverage.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | python-coverage run /usr/bin/trial \ 3 | tests/test_get_new_hosts.py \ 4 | tests/test_models.py \ 5 | tests/test_purge_methods.py \ 6 | tests/test_stats.py 7 | python-coverage html 8 | -------------------------------------------------------------------------------- /denyhosts-server.1: -------------------------------------------------------------------------------- 1 | .TH 
denyhosts-server 1 "Fri Juli 24 23:51:12 CEST 2015" 2 | .SH NAME 3 | .B denyhosts-server 4 | - service to share blocked addresses from denyhosts. 5 | .SH SYNOPSIS 6 | .B denyhosts-server [-h] [-c CONFIG] [--recreate-database] 7 | .B [--evolve-database] [--purge-legacy-addresses] 8 | .B [--purge-reported-addresses] [--purge-ip IP_ADDRESS] [-f] 9 | .SH DESCRIPTION 10 | .PP 11 | denyhosts-server is a server that allows denyhosts clients to share blocked IP 12 | addresses. It is intended to be a drop-in replacement for the service at 13 | xmlrpc.denyhosts.net that up to now has been provided by the original author 14 | of denyhosts. 15 | 16 | .SH "OPTIONS 17 | .TP 18 | .B \-h 19 | Print help information and exit. 20 | .TP 21 | .B \-c CONFIG 22 | Read configuration from CONFIG 23 | .TP 24 | .B \-\-recreate-database 25 | If necessary, remove old database tables, then create new tables and exit. 26 | .TP 27 | .B \-\-evolve-database 28 | Update database schema to the latest version and exit. 29 | .TP 30 | .B \-\-purge-legacy-addresses 31 | Purge all ip addresses previously downloaded from the legacy server and exit. 32 | .TP 33 | .B \-\-purge-reported-addresses 34 | Purge all ip addresses previously reported by denyhosts clients and exit. 35 | .TP 36 | .B \-\-purge-ip IP_ADDRESS 37 | Purge given IP address from the database and exit. The address is purged 38 | from the legacy table and from the reports by denyhosts clients. 39 | .TP 40 | .B \-f, \-\-force 41 | When the \-\-recreate-database, \-\-evolve-database, \-\-purge-legacy-addresses, 42 | \-\-purge-reported-addresses or \-\-purge-ip are given, execute the given 43 | operation directly without asking questions. 
44 | 45 | .SH AUTHOR 46 | Jan-Pascal van Best 47 | .SH SEE ALSO 48 | .B denyhosts 49 | (1) 50 | -------------------------------------------------------------------------------- /denyhosts-server.conf.example: -------------------------------------------------------------------------------- 1 | # section database. All configuration items besides 'type' are passed as-is 2 | # to the database connect() function 3 | 4 | # Database settings. Depending on the database type, you can add several 5 | # parameters to connect to the database. 6 | 7 | # For sqlite3, just fill in the database filename as "database" 8 | # sqlite3 is not suitable for a high volume server 9 | 10 | # For PostgreSQL use the following parameters: 11 | # type - psycopg2 12 | # dbname – the database name (only in the dsn string) 13 | # database – the database name (only as keyword argument) 14 | # user – user name used to authenticate 15 | # password – password used to authenticate 16 | # host – database host address (defaults to UNIX socket if not provided) 17 | # port – connection port number (defaults to 5432 if not provided) 18 | 19 | 20 | # For mysql, use the following parameters: 21 | # host – name of host to connect to. Default: use the local host via a UNIX socket (where applicable) 22 | # user – user to authenticate as. Default: current effective user. 23 | # passwd – password to authenticate with. Default: no password. 24 | # db – database to use. Default: no default database. 25 | # port – TCP port of MySQL server. Default: standard port (3306). 26 | # unix_socket – location of UNIX socket. Default: use default location or TCP for remote hosts. 27 | # connect_timeout – Abort if connect is not completed within given number of seconds. Default: no timeout (?) 28 | 29 | [database] 30 | # Type of database. Choice of sqlite3, MySQLdb, psycopg2 (PostgreSQL) 31 | # Default: sqlite3 32 | #type: sqlite3 33 | 34 | # Database name. 
Default: /var/lib/denyhosts-server/denyhosts.sqlite 35 | #database: /var/lib/denyhosts-server/denyhosts.sqlite 36 | 37 | # Maximum size of database connection pool. Default: 5 38 | # For high volume servers, set this to 100 or so. 39 | #cp_max: 5 40 | 41 | [sync] 42 | # Maximum number of cracker IP addresses reported back to 43 | # denyhosts clients per sync. Default: 50 44 | #max_reported_crackers: 50 45 | 46 | # TCP port to listen on. Default: 9911 47 | #listen_port: 9911 48 | 49 | # Enable debugging methods. See debug_views.py for details.# Default: no 50 | # Default: no 51 | #enable_debug_methods: no 52 | 53 | # Legacy server to use as a source of bad hosts, to bootstrap 54 | # the database. Leave empty if you don't want to use a legacy server. 55 | # Set legacy_server to http://xmlrpc.denyhosts.net:9911 in order to 56 | # use the legacy server maintained by the original DenyHosts author 57 | # Default: No legacy server configured. 58 | #legacy_server: 59 | 60 | # How often (in seconds) to download hosts from legacy server. 61 | # Default: 300 seconds (5 minutes) 62 | #legacy_frequency: 300 63 | 64 | # Threshold value for legacy server. Default: 10 65 | #legacy_threshold = 10 66 | 67 | # Resiliency value for legacy server (in seconds) 68 | # Default: 10800 (three hours) 69 | #legacy_resiliency = 10800 70 | 71 | [maintenance] 72 | # Maintenance interval in seconds (3600 = one hour; 86400 = one day) 73 | # Default: 3600 74 | #interval_seconds: 3600 75 | 76 | # Number of days before reports are expired. Default: 30 77 | #expiry_days: 30 78 | 79 | # Number of days before hosts retrieved from legacy server are expired. Default: 30 80 | #legacy_expiry_days: 30 81 | 82 | [logging] 83 | # Location of the log file. Default: /var/log/denyhosts-server/denyhosts-server.log 84 | #logfile: /var/log/denyhosts-server/denyhosts-server.log 85 | 86 | # Log level. One of CRITICAL, ERROR, WARNING, INFO of DEBUG 87 | # Default: INFO. 
Set to WARNING for high-volume server 88 | #loglevel: INFO 89 | 90 | [stats] 91 | # How often (in seconds) to update the statistics HTML page 92 | # and graphs. Default: 600 (10 minutes) 93 | #update_frequency: 600 94 | 95 | # Location of static files. Place the css directory containing 96 | # bootstrap.min.css and the js directory containing bootstrap.min.js 97 | # here. Default: static/ under the project root 98 | #static_dir: static 99 | 100 | # Location of graph files. The server will cache the generated statistic 101 | # graph images here. This directory should be writable by the server. 102 | # Default: static/graph 103 | #graph_dir: static/graph 104 | 105 | # Location of template files. 106 | # Default: template 107 | #template_dir: template 108 | 109 | # Whether to resolve hostnames for the IP addresses in the statistics 110 | # Default: yes 111 | #resolve_hostnames: yes 112 | 113 | # TCP port to serve statistics. Can be the same a the listen_port in the 114 | # [sync] section. Default: 9911 115 | #listen_port: 9911 116 | 117 | # This section deals with peering. You can configure multiple server to share 118 | # reports with each other in a secure way. 119 | # See README.md for details. 120 | 121 | [peering] 122 | # Where to keep the private key of this server. This file is generated automatically. 123 | # Default: private.key 124 | #key_file: private.key 125 | 126 | # For every peer, configure the url and the (32 byte, hex-encoded) public key 127 | # using peer_PEERNAME_url and peer_PEERNAME_key. 
128 | # Default: no peers configured 129 | # 130 | # Example: 131 | #peer_1_url: http://deny1.resonatingmedia.com:9911 132 | #peer_1_key: 1cecef18aa25fca070cb22e110f32bd87dc38ae56d4c7549a0d579f0fba2835d 133 | #peer_2_url: http://deny2.resonatingmedia.com:9911 134 | #peer_2_key: 4a4f9f0a7077e132bbae7eeb663b034d79e804bce25bd30c4a8471415f9d997d 135 | # 136 | 137 | -------------------------------------------------------------------------------- /denyhosts-server.init.example: -------------------------------------------------------------------------------- 1 | #! /bin/sh 2 | # 3 | ### BEGIN INIT INFO 4 | # Provides: denyhosts-server 5 | # Required-Start: $syslog $network $remote_fs 6 | # Required-Stop: $syslog $network $remote_fs 7 | # Should-Start: mysql 8 | # Should-Stop: mysql 9 | # Default-Start: 2 3 4 5 10 | # Default-Stop: 0 1 6 11 | # Short-Description: DenyHosts synchronisation service 12 | # Description: Service that allows DenyHosts clients to share blocked IP 13 | # addresses. 14 | ### END INIT INFO 15 | 16 | PATH=/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin 17 | DAEMON=/usr/local/bin/denyhosts-server 18 | DESC="DenyHosts sync server" 19 | NAME=`basename $DAEMON` 20 | PIDFILE=/var/run/$NAME.pid 21 | DAEMONUSER=denyhosts-server 22 | CONFIG_FILE=/etc/denyhosts-server.conf 23 | QUIET=--quiet 24 | 25 | test -x $DAEMON || exit 0 26 | 27 | # Include defaults if available 28 | if [ -f /etc/default/denyhosts-server ] ; then 29 | . /etc/default/denyhosts-server 30 | fi 31 | DAEMON_ARGS="-c $CONFIG_FILE" 32 | 33 | #set -e 34 | 35 | . /lib/lsb/init-functions 36 | 37 | case "$1" in 38 | start) 39 | log_begin_msg "Starting $DESC: $NAME" 40 | start-stop-daemon --start $QUIET --chuid $DAEMONUSER \ 41 | --make-pidfile --pidfile $PIDFILE --background \ 42 | --startas /bin/bash -- -c "exec $DAEMON $DAEMON_ARGS >> /var/log/denyhosts-server/console.log 2>&1" 43 | log_end_msg $? 
44 | ;; 45 | stop) 46 | log_begin_msg "Stopping $DESC: $NAME" 47 | start-stop-daemon --stop $QUIET --chuid $DAEMONUSER \ 48 | --pidfile $PIDFILE \ 49 | --startas /bin/bash -- -c "exec $DAEMON $DAEMON_ARGS >> /var/log/denyhosts-server/console.log 2>&1" 50 | log_end_msg $? 51 | ;; 52 | restart) 53 | $0 stop 54 | $0 start 55 | ;; 56 | status) 57 | status_of_proc "$DAEMON" $NAME 58 | ;; 59 | reload|force-reload) 60 | log_begin_msg "Reloading $DESC: $NAME" 61 | start-stop-daemon --stop --signal HUP --pidfile $PIDFILE 62 | log_end_msg $? 63 | ;; 64 | *) 65 | N=/etc/init.d/$NAME 66 | echo "Usage: $N {start|stop|restart|reload|force-reload}" >&2 67 | exit 1 68 | ;; 69 | esac 70 | 71 | exit 0 72 | -------------------------------------------------------------------------------- /denyhosts-server.service.example: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=DenyHosts synchronisation service 3 | After=syslog.target 4 | 5 | [Service] 6 | Type=simple 7 | User=denyhosts-server 8 | Group=denyhosts-server 9 | EnvironmentFile=-/etc/default/denyhosts-server 10 | ExecStart=/usr/local/bin/denyhosts-server -c /etc/denyhosts-server.conf 11 | ExecReload=/bin/kill -HUP $MAINPID 12 | Restart=on-failure 13 | StandardOutput=syslog 14 | StandardError=syslog 15 | 16 | [Install] 17 | WantedBy=multi-user.target 18 | -------------------------------------------------------------------------------- /denyhosts_server/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | denyhosts_sync_server is an open source implementation of the U{DenyHosts} 3 | synchronisation server. It is based on the 4 | U{Twisted } framework and uses 5 | U{Twistar } as an ORM layer. 
6 | 7 | @author: Jan-Pascal van Best U{janpascal@vanbest.org} 8 | """ 9 | version_info = (2, 2, 3) 10 | version = '.'.join(map(str, version_info)) 11 | -------------------------------------------------------------------------------- /denyhosts_server/config.py: -------------------------------------------------------------------------------- 1 | # denyhosts sync server 2 | # Copyright (C) 2015-2016 Jan-Pascal van Best 3 | 4 | # This program is free software: you can redistribute it and/or modify 5 | # it under the terms of the GNU Affero General Public License as published 6 | # by the Free Software Foundation, either version 3 of the License, or 7 | # (at your option) any later version. 8 | 9 | # This program is distributed in the hope that it will be useful, 10 | # but WITHOUT ANY WARRANTY; without even the implied warranty of 11 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 | # GNU Affero General Public License for more details. 13 | 14 | # You should have received a copy of the GNU Affero General Public License 15 | # along with this program. If not, see . 

import ConfigParser
import inspect
import logging
import os.path
import sys
import sqlite3

def _get(config, section, option, default=None):
    """Return option from [section] as a string, or `default` if absent."""
    try:
        result = config.get(section, option)
    except ConfigParser.NoOptionError:
        result = default
    return result

def _gethex(config, section, option, default=None):
    """Return option from [section] hex-decoded to raw bytes.

    The `default` (when not None) is hex-decoded as well, since decoding
    happens after the fallback.
    """
    try:
        result = config.get(section, option)
    except ConfigParser.NoOptionError:
        result = default
    if result is not None:
        # Python 2 str.decode('hex'): "0a1b" -> "\x0a\x1b"
        result = result.decode('hex')
    return result

def _getint(config, section, option, default=None):
    """Return option from [section] as an int, or `default` if absent."""
    try:
        result = config.getint(section, option)
    except ConfigParser.NoOptionError:
        result = default
    return result

def _getboolean(config, section, option, default=None):
    """Return option from [section] as a bool, or `default` if absent."""
    try:
        result = config.getboolean(section, option)
    except ConfigParser.NoOptionError:
        result = default
    return result

def _getfloat(config, section, option, default=None):
    """Return option from [section] as a float, or `default` if absent."""
    try:
        result = config.getfloat(section, option)
    except ConfigParser.NoOptionError:
        result = default
    return result

def read_config(filename):
    """Parse the configuration file and publish settings as module globals.

    Exits the process (status 1) when the configured database type is
    not supported.
    """
    global dbtype, dbparams
    global maintenance_interval, expiry_days, legacy_expiry_days
    global max_reported_crackers
    global logfile
    global loglevel
    global xmlrpc_listen_port
    global legacy_server
    global legacy_frequency
    global legacy_threshold, legacy_resiliency
    global enable_debug_methods
    global stats_frequency
    global stats_resolve_hostnames
    global stats_listen_port
    global static_dir, graph_dir, template_dir
    global key_file, peers

    _config = ConfigParser.SafeConfigParser()
    # Close the config file when done (was readfp(open(...)), which
    # leaked the file handle).
    with open(filename, 'r') as config_file:
        _config.readfp(config_file)

    dbtype = _get(_config, "database", "type", "sqlite3")
    if dbtype not in ["sqlite3", "MySQLdb"]:
        print("Database type {} not supported, exiting".format(dbtype))
        # Exit non-zero so init systems register the failure
        # (sys.exit() with no argument reports success).
        sys.exit(1)

    # Every other key in [database] is passed through to the DB driver.
    dbparams = {
        key: value
        for (key, value) in _config.items("database")
        if key != "type"
    }
    if dbtype == "sqlite3":
        # Queries run on a thread pool; sqlite3 connections must be shared
        # across threads, restricted to a single connection (cp_max=1).
        dbparams["check_same_thread"] = False
        dbparams["detect_types"] = sqlite3.PARSE_DECLTYPES | sqlite3.PARSE_COLNAMES
        dbparams["cp_max"] = 1
        if "database" not in dbparams:
            dbparams["database"] = "/var/lib/denyhosts-server/denyhosts.sqlite"
    elif dbtype == "MySQLdb":
        dbparams["cp_reconnect"] = True

    # Options arrive as strings from ConfigParser; coerce numeric ones.
    if "cp_max" in dbparams:
        dbparams["cp_max"] = int(dbparams["cp_max"])
    if "cp_min" in dbparams:
        dbparams["cp_min"] = int(dbparams["cp_min"])
    if "port" in dbparams:
        dbparams["port"] = int(dbparams["port"])
    if "connect_timeout" in dbparams:
        dbparams["connect_timeout"] = float(dbparams["connect_timeout"])
    if "timeout" in dbparams:
        dbparams["timeout"] = float(dbparams["timeout"])

    maintenance_interval = _getint(_config, "maintenance", "interval_seconds", 3600)
    expiry_days = _getfloat(_config, "maintenance", "expiry_days", 30)
    legacy_expiry_days = _getfloat(_config, "maintenance", "legacy_expiry_days", 30)

    max_reported_crackers = _getint(_config, "sync", "max_reported_crackers", 50)
    xmlrpc_listen_port = _getint(_config, "sync", "listen_port", 9911)
    enable_debug_methods = _getboolean(_config, "sync", "enable_debug_methods", False)
    legacy_server = _get(_config, "sync", "legacy_server", None)
    legacy_frequency = _getint(_config, "sync", "legacy_frequency", 300)
    legacy_threshold = _getint(_config, "sync", "legacy_threshold", 10)
    legacy_resiliency = _getint(_config, "sync", "legacy_resiliency", 10800)

    logfile = _get(_config, "logging", "logfile", "/var/log/denyhosts-server/denyhosts-server.log")
    loglevel = _get(_config, "logging", "loglevel", "INFO")
    # loglevel may be numeric ("10") or a level name ("DEBUG"); an
    # unrecognised name falls back to INFO with a warning.
    try:
        loglevel = int(loglevel)
    except ValueError:
        try:
            loglevel = logging.__dict__[loglevel]
        except KeyError:
            print("Illegal log level {}".format(loglevel))
            loglevel = logging.INFO

    stats_frequency = _getint(_config, "stats", "update_frequency", 600)
    # Directory containing the denyhosts_server package; used as the root
    # for the default static/template locations.
    package_dir = os.path.dirname(os.path.dirname(inspect.getsourcefile(read_config)))
    static_dir = _get(_config, "stats", "static_dir",
        os.path.join(
            package_dir,
            "static"))
    graph_dir = _get(_config, "stats", "graph_dir", os.path.join(static_dir, "graph"))
    template_dir = _get(_config, "stats", "template_dir", os.path.join(package_dir, "template"))
    stats_resolve_hostnames = _getboolean(_config, "stats", "resolve_hostnames", True)
    stats_listen_port = _getint(_config, "stats", "listen_port", 9911)

    key_file = _get(_config, "peering", "key_file", os.path.join(package_dir, "private.key"))

    # Collect peer_<name>_url / peer_<name>_key option pairs into a
    # {url: key_bytes} mapping.
    peers = {}
    for item in _config.items("peering"):
        if item[0].startswith("peer_") and item[0].endswith("_url"):
            url = item[1]
            key_key = item[0].replace("_url", "_key")
            key = _gethex(_config, "peering", key_key)
            peers[url] = key

# --------------------------------------------------------------------
# denyhosts_server/controllers.py
# denyhosts sync server
# Copyright (C) 2015-2016 Jan-Pascal van Best <janpascal@vanbest.org>

# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.

# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.

# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

import logging
import time
import xmlrpclib

from twisted.internet.defer import inlineCallbacks, returnValue
from twisted.internet.threads import deferToThread

import config
import database
import models
from models import Cracker, Report, Legacy
import utils

def get_cracker(ip_address):
    # Deferred firing with the Cracker row for ip_address, or None.
    return Cracker.find(where=["ip_address=?",ip_address], limit=1)

@inlineCallbacks
def handle_report_from_client(client_ip, timestamp, hosts):
    """Record a batch of cracker IPs reported by one client.

    Each address is validated, the Cracker row is created on first
    sighting, and the report is merged via add_report_to_cracker().
    The per-host lock serialises concurrent reports for the same
    cracker IP.

    Raises Exception when any address in `hosts` is invalid.
    """
    for cracker_ip in hosts:
        if not utils.is_valid_ip_address(cracker_ip):
            logging.warning("Illegal host ip address {} from {}".format(cracker_ip, client_ip))
            raise Exception("Illegal IP address \"{}\".".format(cracker_ip))

        logging.debug("Adding report for {} from {}".format(cracker_ip, client_ip))
        yield utils.wait_and_lock_host(cracker_ip)
        try:
            cracker = yield Cracker.find(where=['ip_address=?', cracker_ip], limit=1)
            if cracker is None:
                cracker = Cracker(ip_address=cracker_ip, first_time=timestamp,
                    latest_time=timestamp, resiliency=0, total_reports=0, current_reports=0)
                yield cracker.save()
            yield add_report_to_cracker(cracker, client_ip, when=timestamp)
        finally:
            # Always release, even when add_report_to_cracker fails.
            utils.unlock_host(cracker_ip)
        logging.debug("Done adding report for {} from {}".format(cracker_ip,client_ip))

# Note: lock cracker IP first!
# Report merging algorithm by Anne Bezemer, see
# https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=622697
@inlineCallbacks
def add_report_to_cracker(cracker, client_ip, when=None):
    """Merge one report from `client_ip` into `cracker`.

    At most three Report rows are kept per (cracker, reporter) pair: a
    new row is only added when the previous one is older than 24 hours;
    beyond three, only the newest row's latest_report_time is advanced.
    Updates the cracker's total_reports, latest_time and resiliency.
    """
    if when is None:
        when = time.time()

    reports = yield Report.find(
        where=["cracker_id=? AND ip_address=?", cracker.id, client_ip],
        orderby='latest_report_time ASC'
    )
    if len(reports) == 0:
        # First report from this client for this cracker.
        report = Report(ip_address=client_ip, first_report_time=when, latest_report_time=when)
        yield report.save()
        # NOTE(review): current_reports is only incremented in this branch,
        # not when a second/third row is added below — confirm intended.
        cracker.current_reports += 1
        yield report.cracker.set(cracker)
    elif len(reports) == 1:
        report = reports[0]
        # Add second report after 24 hours
        if when > report.latest_report_time + 24*3600:
            report = Report(ip_address=client_ip, first_report_time=when, latest_report_time=when)
            yield report.save()
            yield report.cracker.set(cracker)
    elif len(reports) == 2:
        latest_report = reports[1]
        # Add third report after again 24 hours
        if when > latest_report.latest_report_time + 24*3600:
            report = Report(ip_address=client_ip, first_report_time=when, latest_report_time=when)
            yield report.save()
            yield report.cracker.set(cracker)
    else:
        # Three or more rows: just advance the newest one.
        latest_report = reports[-1]
        latest_report.latest_report_time = when
        yield latest_report.save()

    cracker.total_reports += 1
    cracker.latest_time = when
    # Resiliency: how long this cracker has been observed in total.
    cracker.resiliency = when - cracker.first_time

    yield cracker.save()

@inlineCallbacks
def get_qualifying_crackers(min_reports, min_resilience, previous_timestamp,
    max_crackers, latest_added_hosts):
    """Return up to `max_crackers` cracker IPs that qualify for a client.

    Conditions (a)/(b) (enough current reports, enough resiliency,
    recently active) are applied in SQL; conditions (c)/(d) from the
    Bezemer algorithm are checked per cracker in Python. Hosts the
    client itself just reported (`latest_added_hosts`) are skipped.
    Legacy-server entries pad the list when there is room left.
    """
    # Thank to Anne Bezemer for the algorithm in this function.
    # See https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=622697

    # This query takes care of conditions (a) and (b)
    # cracker_ids = yield database.runGetPossibleQualifyingCrackerQuery(min_reports, min_resilience, previous_timestamp)
    cracker_ids = yield database.run_query("""
        SELECT DISTINCT c.id, c.ip_address
        FROM crackers c
        WHERE (c.current_reports >= ?)
            AND (c.resiliency >= ?)
            AND (c.latest_time >= ?)
        ORDER BY c.first_time DESC
        """, min_reports, min_resilience, previous_timestamp)

    if cracker_ids is None:
        returnValue([])

    # Now look for conditions (c) and (d)
    result = []
    for c in cracker_ids:
        cracker_id = c[0]
        if c[1] in latest_added_hosts:
            logging.debug("Skipping {}, just reported by client".format(c[1]))
            continue
        cracker = yield Cracker.find(cracker_id)
        if cracker is None:
            # Row vanished between the query and this lookup.
            continue
        logging.debug("Examining cracker:")
        logging.debug(cracker)
        reports = yield cracker.reports.get(orderby="first_report_time ASC")
        #logging.debug("reports:")
        #for r in reports:
        #    logging.debug("    "+str(r))
        logging.debug("r[m-1].first, prev: {}, {}".format(reports[min_reports-1].first_report_time, previous_timestamp))
        if (len(reports)>=min_reports and
            reports[min_reports-1].first_report_time >= previous_timestamp):
            # condition (c) satisfied
            logging.debug("c")
            result.append(cracker.ip_address)
        else:
            logging.debug("checking (d)...")
            satisfied = False
            for report in reports:
                #logging.debug("    "+str(report))
                # d1: a recent-enough report with sufficient resiliency
                if (not satisfied and
                    report.latest_report_time>=previous_timestamp and
                    report.latest_report_time-cracker.first_time>=min_resilience):
                    logging.debug("    d1")
                    satisfied = True
                # d2: an old report already satisfied resiliency, so the
                # client has seen this cracker before — disqualify.
                if (report.latest_report_time<=previous_timestamp and
                    report.latest_report_time-cracker.first_time>=min_resilience):
                    logging.debug("    d2 failed")
                    satisfied = False
                    break
            if satisfied:
                logging.debug("Appending {}".format(cracker.ip_address))
                result.append(cracker.ip_address)
            else:
                logging.debug("    skipping")
        if len(result)>=max_crackers:
            break

    if len(result) < max_crackers:
        # Add results from legacy server
        extras = yield Legacy.find(where=["retrieved_time>?", previous_timestamp],
            orderby="retrieved_time DESC", limit=max_crackers-len(result))
        result = result + [extra.ip_address for extra in extras]

    logging.debug("Returning {} hosts".format(len(result)))
    returnValue(result)

# Periodical database maintenance
# From algorithm by Anne Bezemer, see https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=622697
# Expiry/maintenance every hour/day:
#   remove reports with .latestreporttime older than (for example) 1 month
#     and only update cracker.currentreports
#   remove reports that were reported by what we now "reliably" know to
#     be crackers themselves
#   remove crackers that have no reports left

# TODO remove reports by identified crackers

@inlineCallbacks
def perform_maintenance(limit = None, legacy_limit = None):
    # Expire reports older than `limit` (default: config.expiry_days ago)
    # and legacy entries older than `legacy_limit`, in batches.
    logging.info("Starting maintenance job...")

    if limit is None:
        now = time.time()
        limit = now - config.expiry_days * 24 * 3600

    if legacy_limit is None:
        now = time.time()
        legacy_limit = now - config.legacy_expiry_days * 24 * 3600

    reports_deleted = 0
    crackers_deleted = 0
    legacy_deleted = 0

    batch_size = 1000

    while True:
        # NOTE(review): the remainder of this function is missing from this
        # copy of the source (extract truncated mid-statement below).
        old_reports = yield Report.find(where=["latest_report_time

# --------------------------------------------------------------------
# denyhosts_server/database.py

# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.

# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.

# You should have received a copy of the GNU Affero General Public License
# along with this program.
# If not, see <http://www.gnu.org/licenses/>.

import logging
import datetime
import time

from twistar.registry import Registry
from twisted.internet.defer import inlineCallbacks, returnValue

import GeoIP

import config
import stats

# When True, suppress console progress output (used by tests/tools).
_quiet = False

def _remove_tables(txn):
    """Drop every application table; used by clean_database()."""
    global _quiet
    if not _quiet:
        print("Removing all data from database and removing tables")
    txn.execute("DROP TABLE IF EXISTS info")
    txn.execute("DROP TABLE IF EXISTS crackers")
    txn.execute("DROP TABLE IF EXISTS reports")
    txn.execute("DROP TABLE IF EXISTS legacy")
    txn.execute("DROP TABLE IF EXISTS history")
    txn.execute("DROP TABLE IF EXISTS country_history")

def _evolve_database_initial(txn, dbtype):
    """Create the version-0 schema: crackers, reports and legacy tables."""
    # Auto-increment keyword differs per backend.
    if dbtype=="sqlite3":
        autoincrement="AUTOINCREMENT"
    elif dbtype=="MySQLdb":
        autoincrement="AUTO_INCREMENT"

    txn.execute("""CREATE TABLE crackers (
        id INTEGER PRIMARY KEY {},
        ip_address CHAR(15),
        first_time INTEGER,
        latest_time INTEGER,
        total_reports INTEGER,
        current_reports INTEGER
        )""".format(autoincrement))
    txn.execute("CREATE UNIQUE INDEX cracker_ip_address ON crackers (ip_address)")

    txn.execute("""CREATE TABLE reports(
        id INTEGER PRIMARY KEY {},
        cracker_id INTEGER,
        ip_address CHAR(15),
        first_report_time INTEGER,
        latest_report_time INTEGER
        )""".format(autoincrement))
    txn.execute("CREATE INDEX report_first_time ON reports (first_report_time)")
    txn.execute("CREATE UNIQUE INDEX report_cracker_ip ON reports (cracker_id, ip_address)")
    txn.execute("CREATE INDEX report_cracker_first ON reports (cracker_id, first_report_time)")

    txn.execute("""CREATE TABLE legacy(
        id INTEGER PRIMARY KEY {},
        ip_address CHAR(15),
        retrieved_time INTEGER
        )""".format(autoincrement))
    txn.execute("CREATE UNIQUE INDEX legacy_ip ON legacy (ip_address)")
    txn.execute("CREATE INDEX legacy_retrieved ON legacy (retrieved_time)")

def _evolve_database_v1(txn, dbtype):
    """v1: add the key/value info table holding schema version and sync state."""
    txn.execute("""CREATE TABLE info (
        `key` CHAR(32) PRIMARY KEY,
        `value` VARCHAR(255)
        )""")
    if dbtype=="sqlite3":
        txn.execute('INSERT INTO info VALUES ("schema_version", ?)', (str(_schema_version),))
    elif dbtype=="MySQLdb":
        txn.execute('INSERT INTO info VALUES ("schema_version", %s)', (str(_schema_version),))
    txn.execute('INSERT INTO info VALUES ("last_legacy_sync", 0)')

def _evolve_database_v2(txn, dbtype):
    """v2: add and backfill the crackers.resiliency column plus indexes."""
    txn.execute("ALTER TABLE crackers ADD resiliency INTEGER")
    txn.execute("CREATE INDEX cracker_qual ON crackers (current_reports, resiliency, latest_time, first_time)")
    txn.execute("CREATE INDEX cracker_first ON crackers (first_time)")
    txn.execute("UPDATE crackers SET resiliency=latest_time-first_time")

def _evolve_database_v3(txn, dbtype):
    """v3: reorder the cracker_qual index columns (latest_time first)."""
    if dbtype=="sqlite3":
        txn.execute("DROP INDEX cracker_qual")
    elif dbtype=="MySQLdb":
        txn.execute("ALTER TABLE crackers DROP INDEX cracker_qual")
    txn.execute("CREATE INDEX cracker_qual ON crackers (latest_time, current_reports, resiliency, first_time)")

def _evolve_database_v4(txn, dbtype):
    """v4: index reports on latest_report_time (used by maintenance)."""
    txn.execute("CREATE INDEX report_latest ON reports (latest_report_time)")

def _evolve_database_v5(txn, dbtype):
    """v5: widen report_cracker_ip index to include latest_report_time."""
    if dbtype=="sqlite3":
        txn.execute("DROP INDEX report_cracker_ip")
    elif dbtype=="MySQLdb":
        txn.execute("ALTER TABLE reports DROP INDEX report_cracker_ip")
    txn.execute("CREATE INDEX report_cracker_ip ON reports (cracker_id, ip_address, latest_report_time)")

def _evolve_database_v6(txn, dbtype):
    # Remove crackers without reports from database. This may have occured
    # because of a bug in controllers.perform_maintenance()
    txn.execute("""
        DELETE FROM crackers
        WHERE id NOT IN
            ( SELECT cracker_id FROM reports )
        """)

def _evolve_database_v7(txn, dbtype):
    """v7: add the per-day history table and backfill recent history."""
    txn.execute("""CREATE TABLE history (
        `date` DATE PRIMARY KEY,
        num_reports INTEGER,
        num_contributors INTEGER,
        num_reported_hosts INTEGER
        )""")

    stats.update_recent_history_txn(txn)

def _evolve_database_v8(txn, dbtype):
    """v8: add per-country report totals and repair historical data."""
    global _quiet
    txn.execute("""CREATE TABLE country_history (
        country_code CHAR(5) PRIMARY KEY,
        country VARCHAR(50),
        num_reports INTEGER
        )""")
    txn.execute("CREATE INDEX country_history_count ON country_history(num_reports)")
    txn.execute('INSERT INTO `info` VALUES ("last_country_history_update", "1900-01-01")')

    if not _quiet:
        print("Calculating per-country totals...")
    stats.update_country_history_txn(txn, None, include_history=True)

    if not _quiet:
        print("Fixing up historical data...")
    stats.fixup_history_txn(txn)

# Ordered schema migrations; _schema_version is the highest defined step.
_evolutions = {
    1: _evolve_database_v1,
    2: _evolve_database_v2,
    3: _evolve_database_v3,
    4: _evolve_database_v4,
    5: _evolve_database_v5,
    6: _evolve_database_v6,
    7: _evolve_database_v7,
    8: _evolve_database_v8
}

_schema_version = len(_evolutions)

def _evolve_database(txn):
    """Bring the schema up to _schema_version, creating it if absent."""
    global _quiet
    if not _quiet:
        print("Evolving database")
    dbtype = config.dbtype

    try:
        txn.execute('SELECT `value` FROM info WHERE `key`="schema_version"')
        result = txn.fetchone()
        if result is not None:
            current_version = int(result[0])
        else:
            if not _quiet:
                print("No schema version in database")
            _evolve_database_initial(txn, dbtype)
            current_version = 0
    except Exception:
        # Typically: the info table does not exist yet (fresh database).
        # Was a bare `except:`, which also swallowed KeyboardInterrupt.
        if not _quiet:
            print("No schema version in database")
        _evolve_database_initial(txn, dbtype)
        current_version = 0

    if current_version > _schema_version:
        if not _quiet:
            print("Illegal database schema {}".format(current_version))
        return

    if not _quiet:
        print("Current database schema is version {}".format(current_version))

    while current_version < _schema_version:
        current_version += 1
        if not _quiet:
            print("Evolving database to version {}...".format(current_version))
        _evolutions[current_version](txn, dbtype)

        if dbtype=="sqlite3":
            txn.execute('UPDATE info SET `value`=? WHERE `key`="schema_version"', (str(current_version),))
        elif dbtype=="MySQLdb":
            txn.execute('UPDATE info SET `value`=%s WHERE `key`="schema_version"', (str(current_version),))

    if not _quiet:
        print("Updated database schema, current version is {}".format(_schema_version))

def evolve_database():
    """Run the schema migration inside a single transaction."""
    return Registry.DBPOOL.runInteraction(_evolve_database)

@inlineCallbacks
def clean_database(quiet = False):
    """Drop all tables and recreate the schema from scratch."""
    global _quiet
    _quiet = quiet
    yield Registry.DBPOOL.runInteraction(_remove_tables)
    yield Registry.DBPOOL.runInteraction(_evolve_database)

@inlineCallbacks
def get_schema_version():
    """Return the stored schema version, or 0 when it cannot be read."""
    try:
        rows = yield Registry.DBPOOL.runQuery('SELECT `value` FROM `info` WHERE `key`="schema_version"')
        if rows is not None:
            current_version = int(rows[0][0])
        else:
            if not _quiet:
                print("No schema version in database")
            current_version = 0
    except Exception:
        # Missing table or empty result set; was a bare `except:`.
        current_version = 0
    returnValue(current_version)

@inlineCallbacks
def check_database_version():
    """Stop the reactor when the schema does not match _schema_version."""
    global _quiet
    current_version = yield get_schema_version()

    if current_version != _schema_version:
        logging.debug("Wrong database schema {}, expecting {}, exiting".format(current_version, _schema_version))
        if not _quiet:
            print("Wrong database schema {}, expecting {}, exiting".format(current_version, _schema_version))
        from twisted.internet import reactor
        reactor.stop()
    else:
        logging.info("Database schema is up to date (version {})".format(current_version))
    returnValue(current_version)

# FIXME Not the proper way. What if there's a question mark somewhere
# else in the query?
def translate_query(query):
    """Rewrite '?' placeholders for the configured DB-API paramstyle."""
    global _quiet
    if config.dbtype == "MySQLdb":
        return query.replace('?', '%s')
    elif config.dbtype == "sqlite3":
        return query
    else:
        if not _quiet:
            print("unsupported database {}".format(config.dbtype))
        return query

def run_query(query, *args):
    """Run a SELECT-style query; Deferred fires with the result rows."""
    return Registry.DBPOOL.runQuery(translate_query(query), args)

def run_operation(query, *args):
    """Run a statement that returns no rows (INSERT/UPDATE/DELETE)."""
    return Registry.DBPOOL.runOperation(translate_query(query), args)

def run_truncate_query(table):
    """Empty `table` using the backend's preferred statement."""
    global _quiet
    if config.dbtype == "MySQLdb":
        query = "TRUNCATE TABLE `{}`".format(table)
    elif config.dbtype == "sqlite3":
        query = "DELETE FROM `{}`".format(table)
    else:
        if not _quiet:
            print("unsupported database {}".format(config.dbtype))
        # Previously fell through with `query` unbound, raising a
        # confusing NameError; fail explicitly instead.
        raise ValueError("unsupported database {}".format(config.dbtype))
    return Registry.DBPOOL.runQuery(query)

def dump_crackers():
    return run_query("SELECT * FROM crackers")

@inlineCallbacks
def dump_table(table):
    """Return all rows of `table`, converting DATE columns to Unix timestamps."""
    rows = yield run_query("SELECT * FROM " + table)

    for i in xrange(len(rows)):
        row = rows[i]
        for j in xrange(len(row)):
            if isinstance(row[j], datetime.date):
                converted = list(row)
                converted[j] = time.mktime(row[j].timetuple())
                rows[i] = tuple(converted)

    # Bug fix: the converted rows were computed but never returned, so
    # callers of dump_table() always received None.
    returnValue(rows)

def dump_reports_for_cracker(cracker_ip):
    """Return all report rows belonging to the cracker with this IP."""
    logging.debug("database.dump_reports_for_cracker({})".format(cracker_ip))
    return run_query("SELECT r.* FROM reports r JOIN crackers c ON r.cracker_id = c.id WHERE c.ip_address=?", cracker_ip)

def bootstrap_table(table, params):
    """Insert one raw row into `table`; skips the schema_version info row."""
    if table=="info" and params[0]=="schema_version":
        return None
    query = "INSERT INTO " + table + " VALUES (" + ",".join(["?"]*len(params)) + ")"
    return run_operation(query, *params)

def bootstrap_cracker(params):
    return bootstrap_table("crackers", params)

def bootstrap_report(params):
    return bootstrap_table("reports", params)

# --------------------------------------------------------------------
# denyhosts_server/debug_views.py
# denyhosts sync server
# Copyright (C) 2015 Jan-Pascal van Best <janpascal@vanbest.org>

# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.

# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.

# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

import time
import logging
import random

from twisted.web import xmlrpc
from twisted.web.xmlrpc import withRequest
from twisted.internet.defer import inlineCallbacks, returnValue
from twisted.internet import reactor
import ipaddr

import models
from models import Cracker, Report
import config
import controllers
import utils

class DebugServer(xmlrpc.XMLRPC):
    """XML-RPC handler exposing debug-only methods.

    Wraps the main server handler so test methods can delegate to it.
    """

    def __init__(self, server):
        # The main xmlrpc Server instance; used by xmlrpc_test below.
        self.server = server

    @inlineCallbacks
    def xmlrpc_list_all_hosts(self):
        """Return the IP addresses of every cracker in the database."""
        known = yield Cracker.all()
        returnValue([entry.ip_address for entry in known])

    # Concurrency test. Remove before public installation!
    @withRequest
    def xmlrpc_test(self, request):
        """Schedule five concurrent add_hosts calls for each of two host pairs."""
        for pair in (["1.1.1.1", "2.2.2.2"], ["1.1.1.7", "2.2.2.8"]):
            for _ in range(5):
                # Fresh list per call so callees never share one object.
                reactor.callLater(1, self.server.xmlrpc_add_hosts, request, list(pair))
        return 0

    # For concurrency testing. Remove before public installation!
    def xmlrpc_maintenance(self):
        """Trigger the periodic maintenance job on demand."""
        return controllers.perform_maintenance()

    def random_ip_address(self):
        """Draw random dotted-quad addresses until one passes validation."""
        while True:
            candidate = ".".join(str(random.randint(0, 255)) for _ in range(4))
            if utils.is_valid_ip_address(candidate):
                return candidate

    # Cached cracker addresses shared by bulk-insert test runs.
    _crackers = []

    @inlineCallbacks
    def xmlrpc_test_bulk_insert(self, count, same_crackers = False, when=None):
        """Insert `count` random reports; reuse cached cracker IPs when
        `same_crackers` is set. Returns (total crackers, total reports)."""
        if same_crackers and len(self._crackers) < count:
            logging.debug("Filling static crackers from {} to {}".format(len(self._crackers), count))
            while len(self._crackers) < count:
                self._crackers.append(self.random_ip_address())

        if when is None:
            when = time.time()

        for index in xrange(count):
            # Draw the reporter first, then the cracker, to keep the
            # random sequence identical either way.
            reporter = self.random_ip_address()
            cracker_ip = self._crackers[index] if same_crackers else self.random_ip_address()

            logging.debug("Adding report for {} from {} at {}".format(cracker_ip, reporter, when))

            yield utils.wait_and_lock_host(cracker_ip)

            cracker = yield Cracker.find(where=['ip_address=?', cracker_ip], limit=1)
            if cracker is None:
                cracker = Cracker(ip_address=cracker_ip, first_time=when, latest_time=when, total_reports=0, current_reports=0)
                yield cracker.save()
            yield controllers.add_report_to_cracker(cracker, reporter, when=when)

            utils.unlock_host(cracker_ip)
            logging.debug("Done adding report for {} from {}".format(cracker_ip,reporter))
        total = yield Cracker.count()
        total_reports = yield Report.count()
        returnValue((total,total_reports))

    def xmlrpc_clear_bulk_cracker_list(self):
        """Forget the cached cracker list used by test_bulk_insert."""
        self._crackers = []
        return 0

    @inlineCallbacks
    def xmlrpc_get_cracker_info(self, ip):
        """Return [cracker_fields, [report_fields, ...]] for one cracker IP."""
        if not utils.is_valid_ip_address(ip):
            logging.warning("Illegal host ip address {}".format(ip))
            raise xmlrpc.Fault(101, "Illegal IP address \"{}\".".format(ip))
        #logging.info("Getting info for cracker {}".format(ip_address))
        cracker = yield controllers.get_cracker(ip)
        if cracker is None:
            raise xmlrpc.Fault(104, "Cracker {} unknown".format(ip))
            returnValue([])

        #logging.info("found cracker: {}".format(cracker))
        reports = yield cracker.reports.get()
        #logging.info("found reports: {}".format(reports))
        cracker_cols=['ip_address','first_time', 'latest_time', 'resiliency', 'total_reports', 'current_reports']
        report_cols=['ip_address','first_report_time', 'latest_report_time']
        returnValue( [cracker.toHash(cracker_cols), [r.toHash(report_cols) for r in reports]] )

# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4

# --------------------------------------------------------------------
# denyhosts_server/main.py
# denyhosts sync server
# Copyright (C) 2015-2016 Jan-Pascal van Best <janpascal@vanbest.org>

# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.

# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.

# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

import argparse
import logging
import signal
import sys
import ConfigParser

from twisted.web import server, resource, static
from twisted.enterprise import adbapi
from twisted.internet import task, reactor
from twisted.internet.defer import inlineCallbacks, returnValue
from twisted.python import log

from twistar.registry import Registry

import views
import debug_views
import peering_views
import models
import controllers
import config
import database
import stats
import utils
import peering

import __init__

def stop_reactor(value):
    # Print the (usually error) value, then terminate the event loop.
    print(value)
    reactor.stop()

def sighup_handler(signum, frame):
    """SIGHUP: reload the config file and re-open the listening ports."""
    global configfile
    global main_xmlrpc_handler

    logging.warning("Received SIGHUP, reloading configuration file...")
    # Remember settings whose change requires re-registering handlers/ports.
    debug_was_on = config.enable_debug_methods
    old_xmlrpc_listen_port = config.xmlrpc_listen_port
    old_stats_listen_port = config.stats_listen_port
    config.read_config(configfile)

    configure_logging()
    schedule_jobs()

    if debug_was_on and not config.enable_debug_methods:
        # Remove debug methods
        # Missing API in class XMLRPC
        del main_xmlrpc_handler.subHandlers["debug"]

    if config.enable_debug_methods and not debug_was_on:
        d = debug_views.DebugServer(main_xmlrpc_handler)
        main_xmlrpc_handler.putSubHandler('debug', d)

    # Re-bind the ports only after the old listeners are fully closed.
    stop_listening().addCallback(lambda _: start_listening())

@inlineCallbacks
def shutdown():
    """Graceful shutdown: close ports, drain sessions and host locks."""
    global main_xmlrpc_handler
    global _xmlrpc_listener
    global _xmlrpc_site
    try:
        site = _xmlrpc_site
        logging.info("shutting down, first closing listening ports...")
        print("Shutting down, hold on a moment...")
        yield stop_listening()

        # This doesn't work, site.session is always empty
        logging.info("Ports closed, waiting for current sessions to close...")
        logging.debug("Clients still connected: {}".format(len(site.sessions)))
        while not len(site.sessions)==0:
            logging.debug("Waiting, {} sessions still active".format(len(site.sessions)))
            yield task.deferLater(reactor, 1, lambda _:0, 0)

        # Wait until no request holds a per-cracker host lock any more.
        logging.info("No more sessions, waiting for locked hosts...")
        while not utils.none_waiting():
            logging.info("Waiting to shut down, {} hosts still blocked".format(utils.count_waiting()))
            yield task.deferLater(reactor, 1, lambda _:0, 0)
            logging.debug("reactor.getDelayedCalls: {}".format([c.func for c in reactor.getDelayedCalls()]))

        # Grace period for in-flight work before the reactor stops.
        logging.info("All hosts unlocked, waiting 3 more seconds...")
        yield task.deferLater(reactor, 1, lambda _:0, 0)
        logging.debug("Waiting 2 more seconds...")
        yield task.deferLater(reactor, 1, lambda _:0, 0)
        logging.debug("Waiting 1 more second...")
        yield task.deferLater(reactor, 1, lambda _:0, 0)
        logging.info("Continuing shutdown")
    except:
        logging.exception("Error in shutdown callback")

# Active listening ports / site; None when not listening.
_xmlrpc_listener = None
_stats_listener = None
_xmlrpc_site = None

# Returns a callback. Wait on it before the port(s) are actually closed
def stop_listening():
    logging.debug("main.stop_listening()")
    global _xmlrpc_listener
    global _stats_listener
    global _xmlrpc_site

    # It's not easy to actually close a listening port.
    # You need to close both the port and the protocol,
    # and wait for them
    if _xmlrpc_listener is not None:
        deferred = _xmlrpc_listener.stopListening()
        # NOTE(review): a bound method added as a callback receives the
        # previous callback result as its argument — verify this matches
        # the Twisted IListeningPort API for loseConnection().
        deferred.addCallback(_xmlrpc_listener.loseConnection)
    else:
        # NOTE(review): `Deferred` is never imported in this module (only
        # inlineCallbacks/returnValue are) — this branch raises NameError.
        deferred = Deferred()

    if _stats_listener is not None:
        # NOTE(review): same bound-method-as-callback concern as above.
        deferred.addCallback(_stats_listener.stopListening)
        deferred.addCallback(_stats_listener.loseConnection)

    _xmlrpc_listener = None
    _stats_listener = None
    _xmlrpc_site = None

    return deferred

def start_listening():
    """Build the web resource tree and bind the configured TCP port(s)."""
    logging.debug("main.start_listening()")
    global _xmlrpc_listener
    global _xmlrpc_site
    global _stats_listener
    global main_xmlrpc_handler

    # Configure web resources
    main_xmlrpc_handler = views.Server()
    stats_resource = views.WebResource()
    web_static = static.File(config.static_dir)
    static.File.contentTypes['.svg'] = 'image/svg+xml'
    web_graphs = static.File(config.graph_dir)

    # Roots: share one site when the stats and xmlrpc ports coincide.
    if config.stats_listen_port == config.xmlrpc_listen_port:
        xmlrpc_root = stats_resource
    else:
        xmlrpc_root = resource.Resource()
    stats_root = stats_resource

    # /RPC2
    xmlrpc_root.putChild('RPC2', main_xmlrpc_handler)

    # Peering handler
    p = peering_views.PeeringServer(main_xmlrpc_handler)
    main_xmlrpc_handler.putSubHandler('peering', p)

    # xmlrpc debug handler
    if config.enable_debug_methods:
        d = debug_views.DebugServer(main_xmlrpc_handler)
        main_xmlrpc_handler.putSubHandler('debug', d)

    # /static
    stats_root.putChild('static', web_static)
    # /static/graphs
    web_static.putChild('graphs', web_graphs)

    logging.info("Start listening on port {}".format(config.xmlrpc_listen_port))
    _xmlrpc_site = server.Site(xmlrpc_root)
    _xmlrpc_listener = reactor.listenTCP(config.xmlrpc_listen_port, _xmlrpc_site)
| 178 | if config.stats_listen_port == config.xmlrpc_listen_port: 179 | _stats_listener = None 180 | else: 181 | logging.info("Start serving statistics on port {}".format(config.stats_listen_port)) 182 | _stats_listener = reactor.listenTCP(config.stats_listen_port, server.Site(stats_root)) 183 | 184 | maintenance_job = None 185 | legacy_sync_job = None 186 | stats_job = None 187 | 188 | def schedule_jobs(): 189 | global maintenance_job, legacy_sync_job, stats_job 190 | 191 | # Reschedule maintenance job 192 | if maintenance_job is not None: 193 | maintenance_job.stop() 194 | maintenance_job = task.LoopingCall(controllers.perform_maintenance) 195 | maintenance_job.start(config.maintenance_interval, now=False) 196 | 197 | # Reschedule legacy sync job 198 | if legacy_sync_job is not None: 199 | legacy_sync_job.stop() 200 | legacy_sync_job = task.LoopingCall(controllers.download_from_legacy_server) 201 | legacy_sync_job.start(config.legacy_frequency, now=False) 202 | 203 | # Reschedule stats job 204 | if stats_job is not None: 205 | stats_job.stop() 206 | stats_job = task.LoopingCall(stats.update_stats_cache) 207 | stats_job.start(config.stats_frequency, now=True) 208 | 209 | def configure_logging(): 210 | # Remove all handlers associated with the root logger object. 
211 | for handler in logging.root.handlers[:]: 212 | logging.root.removeHandler(handler) 213 | 214 | # Use basic configuration 215 | logging.basicConfig(filename=config.logfile, 216 | level=config.loglevel, 217 | format="%(asctime)s %(module)-8s %(levelname)-8s %(message)s", 218 | datefmt="%Y-%m-%d %H:%M:%S") 219 | 220 | # Collect Twisted log messages in Python logging system 221 | observer = log.PythonLoggingObserver() 222 | observer.start() 223 | 224 | def run_main(): 225 | global configfile 226 | global maintenance_job, legacy_sync_job 227 | global main_xmlrpc_handler, stats_resource, web_root, web_static 228 | 229 | parser = argparse.ArgumentParser(description="DenyHosts sync server") 230 | parser.add_argument("-c", "--config", default="/etc/denyhosts-server.conf", help="Configuration file") 231 | parser.add_argument("--recreate-database", action='store_true', help="Wipe and recreate the database") 232 | parser.add_argument("--evolve-database", action='store_true', help="Evolve the database to the latest schema version") 233 | parser.add_argument("--purge-legacy-addresses", action='store_true', 234 | help="Purge all hosts downloaded from the legacy server. DO NOT USE WHEN DENYHOSTS-SERVER IS RUNNING!") 235 | parser.add_argument("--purge-reported-addresses", action='store_true', 236 | help="Purge all hosts that have been reported by clients. DO NOT USE WHEN DENYHOSTS-SERVER IS RUNNING!") 237 | parser.add_argument("--purge-ip", action='store', 238 | help="Purge ip address from both legacy and reported host lists. DO NOT USE WHEN DENYHOSTS-SERVER IS RUNNING!") 239 | parser.add_argument("--check-peers", action="store_true", 240 | help="Check if all peers are responsive, and if they agree about the peer list") 241 | parser.add_argument("--bootstrap-from-peer", action="store", metavar="PEER_URL", 242 | help="First wipe database and then bootstrap database from peer. 
DO NOT USE WHEN DENYHOSTS-SERVER IS RUNNING!") 243 | parser.add_argument("-f", "--force", action='store_true', 244 | help="Do not ask for confirmation, execute action immediately") 245 | args = parser.parse_args() 246 | 247 | configfile = args.config 248 | 249 | try: 250 | config.read_config(args.config) 251 | except ConfigParser.NoSectionError, e: 252 | print("Error in reading the configuration file from \"{}\": {}.".format(args.config, e)) 253 | print("Please review the configuration file. Look at the supplied denyhosts-server.conf.example for more information.") 254 | sys.exit() 255 | 256 | configure_logging() 257 | 258 | peering.load_keys() 259 | 260 | Registry.DBPOOL = adbapi.ConnectionPool(config.dbtype, **config.dbparams) 261 | Registry.register(models.Cracker, models.Report, models.Legacy) 262 | 263 | single_shot = False 264 | 265 | if not args.force and (args.recreate_database 266 | or args.evolve_database 267 | or args.purge_legacy_addresses 268 | or args.purge_reported_addresses 269 | or args.recreate_database 270 | or args.bootstrap_from_peer 271 | or args.purge_ip is not None): 272 | print("WARNING: do not run this method when denyhosts-server is running.") 273 | reply = raw_input("Are you sure you want to continue (Y/N): ") 274 | if not reply.upper().startswith('Y'): 275 | sys.exit() 276 | 277 | if args.check_peers: 278 | if peering.check_peers(): 279 | sys.exit(0) 280 | else: 281 | sys.exit(1) 282 | 283 | if args.recreate_database: 284 | single_shot = True 285 | database.clean_database().addCallbacks(stop_reactor, stop_reactor) 286 | 287 | if args.evolve_database: 288 | single_shot = True 289 | database.evolve_database().addCallbacks(stop_reactor, stop_reactor) 290 | 291 | if args.bootstrap_from_peer: 292 | single_shot = True 293 | peering.bootstrap_from(args.bootstrap_from_peer).addCallbacks(stop_reactor, stop_reactor) 294 | 295 | if args.purge_legacy_addresses: 296 | single_shot = True 297 | 
controllers.purge_legacy_addresses().addCallbacks(stop_reactor, stop_reactor) 298 | 299 | if args.purge_reported_addresses: 300 | single_shot = True 301 | controllers.purge_reported_addresses().addCallbacks(stop_reactor, stop_reactor) 302 | 303 | if args.purge_ip is not None: 304 | single_shot = True 305 | controllers.purge_ip(args.purge_ip).addCallbacks(stop_reactor, stop_reactor) 306 | 307 | if not single_shot: 308 | signal.signal(signal.SIGHUP, sighup_handler) 309 | reactor.addSystemEventTrigger("after", "startup", database.check_database_version) 310 | reactor.addSystemEventTrigger("before", "shutdown", shutdown) 311 | 312 | start_listening() 313 | 314 | # Set up maintenance and legacy sync jobs 315 | schedule_jobs() 316 | 317 | # Start reactor 318 | logging.info("Starting denyhosts-server version {}".format(__init__.version)) 319 | reactor.run() 320 | 321 | # vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4 322 | -------------------------------------------------------------------------------- /denyhosts_server/models.py: -------------------------------------------------------------------------------- 1 | # denyhosts sync server 2 | # Copyright (C) 2015 Jan-Pascal van Best 3 | 4 | # This program is free software: you can redistribute it and/or modify 5 | # it under the terms of the GNU Affero General Public License as published 6 | # by the Free Software Foundation, either version 3 of the License, or 7 | # (at your option) any later version. 8 | 9 | # This program is distributed in the hope that it will be useful, 10 | # but WITHOUT ANY WARRANTY; without even the implied warranty of 11 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 | # GNU Affero General Public License for more details. 13 | 14 | # You should have received a copy of the GNU Affero General Public License 15 | # along with this program. If not, see . 
import logging

from twistar.dbobject import DBObject

class Cracker(DBObject):
    """An attacking host, aggregated over all client reports.

    Persisted via twistar; has many Report children.
    """
    HASMANY=['reports']
    column_names=['ip_address','first_time', 'latest_time', 'resiliency', 'total_reports', 'current_reports']

    def __str__(self):
        # BUG FIX: the format string had six placeholders for seven
        # arguments, so current_reports was silently dropped.
        return "Cracker({},{},{},{},{},{},{})".format(self.id,self.ip_address,self.first_time,self.latest_time,self.resiliency,self.total_reports,self.current_reports)

class Report(DBObject):
    """A single client's report about one cracker; belongs to a Cracker."""
    BELONGSTO=['cracker']
    column_names=['ip_address','first_report_time', 'latest_report_time']

    def __str__(self):
        return "Report({},{},{},{})".format(self.id,self.ip_address,self.first_report_time,self.latest_report_time)

class Legacy(DBObject):
    """A host downloaded from the legacy (old-style) sync server."""
    TABLENAME="legacy"

# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4

# --------------------------------------------------------------------------
# denyhosts_server/peering.py
# --------------------------------------------------------------------------
# denyhosts sync server
# Copyright (C) 2016 Jan-Pascal van Best

# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.

# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Affero General Public License for more details.

# You should have received a copy of the GNU Affero General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
16 | 17 | from __future__ import print_function 18 | import logging 19 | import json 20 | import os.path 21 | import sys 22 | import xmlrpclib 23 | from xmlrpclib import ServerProxy 24 | 25 | from twisted.internet.defer import inlineCallbacks, returnValue 26 | from twisted.internet.threads import deferToThread 27 | 28 | import libnacl.public 29 | import libnacl.utils 30 | 31 | import __init__ 32 | import config 33 | import controllers 34 | import database 35 | from models import Cracker, Report 36 | import utils 37 | 38 | _own_key = None 39 | 40 | @inlineCallbacks 41 | def send_update(client_ip, timestamp, hosts): 42 | for peer in config.peers: 43 | logging.debug("Sending update to peer {}".format(peer)) 44 | data = { 45 | "client_ip": client_ip, 46 | "timestamp": timestamp, 47 | "hosts": hosts 48 | } 49 | data_json = json.dumps(data) 50 | crypted = _peer_boxes[peer].encrypt(data_json) 51 | base64 = crypted.encode('base64') 52 | 53 | try: 54 | server = yield deferToThread(ServerProxy, peer) 55 | yield deferToThread(server.peering.update, _own_key.pk.encode('hex'), base64) 56 | except: 57 | logging.warning("Unable to send update to peer {}".format(peer)) 58 | 59 | def decrypt_message(peer_key, message): 60 | peer = None 61 | for _peer in config.peers: 62 | if config.peers[_peer] == peer_key: 63 | peer = _peer 64 | break 65 | if peer is None: 66 | logging.warning("Got message from unknown peer with key {}".format(peer_key.encode('hex'))) 67 | raise Exception("Unknown key {}".format(peer_key.encode('hex'))) 68 | 69 | # Critical point: use our own key, instead of the one supplied by the peer 70 | message = _peer_boxes[peer].decrypt(message) 71 | return message 72 | 73 | @inlineCallbacks 74 | def handle_update(peer_key, update): 75 | json_data = decrypt_message(peer_key, update) 76 | data = json.loads(json_data) 77 | 78 | hosts = data["hosts"] 79 | client_ip = data["client_ip"] 80 | timestamp = data["timestamp"] 81 | 82 | yield 
controllers.handle_report_from_client(client_ip, timestamp, hosts) 83 | 84 | @inlineCallbacks 85 | def handle_schema_version(peer_key, please): 86 | data = decrypt_message(peer_key, please) 87 | 88 | if data != "please": 89 | logging.warning("Request for schema_version is something else than please: {}".format(data)) 90 | raise Exception("Illegal request {}".format(data)) 91 | 92 | schema_version = yield database.get_schema_version() 93 | 94 | returnValue(schema_version) 95 | 96 | @inlineCallbacks 97 | def handle_all_hosts(peer_key, please): 98 | data = decrypt_message(peer_key, please) 99 | 100 | if data != "please": 101 | logging.warning("Request for all_hosts is something else than please: {}".format(data)) 102 | raise Exception("Illegal request {}".format(data)) 103 | 104 | crackers = yield database.dump_crackers() 105 | 106 | logging.debug("Sending addresses of {} crackers to peer".format(len(crackers))) 107 | logging.debug("Crackers: ".format(crackers)) 108 | 109 | returnValue(crackers) 110 | 111 | @inlineCallbacks 112 | def handle_all_reports_for_host(peer_key, host): 113 | host = decrypt_message(peer_key, host) 114 | 115 | logging.debug("peering.handle_all_reports_for_host({})".format(host)) 116 | 117 | if not utils.is_valid_ip_address(host): 118 | logging.warning("Illegal IP address for all_reports_for_host: {}".format(host)) 119 | raise Exception("Illegal request {}".format(data)) 120 | 121 | reports = yield database.dump_reports_for_cracker(host) 122 | 123 | if reports is None: 124 | reports = [] 125 | 126 | #logging.debug("all_reports for {}: {}".format(host, reports)) 127 | returnValue(reports) 128 | 129 | @inlineCallbacks 130 | def handle_dump_table(peer_key, table): 131 | table = decrypt_message(peer_key, table) 132 | 133 | logging.debug("peering.handle_dump_table({})".format(table)) 134 | 135 | if table not in [ "info", "legacy", "history", "country_history" ]: 136 | logging.warning("Illegal table for dump_table: {}".format(table)) 137 | raise 
Exception("Illegal request {}".format(table)) 138 | 139 | rows = yield database.dump_table(table) 140 | 141 | if rows is None: 142 | rows = [] 143 | 144 | returnValue(rows) 145 | 146 | def list_peers(peer_key, please): 147 | data = decrypt_message(peer_key, please) 148 | 149 | if data != "please": 150 | logging.warning("Request for list_peers is something else than please: {}".format(data)) 151 | raise Exception("Illegal request {}".format(data)) 152 | 153 | return { 154 | "server_version": __init__.version, 155 | "peers": { 156 | peer: config.peers[peer].encode('hex') 157 | for peer in config.peers 158 | } 159 | } 160 | 161 | @inlineCallbacks 162 | def bootstrap_from(peer_url): 163 | crypted = _peer_boxes[peer_url].encrypt("please") 164 | please_base64 = crypted.encode('base64') 165 | 166 | server = yield deferToThread(ServerProxy, peer_url) 167 | remote_schema = yield server.peering.schema_version(_own_key.pk.encode('hex'), please_base64) 168 | 169 | print("Initializing database...") 170 | yield database.clean_database() 171 | 172 | local_schema = yield database.get_schema_version() 173 | 174 | if remote_schema != local_schema: 175 | raise Exception("Unable to bootstrap from {}: remote database schema version is {}, local {}".format(peer_url, remote_schema, local_schema)) 176 | 177 | logging.debug("Remote database schema: {}; local schema: {}".format(remote_schema, local_schema)) 178 | 179 | hosts = yield deferToThread(server.peering.all_hosts, _own_key.pk.encode('hex'), please_base64) 180 | 181 | #logging.debug("Hosts from peer: {}".format(hosts)) 182 | print("Copying data of {} hosts from peer".format(len(hosts)), end="") 183 | 184 | count = 0 185 | for host in hosts: 186 | if count%100 == 0: 187 | print(".", end="") 188 | sys.stdout.flush() 189 | count += 1 190 | yield database.bootstrap_cracker(host) 191 | 192 | host_ip = host[1] 193 | 194 | crypted = _peer_boxes[peer_url].encrypt(host_ip) 195 | base64 = crypted.encode('base64') 196 | response = yield 
deferToThread(server.peering.all_reports_for_host, _own_key.pk.encode('hex'), base64) 197 | #logging.debug("All reports response: {}".format(response)) 198 | 199 | for r in response: 200 | database.bootstrap_report(r) 201 | print(" Done") 202 | 203 | for table in [ "info", "legacy", "history", "country_history" ]: 204 | print("Copying {} table from peer...".format(table)) 205 | crypted = _peer_boxes[peer_url].encrypt(table) 206 | base64 = crypted.encode('base64') 207 | rows = yield deferToThread(server.peering.dump_table, _own_key.pk.encode('hex'), base64) 208 | for row in rows: 209 | database.bootstrap_table(table, row) 210 | 211 | def load_keys(): 212 | global _own_key 213 | global _peer_boxes 214 | 215 | try: 216 | _own_key = libnacl.utils.load_key(config.key_file) 217 | except: 218 | logging.info("No private key yet, creating one in {}".format(config.key_file)) 219 | _own_key = libnacl.public.SecretKey() 220 | _own_key.save(config.key_file) 221 | 222 | _peer_boxes = { 223 | peer: 224 | libnacl.public.Box(_own_key.sk, libnacl.public.PublicKey(config.peers[peer])) 225 | for peer in config.peers 226 | } 227 | 228 | logging.debug("Configured peers: {}".format(config.peers)) 229 | 230 | def check_peers(): 231 | """ Connect to all configured peers. 
Check if they are reachable, and if their 232 | list of peers and associated keys conforms to mine """ 233 | success = True 234 | for peer in config.peers: 235 | print("Examining peer {}...".format(peer)) 236 | peer_server = ServerProxy(peer) 237 | try: 238 | response = peer_server.list_peers(_own_key.pk.encode('hex'), _peer_boxes[peer].encrypt('please').encode('base64')) 239 | except Exception, e: 240 | print("Error requesting peer list from {} (maybe it's down, or it doesn't know my key!)".format(peer)) 241 | print("Error message: {}".format(e)) 242 | success = False 243 | continue 244 | 245 | print(" Peer version: {}".format(response["server_version"])) 246 | peer_list = response["peers"] 247 | 248 | # peer list should contain all the peers I know, except for the peer I'm asking, but including myself 249 | seen_peers = set() 250 | for other_peer in config.peers: 251 | if other_peer == peer: 252 | continue 253 | if other_peer not in peer_list: 254 | print(" Peer {} does not know peer {}!".format(peer, other_peer)) 255 | success = False 256 | continue 257 | if config.peers[other_peer] != peer_list[other_peer].decode('hex'): 258 | print(" Peer {} knows peer {} but with key {} instead of {}!".format(peer, other_peer, peer_list[other_peer], config.peers[other_peer].encode('hex'))) 259 | success = False 260 | continue 261 | print(" Common peer (OK): {}".format(other_peer)) 262 | seen_peers.add(other_peer) 263 | 264 | # Any keys not seen should be my own 265 | own_key_seen = False 266 | for other_peer in peer_list: 267 | if other_peer in seen_peers: 268 | continue 269 | if peer_list[other_peer].decode('hex') == _own_key.pk: 270 | own_key_seen = True 271 | print(" Peer {} knows me as {} (OK)".format(peer, other_peer)) 272 | continue 273 | print(" Peer {} knows about (to me) unknown peer {} with key {}!".format(peer, other_peer, peer_list[other_peer])) 274 | success = False 275 | 276 | if not own_key_seen: 277 | print(" Peer {} does not know about me!") 278 | success = 
False 279 | 280 | if success: 281 | print("All peer servers configured correctly") 282 | else: 283 | print("Inconsistent peer server configuration, check all configuration files!") 284 | 285 | return success 286 | 287 | # vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4 288 | -------------------------------------------------------------------------------- /denyhosts_server/peering_views.py: -------------------------------------------------------------------------------- 1 | 2 | # denyhosts sync server 3 | # Copyright (C) 2015-2016 Jan-Pascal van Best 4 | 5 | # This program is free software: you can redistribute it and/or modify 6 | # it under the terms of the GNU Affero General Public License as published 7 | # by the Free Software Foundation, either version 3 of the License, or 8 | # (at your option) any later version. 9 | 10 | # This program is distributed in the hope that it will be useful, 11 | # but WITHOUT ANY WARRANTY; without even the implied warranty of 12 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 | # GNU Affero General Public License for more details. 14 | 15 | # You should have received a copy of the GNU Affero General Public License 16 | # along with this program. If not, see . 
17 | 18 | import time 19 | import logging 20 | import random 21 | 22 | from twisted.web import server, xmlrpc, error 23 | from twisted.web.resource import Resource 24 | from twisted.web.xmlrpc import withRequest 25 | from twisted.internet.defer import inlineCallbacks, returnValue 26 | from twisted.internet import reactor 27 | from twisted.python import log 28 | 29 | import models 30 | from models import Cracker, Report 31 | import config 32 | import controllers 33 | import utils 34 | import stats 35 | import peering 36 | 37 | class PeeringServer(xmlrpc.XMLRPC): 38 | """ 39 | Peering xmlrpc methods 40 | """ 41 | 42 | @withRequest 43 | @inlineCallbacks 44 | def xmlrpc_update(self, request, key, update): 45 | try: 46 | logging.info("update({}, {})".format(key, update)) 47 | key = key.decode('hex') 48 | update = update.decode('base64') 49 | yield peering.handle_update(key, update) 50 | except xmlrpc.Fault, e: 51 | raise e 52 | except Exception, e: 53 | log.err(_why="Exception in update") 54 | raise xmlrpc.Fault(105, "Error in update({},{})".format(key, update)) 55 | returnValue(0) 56 | 57 | @withRequest 58 | @inlineCallbacks 59 | def xmlrpc_schema_version(self, request, key, please): 60 | try: 61 | logging.info("schema_version({}, {})".format(key, please)) 62 | key = key.decode('hex') 63 | please = please.decode('base64') 64 | result = yield peering.handle_schema_version(key, please) 65 | returnValue(result) 66 | except xmlrpc.Fault, e: 67 | raise e 68 | except Exception, e: 69 | log.err(_why="Exception in schema_version") 70 | raise xmlrpc.Fault(106, "Error in schema_version({},{})".format(key, please)) 71 | 72 | @withRequest 73 | @inlineCallbacks 74 | def xmlrpc_all_hosts(self, request, key, please): 75 | try: 76 | logging.info("all_hosts({}, {})".format(key, please)) 77 | key = key.decode('hex') 78 | please = please.decode('base64') 79 | result = yield peering.handle_all_hosts(key, please) 80 | returnValue(result) 81 | except xmlrpc.Fault, e: 82 | raise e 83 | 
except Exception, e: 84 | log.err(_why="Exception in all_hosts") 85 | raise xmlrpc.Fault(106, "Error in all_hosts({},{})".format(key, please)) 86 | 87 | @withRequest 88 | @inlineCallbacks 89 | def xmlrpc_all_reports_for_host(self, request, key, host): 90 | try: 91 | logging.info("all_reports_for_hos({}, {})".format(key, host)) 92 | key = key.decode('hex') 93 | host = host.decode('base64') 94 | result = yield peering.handle_all_reports_for_host(key, host) 95 | returnValue(result) 96 | except xmlrpc.Fault, e: 97 | raise e 98 | except Exception, e: 99 | log.err(_why="Exception in all_updates_for_host") 100 | raise xmlrpc.Fault(107, "Error in all_updates_for_host({},{})".format(key, host)) 101 | 102 | @withRequest 103 | @inlineCallbacks 104 | def xmlrpc_dump_table(self, request, key, host): 105 | try: 106 | logging.info("dump_table({}, {})".format(key, host)) 107 | key = key.decode('hex') 108 | host = host.decode('base64') 109 | result = yield peering.handle_dump_table(key, host) 110 | returnValue(result) 111 | except xmlrpc.Fault, e: 112 | raise e 113 | except Exception, e: 114 | log.err(_why="Exception in dump_table") 115 | raise xmlrpc.Fault(106, "Error in dump_table({},{})".format(key, host)) 116 | 117 | @withRequest 118 | @inlineCallbacks 119 | def xmlrpc_list_peers(self, request, key, please): 120 | try: 121 | logging.info("Received list_peers call") 122 | logging.info("list_peers({}, {})".format(key, please)) 123 | key = key.decode('hex') 124 | please = please.decode('base64') 125 | result = peering.list_peers(key, please) 126 | yield 127 | returnValue(result) 128 | except xmlrpc.Fault, e: 129 | raise e 130 | except Exception, e: 131 | log.err(_why="Exception in list_peers") 132 | raise xmlrpc.Fault(108, "Error in list_peers({},{})".format(key, please)) 133 | returnValue(0) 134 | 135 | # vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4 136 | -------------------------------------------------------------------------------- /denyhosts_server/stats.py: 
-------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | # denyhosts sync server 4 | # Copyright (C) 2015-2017 Jan-Pascal van Best 5 | 6 | # This program is free software: you can redistribute it and/or modify 7 | # it under the terms of the GNU Affero General Public License as published 8 | # by the Free Software Foundation, either version 3 of the License, or 9 | # (at your option) any later version. 10 | 11 | # This program is distributed in the hope that it will be useful, 12 | # but WITHOUT ANY WARRANTY; without even the implied warranty of 13 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 | # GNU Affero General Public License for more details. 15 | 16 | # You should have received a copy of the GNU Affero General Public License 17 | # along with this program. If not, see . 18 | 19 | 20 | import config 21 | import datetime 22 | import logging 23 | import os.path 24 | import socket 25 | import time 26 | 27 | from twisted.internet import reactor, threads, task 28 | from twisted.internet.defer import inlineCallbacks, returnValue 29 | from twisted.python import log 30 | from twistar.registry import Registry 31 | 32 | from jinja2 import Template, Environment, FileSystemLoader 33 | 34 | import GeoIP 35 | 36 | import matplotlib 37 | # Prevent errors from matplotlib instantiating a Tk window 38 | matplotlib.use('Agg') 39 | import matplotlib.pyplot as plt 40 | import matplotlib.dates as mdates 41 | import numpy 42 | 43 | import models 44 | import database 45 | import __init__ 46 | 47 | def format_datetime(value, format='medium'): 48 | dt = datetime.datetime.fromtimestamp(value) 49 | if format == 'full': 50 | format="EEEE, d. 
MMMM y 'at' HH:mm:ss" 51 | elif format == 'medium': 52 | format="%a %d-%m-%Y %H:%M:%S" 53 | return dt.strftime(format) 54 | 55 | def insert_zeroes(rows, max = None): 56 | result = [] 57 | index = 0 58 | if max is None: 59 | max = rows[-1][0] + 1 60 | 61 | for value in xrange(max): 62 | if index < len(rows) and rows[index][0] == value: 63 | result.append(rows[index]) 64 | index += 1 65 | else: 66 | result.append((value,0)) 67 | return result 68 | 69 | def humanize_number(number, pos): 70 | """Return a humanized string representation of a number.""" 71 | abbrevs = ( 72 | (1E15, 'P'), 73 | (1E12, 'T'), 74 | (1E9, 'G'), 75 | (1E6, 'M'), 76 | (1E3, 'k'), 77 | (1, '') 78 | ) 79 | if number < 1000: 80 | return str(number) 81 | for factor, suffix in abbrevs: 82 | if number >= factor: 83 | break 84 | return '%.*f%s' % (0, number / factor, suffix) 85 | 86 | # Functions containing blocking io, call from thread! 87 | def fixup_crackers(hosts): 88 | gi = GeoIP.new(GeoIP.GEOIP_MEMORY_CACHE) 89 | for host in hosts: 90 | try: 91 | host.country = gi.country_name_by_addr(host.ip_address) 92 | except Exception, e: 93 | logging.debug("Exception looking up country for {}: {}".format(host.ip_address, e)) 94 | host.country = '' 95 | try: 96 | if config.stats_resolve_hostnames: 97 | hostinfo = socket.gethostbyaddr(host.ip_address) 98 | host.hostname = hostinfo[0] 99 | else: 100 | host.hostname = host.ip_address 101 | except Exception, e: 102 | logging.debug("Exception looking up reverse DNS for {}: {}".format(host.ip_address, e)) 103 | host.hostname = "-" 104 | 105 | def make_daily_graph(txn): 106 | # Calculate start of daily period: yesterday on the beginning of the 107 | # current hour 108 | now = time.time() 109 | dt_now = datetime.datetime.fromtimestamp(now) 110 | start_hour = dt_now.hour 111 | dt_onthehour = dt_now.replace(minute=0, second=0, microsecond=0) 112 | dt_start = dt_onthehour - datetime.timedelta(days=1) 113 | yesterday = int(dt_start.strftime('%s')) 114 | 115 | 
txn.execute(database.translate_query(""" 116 | SELECT CAST((first_report_time-?)/3600 AS UNSIGNED INTEGER), count(*) 117 | FROM reports 118 | WHERE first_report_time > ? 119 | GROUP BY CAST((first_report_time-?)/3600 AS UNSIGNED INTEGER) 120 | ORDER BY first_report_time ASC 121 | """), (yesterday, yesterday, yesterday)) 122 | rows = txn.fetchall() 123 | no_data = False 124 | if not rows: 125 | logging.debug("No data for past 24 hours") 126 | no_data = True 127 | rows = [(0,0)] 128 | #logging.debug("Daily: {}".format(rows)) 129 | rows = insert_zeroes(rows, 24) 130 | #logging.debug("Daily: {}".format(rows)) 131 | 132 | x = [dt_start + datetime.timedelta(hours=row[0]) for row in rows] 133 | y = [row[1] for row in rows] 134 | 135 | # calc the trendline 136 | x_num = mdates.date2num(x) 137 | 138 | z = numpy.polyfit(x_num, y, 1) 139 | p = numpy.poly1d(z) 140 | 141 | xx = numpy.linspace(x_num.min(), x_num.max(), 100) 142 | dd = mdates.num2date(xx) 143 | 144 | fig = plt.figure() 145 | ax = fig.gca() 146 | ax.xaxis.set_major_formatter(mdates.DateFormatter('%H:%M')) 147 | ax.xaxis.set_major_locator(mdates.HourLocator(interval=4)) 148 | ax.yaxis.set_major_formatter(matplotlib.ticker.FuncFormatter(humanize_number)) 149 | ax.set_title("Reports per hour") 150 | ax.plot(x,y, linestyle='solid', marker='o', markerfacecolor='blue') 151 | ax.plot(dd, p(xx), "b--") 152 | ax.set_ybound(lower=0) 153 | fig.autofmt_xdate() 154 | if no_data: 155 | fig.text(0.5, 0.5, "Not enough data", size="x-large", 156 | ha="center", va="center") 157 | fig.savefig(os.path.join(config.graph_dir, 'hourly.svg')) 158 | fig.clf() 159 | plt.close(fig) 160 | 161 | def make_monthly_graph(txn): 162 | # Calculate start of monthly period: last month on the beginning of the 163 | # current day 164 | today = datetime.date.today() 165 | dt_start = today - datetime.timedelta(weeks=4) 166 | 167 | txn.execute(database.translate_query(""" 168 | SELECT date, num_reports 169 | FROM history 170 | WHERE date >= ? 
def make_monthly_graph(txn):
    """Render the reports-per-day graph for the last four weeks.

    Reads the pre-aggregated `history` table and writes `monthly.svg`
    into config.graph_dir.

    :param txn: database cursor supplied by Registry.DBPOOL.runInteraction
    """
    # Calculate start of monthly period: last month on the beginning of the
    # current day
    today = datetime.date.today()
    dt_start = today - datetime.timedelta(weeks=4)

    txn.execute(database.translate_query("""
        SELECT date, num_reports
        FROM history
        WHERE date >= ?
        ORDER BY date ASC
        """), (dt_start,))
    rows = txn.fetchall()
    no_data = False
    if rows is None or len(rows)==0:
        no_data = True
        # Single zero sample so the axes can still be drawn.
        x = [ today, ]
        y = [ 0, ]
    else:
        (x,y) = zip(*rows)

    # calc the trendline
    x_num = mdates.date2num(x)

    # A fit needs at least real data; skip trendline when empty.
    if not no_data:
        z = numpy.polyfit(x_num, y, 1)
        p = numpy.poly1d(z)
        xx = numpy.linspace(x_num.min(), x_num.max(), 100)
        dd = mdates.num2date(xx)

    fig = plt.figure()
    ax = fig.gca()
    ax.xaxis.set_major_formatter(mdates.DateFormatter('%d %b'))
    ax.xaxis.set_major_locator(mdates.DayLocator(interval=4))
    ax.yaxis.set_major_formatter(matplotlib.ticker.FuncFormatter(humanize_number))
    ax.set_title("Reports per day")
    ax.plot(x,y, linestyle='solid', marker='o', markerfacecolor='blue')
    if not no_data:
        # Dashed blue linear trendline.
        ax.plot(dd, p(xx),"b--")
    ax.set_ybound(lower=0)
    fig.autofmt_xdate()
    if no_data:
        fig.text(0.5, 0.5, "Not enough data", size="x-large",
            ha="center", va="center")
    fig.savefig(os.path.join(config.graph_dir, 'monthly.svg'))
    # Explicit cleanup: this runs repeatedly in a long-lived process.
    fig.clf()
    plt.close(fig)
def make_history_graph(txn):
    """Render the reports-per-day graph over the full recorded history.

    Writes `history.svg` into config.graph_dir.

    :param txn: database cursor supplied by Registry.DBPOOL.runInteraction
    """
    # Graph since first record
    txn.execute(database.translate_query("""
        SELECT date FROM history
        ORDER BY date ASC
        LIMIT 1
        """))
    first_time = txn.fetchall()
    if first_time is not None and len(first_time)>0 and first_time[0][0] is not None:
        dt_first = first_time[0][0]
    else:
        # Empty history table: pretend the data set starts today.
        dt_first= datetime.date.today()
    num_days = ( datetime.date.today() - dt_first ).days
    #logging.debug("First day in data set: {}".format(dt_first))
    #logging.debug("Number of days in data set: {}".format(num_days))
    no_data = False
    if num_days == 0:
        no_data = True
        # Single zero sample so the axes can still be drawn.
        x = [ dt_first, ]
        y = [ 0, ]
    else:
        txn.execute(database.translate_query("""
            SELECT date, num_reports
            FROM history
            ORDER BY date ASC
            """))
        rows = txn.fetchall()
        (x,y) = zip(*rows)

    # calc the trendline
    x_num = mdates.date2num(x)

    if not no_data:
        # Linear (degree-1) fit over the whole history.
        z = numpy.polyfit(x_num, y, 1)
        p = numpy.poly1d(z)

        xx = numpy.linspace(x_num.min(), x_num.max(), 100)
        dd = mdates.num2date(xx)

    fig = plt.figure()
    ax = fig.gca()

    # Let matplotlib pick tick spacing; the span is open-ended.
    locator = mdates.AutoDateLocator(interval_multiples=False)
    ax.xaxis.set_major_locator(locator)
    ax.xaxis.set_major_formatter(mdates.AutoDateFormatter(locator))
    ax.yaxis.set_major_formatter(matplotlib.ticker.FuncFormatter(humanize_number))
    ax.set_title("Reports per day")
    if (num_days<100):
        # Few points: show individual markers.
        ax.plot(x,y, linestyle='solid', marker='o', markerfacecolor='blue')
    else:
        # Many points: markers would smear together, draw a plain line.
        ax.plot(x,y, linestyle='solid', marker='')
    if not no_data:
        ax.plot(dd, p(xx),"b--")
    ax.set_ybound(lower=0)
    fig.autofmt_xdate()

    if num_days == 0:
        fig.text(0.50, 0.50, "Not enough data", size="x-large", ha="center", va="center")

    fig.savefig(os.path.join(config.graph_dir, 'history.svg'))
    # Explicit cleanup: this runs repeatedly in a long-lived process.
    fig.clf()
    plt.close(fig)
def make_contrib_graph(txn):
    """Render the number-of-contributors-per-day graph.

    Writes `contrib.svg` into config.graph_dir.

    :param txn: database cursor supplied by Registry.DBPOOL.runInteraction
    """
    # Number of reporters over days
    txn.execute(database.translate_query("""
        SELECT date FROM history
        ORDER BY date ASC
        LIMIT 1
        """))
    first_time = txn.fetchall()
    if first_time is not None and len(first_time)>0 and first_time[0][0] is not None:
        dt_first = first_time[0][0]
    else:
        # Empty history table: pretend the data set starts today.
        dt_first = datetime.date.today()
    num_days = ( datetime.date.today() - dt_first ).days
    if num_days == 0:
        # Single zero sample so the axes can still be drawn.
        x = [dt_first, ]
        y = [0, ]
    else:
        txn.execute(database.translate_query("""
            SELECT date, num_contributors
            FROM history
            ORDER BY date ASC
            """))
        rows = txn.fetchall()
        (x,y) = zip(*rows)

    fig = plt.figure()
    ax = fig.gca()

    # Let matplotlib pick tick spacing; the span is open-ended.
    locator = mdates.AutoDateLocator(interval_multiples=False)
    ax.xaxis.set_major_locator(locator)
    ax.xaxis.set_major_formatter(mdates.AutoDateFormatter(locator))
    ax.yaxis.set_major_formatter(matplotlib.ticker.FuncFormatter(humanize_number))
    ax.set_title("Number of contributors")
    if (num_days<100):
        # Few points: show individual markers.
        ax.plot(x,y, linestyle='solid', marker='o', markerfacecolor='blue')
    else:
        # Many points: markers would smear together, draw a plain line.
        ax.plot(x,y, linestyle='solid', marker='')
    ax.set_ybound(lower=0)
    fig.autofmt_xdate()
    if num_days == 0:
        fig.text(0.50, 0.50, "Not enough data", size="x-large", ha="center", va="center")
    fig.savefig(os.path.join(config.graph_dir, 'contrib.svg'))
    # Explicit cleanup: this runs repeatedly in a long-lived process.
    fig.clf()
    plt.close(fig)
def make_country_piegraph(txn):
    """Render a pie chart of total reports per country (top N).

    Writes `country_pie.svg` into config.graph_dir; produces nothing when
    the country_history table is empty.

    :param txn: database cursor supplied by Registry.DBPOOL.runInteraction
    """
    # Total reports per country
    limit = 10 # Fixme configurable
    txn.execute(database.translate_query("""
        SELECT country, num_reports
        FROM country_history
        ORDER BY num_reports DESC
        LIMIT ?
        """),(limit,))

    rows = txn.fetchall()
    if rows is None or len(rows)==0:
        # No data at all: skip rendering rather than draw an empty pie.
        return

    (labels,sizes) = zip(*rows)

    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.pie(sizes, labels=labels,
        autopct='%1.1f%%', shadow=True, startangle=90)
    # Set aspect ratio to be equal so that pie is drawn as a circle.
    ax.axis('equal')

    fig.savefig(os.path.join(config.graph_dir, 'country_pie.svg'))
    # Explicit cleanup: this runs repeatedly in a long-lived process.
    fig.clf()
    plt.close(fig)

def make_country_bargraph(txn):
    """Render a horizontal bar chart of attacks per country of origin.

    Writes `country_bar.svg` into config.graph_dir; the top `limit`
    countries are shown with their percentage of the total.

    :param txn: database cursor supplied by Registry.DBPOOL.runInteraction
    """
    # Total reports per country
    limit = 10 # Fixme configurable

    # No placeholders, so translate_query is not needed here.
    txn.execute("""
        SELECT sum(num_reports)
        FROM country_history
        """)

    rows = txn.fetchall()
    no_data = False
    if rows is None or len(rows)==0 or rows[0] is None or rows[0][0] is None:
        no_data = True
        # Placeholder values keep the drawing code on one path.
        total_reports = 1
        countries = ["Unknown",]
        counts = [1,]
        max_count = 1
    else:
        total_reports = int(rows[0][0])

        txn.execute(database.translate_query("""
            SELECT country, num_reports
            FROM country_history
            ORDER BY num_reports DESC
            LIMIT ?
            """),(limit,))

        rows = txn.fetchall()
        if rows is None or len(rows)==0:
            return

        # Reversed so the largest bar ends up at the top of the chart.
        (countries,counts) = zip(*reversed(rows))
        max_count = max(counts)

    fig = plt.figure()
    ax = fig.add_subplot(111)

    y_pos = numpy.arange(len(countries))
    bars = ax.barh(y_pos, counts, align='center', alpha=0.6)
    count = 0
    for bar in bars:
        height = bar.get_height()
        # Label inside the bar: "<pct>% <country>"; *1.0 forces float
        # division under Python 2.
        ax.text(max_count / 20., bar.get_y() + height / 2.,
            "{}% {}".format(round(counts[count]*1.0/total_reports*100), countries[count] ),
            ha='left', va='center')
        count += 1
    ax.set_yticks(y_pos)
    # Country names are drawn inside the bars, so hide the tick labels.
    ax.set_yticklabels([])
    ax.set_title('Number of attacks per country of origin (top {})'.format(limit))
    ax.xaxis.set_major_formatter(matplotlib.ticker.FuncFormatter(humanize_number))
    ax.set_ylim(ymin=-1)
    fig.tight_layout()
    if no_data:
        fig.text(0.50, 0.50, "Not enough data", size="x-large", ha="center", va="center")

    fig.savefig(os.path.join(config.graph_dir, 'country_bar.svg'))
    # Explicit cleanup: this runs repeatedly in a long-lived process.
    fig.clf()
    plt.close(fig)

# Module-level statistics cache shared between update_stats_cache() and
# render_stats(): _cache holds {"stats": ..., "time": ...} once populated.
_cache = None
# Re-entrancy guard for update_stats_cache(); single-threaded reactor, so
# a plain boolean suffices.
_stats_busy = False
@inlineCallbacks
def update_stats_cache():
    """Recompute the statistics cache and regenerate all graphs.

    Twisted inlineCallbacks coroutine. Guards against concurrent runs via
    the module-level _stats_busy flag; on any failure the old cache is
    kept and the error is logged.
    """
    global _stats_busy
    global _cache
    if _stats_busy:
        logging.debug("Already updating statistics cache, exiting")
        returnValue(None)
    _stats_busy = True

    logging.debug("Updating statistics cache...")

    # Fill history table for yesterday, when necessary
    yield update_recent_history()
    yield update_country_history()

    now = time.time()
    stats = {}
    stats["last_updated"] = now
    stats["has_hostnames"] = config.stats_resolve_hostnames
    # Note paths configured in main.py by the Resource objects
    stats["static_base"] = "../static"
    stats["graph_base"] = "../static/graphs"
    stats["server_version"] = __init__.version
    try:
        #rows = yield database.run_query("SELECT num_hosts,num_reports, num_clients, new_hosts FROM stats ORDER BY time DESC LIMIT 1")
        stats["num_hosts"] = yield models.Cracker.count()
        stats["num_reports"] = yield models.Report.count()

        # A "client" is any distinct reporter IP seen in the reports table.
        rows = yield database.run_query("SELECT count(DISTINCT ip_address) FROM reports")
        if len(rows)>0:
            stats["num_clients"] = rows[0][0]
        else:
            stats["num_clients"] = 0

        yesterday = now - 24*3600
        stats["daily_reports"] = yield models.Report.count(where=["first_report_time>?", yesterday])
        stats["daily_new_hosts"] = yield models.Cracker.count(where=["first_time>?", yesterday])

        recent_hosts = yield models.Cracker.find(orderby="latest_time DESC", limit=10)
        # fixup_crackers does blocking GeoIP/DNS lookups: run in a thread.
        yield threads.deferToThread(fixup_crackers, recent_hosts)
        stats["recent_hosts"] = recent_hosts

        most_reported_hosts = yield models.Cracker.find(orderby="total_reports DESC", limit=10)
        yield threads.deferToThread(fixup_crackers, most_reported_hosts)
        stats["most_reported_hosts"] = most_reported_hosts

        logging.info("Stats: {} reports for {} hosts from {} reporters".format(
            stats["num_reports"], stats["num_hosts"], stats["num_clients"]))

        # Each graph function runs inside its own database interaction.
        yield Registry.DBPOOL.runInteraction(make_daily_graph)
        yield Registry.DBPOOL.runInteraction(make_monthly_graph)
        yield Registry.DBPOOL.runInteraction(make_contrib_graph)
        yield Registry.DBPOOL.runInteraction(make_history_graph)
        yield Registry.DBPOOL.runInteraction(make_country_bargraph)

        if _cache is None:
            _cache = {}
        # Only replace the cache after everything above succeeded.
        _cache["stats"] = stats
        _cache["time"] = time.time()
        logging.debug("Finished updating statistics cache...")
    except Exception, e:
        log.err(_why="Error updating statistics: {}".format(e))
        logging.warning("Error updating statistics: {}".format(e))

    _stats_busy = False

@inlineCallbacks
def render_stats():
    """Render the statistics HTML page from the cached data.

    Twisted inlineCallbacks coroutine returning the rendered HTML string
    (via returnValue), or None if rendering failed. Polls once per second
    until the first cache fill has completed.
    """
    global _cache
    logging.info("Rendering statistics page...")
    if _cache is None:
        while _cache is None:
            logging.debug("No statistics cached yet, waiting for cache generation to finish...")
            # Sleep 1s without blocking the reactor; the no-op lambda is
            # just deferLater's required callable.
            yield task.deferLater(reactor, 1, lambda _:0, 0)

    now = time.time()
    try:
        env = Environment(loader=FileSystemLoader(config.template_dir))
        # Expose format_datetime as the `datetime` Jinja2 filter.
        env.filters['datetime'] = format_datetime
        template = env.get_template('stats.html')
        html = template.render(_cache["stats"])

        logging.info("Done rendering statistics page...")
        returnValue(html)
    except Exception, e:
        log.err(_why="Error rendering statistics page: {}".format(e))
        logging.warning("Error creating statistics page: {}".format(e))
(first_report_time>=? AND first_report_time=? AND latest_report_time0 and rows[0][0] is not None: 538 | last_filled_date = rows[0][0] 539 | else: 540 | txn.execute("SELECT MIN(first_report_time) FROM reports") 541 | first_time = txn.fetchall() 542 | if first_time is not None and len(first_time)>0 and first_time[0][0] is not None: 543 | last_filled_date = datetime.date.fromtimestamp(first_time[0][0]) 544 | else: 545 | last_filled_date = datetime.date.today() 546 | 547 | first_date = last_filled_date + datetime.timedelta(days=1) 548 | # Then fill history 549 | date = first_date 550 | while date <= last_date: 551 | update_history_txn(txn, date) 552 | date = date + datetime.timedelta(days = 1) 553 | 554 | except Exception as e: 555 | log.err(_why="Error updating history: {}".format(e)) 556 | logging.warning("Error updating history: {}".format(e)) 557 | 558 | 559 | def fixup_history_txn(txn): 560 | try: 561 | txn.execute("SELECT MIN(first_report_time) FROM reports") 562 | first_time = txn.fetchall() 563 | if first_time is not None and len(first_time)>0 and first_time[0][0] is not None: 564 | first_date = datetime.date.fromtimestamp(first_time[0][0]) 565 | else: 566 | # No data, nothing to do 567 | return 568 | 569 | last_date = datetime.date.today() - datetime.timedelta(days = 1) 570 | 571 | # Find any dates for which the history has not been filled 572 | txn.execute("SELECT date FROM history ORDER BY date ASC") 573 | rows = txn.fetchall() 574 | dates = set([row[0] for row in rows]) 575 | 576 | date = first_date 577 | while date <= last_date: 578 | if date not in dates: 579 | update_history_txn(txn, date) 580 | date = date + datetime.timedelta(days = 1) 581 | 582 | except Exception as e: 583 | log.err(_why="Error fixing up history: {}".format(e)) 584 | logging.warning("Error fixing up history: {}".format(e)) 585 | 586 | def update_recent_history(date=None): 587 | "date should be a datetime.date or None, indicating yesterday" 588 | return 
def update_country_history_txn(txn, date=None, include_history = False):
    """Accumulate per-country report counts into the country_history table.

    Runs inside a database interaction. Counts reports whose
    first_report_time falls on `date`, resolves each cracker IP to a
    country via GeoIP, and merges the counts into the existing
    country_history rows.

    :param txn: database cursor supplied by Registry.DBPOOL.runInteraction
    :param date: datetime.date to process; None means yesterday
    :param include_history: when True, count all reports since the epoch
        instead of only `date` (used to rebuild the table)
    """
    if date is None:
        date = datetime.date.today() - datetime.timedelta(days = 1)

    if include_history:
        start_time = 0
    else:
        # Seconds since the Unix epoch for midnight at start of `date`.
        start_time = (date - datetime.date(1970, 1, 1)).total_seconds()
    end_time = (date + datetime.timedelta(days=1) - datetime.date(1970, 1, 1)).total_seconds()

    # Load existing totals so new counts are merged, not overwritten.
    txn.execute("SELECT country_code,country,num_reports FROM country_history")
    rows = txn.fetchall()
    result = {row[0]:(row[1],row[2]) for row in rows}

    gi = GeoIP.new(GeoIP.GEOIP_MEMORY_CACHE)

    txn.execute(database.translate_query(
        """SELECT crackers.ip_address,COUNT(*) as count
        FROM crackers LEFT JOIN reports ON reports.cracker_id = crackers.id
        WHERE reports.first_report_time >= ? AND reports.first_report_time < ?
        GROUP BY crackers.id
        """), (start_time, end_time))

    # Fetch in batches to bound memory use on large result sets.
    while True:
        rows = txn.fetchmany(size=100)
        if len(rows) == 0:
            break
        for row in rows:
            ip = row[0]
            count = row[1]
            try:
                country_code = gi.country_code_by_addr(ip)
                country = gi.country_name_by_addr(ip)
                if country_code is None:
                    # "ZZ" is the conventional unknown-country code.
                    country_code = "ZZ"
                if country is None:
                    country = "(Unknown)"
                if country_code in result:
                    # Merge with the previously stored total.
                    count += result[country_code][1]
                result[country_code] = (country,count)
            except Exception, e:
                # GeoIP failures only lose one data point; keep going.
                logging.debug("Exception looking up country for {}: {}".format(ip, e))

    for country_code in result:
        country,count = result[country_code]
        # REPLACE: insert-or-update keyed on country_code.
        txn.execute(database.translate_query(
            """REPLACE INTO country_history (country_code,country,num_reports)
            VALUES (?,?,?)"""),
            (country_code,country,count))
indicating yesterday" 642 | return Registry.DBPOOL.runInteraction(update_country_history_txn, date, include_history) 643 | 644 | # vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4 645 | -------------------------------------------------------------------------------- /denyhosts_server/utils.py: -------------------------------------------------------------------------------- 1 | # denyhosts sync server 2 | # Copyright (C) 2015-2016 Jan-Pascal van Best 3 | 4 | # This program is free software: you can redistribute it and/or modify 5 | # it under the terms of the GNU Affero General Public License as published 6 | # by the Free Software Foundation, either version 3 of the License, or 7 | # (at your option) any later version. 8 | 9 | # This program is distributed in the hope that it will be useful, 10 | # but WITHOUT ANY WARRANTY; without even the implied warranty of 11 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 | # GNU Affero General Public License for more details. 13 | 14 | # You should have received a copy of the GNU Affero General Public License 15 | # along with this program. If not, see . 
# Set of host IP addresses currently being updated; acts as a cooperative
# lock between concurrent inlineCallbacks chains on the (single-threaded)
# Twisted reactor.
_hosts_busy = set()

@inlineCallbacks
def wait_and_lock_host(host):
    """Wait until `host` is free, then mark it busy.

    Cooperative per-host lock: polls every 10 ms on the reactor until no
    other coroutine holds the host, then claims it. Must be paired with
    unlock_host().

    :param host: host identifier (IP address string)
    """
    try:
        while host in _hosts_busy:
            logging.debug("waiting to update host {}, {} blocked now".format(host, len(_hosts_busy)))
            # Yield to the reactor for 10ms; the no-op lambda is just
            # deferLater's required callable.
            yield task.deferLater(reactor, 0.01, lambda _:0, 0)
        _hosts_busy.add(host)
    # Was a bare `except:` which also swallowed SystemExit/KeyboardInterrupt.
    except Exception:
        logging.debug("Exception in locking {}".format(host), exc_info=True)

    returnValue(0)

def unlock_host(host):
    """Release the per-host lock taken by wait_and_lock_host().

    Unlocking a host that is not locked is logged and ignored.
    """
    try:
        _hosts_busy.remove(host)
        #logging.debug("host {} unlocked, {} blocked now".format(host, len(_hosts_busy)))
    # set.remove only raises KeyError; don't mask anything broader.
    except KeyError:
        logging.debug("Exception in unlocking {}".format(host), exc_info=True)

def none_waiting():
    """Return True when no host is currently locked."""
    return len(_hosts_busy) == 0

def count_waiting():
    """Return the number of currently locked hosts."""
    return len(_hosts_busy)

def is_valid_ip_address(ip_address):
    """Return True for a syntactically valid, publicly routable IP address.

    Rejects unparsable strings as well as reserved, private, loopback,
    unspecified, multicast and link-local addresses.

    :param ip_address: IPv4 or IPv6 address as a string
    """
    try:
        ip = ipaddr.IPAddress(ip_address)
    # ipaddr raises ValueError on malformed input; a bare `except:` here
    # would also hide programming errors such as NameError.
    except ValueError:
        return False
    if (ip.is_reserved or ip.is_private or ip.is_loopback or
            ip.is_unspecified or ip.is_multicast or
            ip.is_link_local):
        return False
    return True
class Server(xmlrpc.XMLRPC):
    """XML-RPC entry point for denyhosts clients.

    Exposes `add_hosts` and `get_new_hosts`, the two calls the denyhosts
    synchronisation protocol uses. Client identity is taken from the
    X-Real-IP header (set by a reverse proxy) when present, otherwise
    from the TCP peer address.
    """

    @withRequest
    @inlineCallbacks
    def xmlrpc_add_hosts(self, request, hosts):
        """Accept a list of attacking hosts reported by a client.

        Stores the report locally and forwards it to configured peers.
        Raises xmlrpc.Fault(104) on unexpected errors; peer-forwarding
        failures (other than Faults) are logged but do not fail the call.
        Returns 0 on success.
        """
        try:
            x_real_ip = request.requestHeaders.getRawHeaders("X-Real-IP")
            # Prefer the proxy-supplied client address over the TCP peer.
            remote_ip = x_real_ip[0] if x_real_ip else request.getClientIP()
            now = time.time()

            logging.info("add_hosts({}) from {}".format(hosts, remote_ip))
            yield controllers.handle_report_from_client(remote_ip, now, hosts)
            try:
                yield peering.send_update(remote_ip, now, hosts)
            except xmlrpc.Fault, e:
                raise e
            except Exception, e:
                # Peer propagation is best-effort; local handling succeeded.
                logging.warning("Error sending update to peers")
        except xmlrpc.Fault, e:
            # Already a proper XML-RPC error: pass through unchanged.
            raise e
        except Exception, e:
            log.err(_why="Exception in add_hosts")
            raise xmlrpc.Fault(104, "Error adding hosts: {}".format(e))

        returnValue(0)

    @withRequest
    @inlineCallbacks
    def xmlrpc_get_new_hosts(self, request, timestamp, threshold, hosts_added, resiliency):
        """Return crackers newly qualifying since `timestamp`.

        Validates all client-supplied arguments before querying:
        Fault(101) for an illegal IP in hosts_added, Fault(102) for
        non-numeric parameters, Fault(103) for a timestamp in the future,
        Fault(105) for unexpected errors. Returns a dict with a string
        'timestamp' (server time, for the client's next call) and a
        'hosts' list.
        """
        try:
            x_real_ip = request.requestHeaders.getRawHeaders("X-Real-IP")
            # Prefer the proxy-supplied client address over the TCP peer.
            remote_ip = x_real_ip[0] if x_real_ip else request.getClientIP()

            logging.debug("get_new_hosts({},{},{},{}) from {}".format(timestamp, threshold,
                hosts_added, resiliency, remote_ip))
            try:
                timestamp = long(timestamp)
                threshold = int(threshold)
                resiliency = long(resiliency)
            except:
                logging.warning("Illegal arguments to get_new_hosts from client {}".format(remote_ip))
                raise xmlrpc.Fault(102, "Illegal parameters.")

            now = time.time()
            # refuse timestamps from the future
            if timestamp > now:
                logging.warning("Illegal timestamp to get_new_hosts from client {}".format(remote_ip))
                raise xmlrpc.Fault(103, "Illegal timestamp.")

            for host in hosts_added:
                if not utils.is_valid_ip_address(host):
                    logging.warning("Illegal host ip address {}".format(host))
                    raise xmlrpc.Fault(101, "Illegal IP address \"{}\".".format(host))

            # TODO: maybe refuse timestamp from far past because it will
            # cause much work? OTOH, denyhosts will use timestamp=0 for
            # the first run!
            # TODO: check if client IP is a known cracker

            result = {}
            result['timestamp'] = str(long(time.time()))
            result['hosts'] = yield controllers.get_qualifying_crackers(
                threshold, resiliency, timestamp,
                config.max_reported_crackers, set(hosts_added))
            logging.debug("returning: {}".format(result))
        except xmlrpc.Fault, e:
            # Already a proper XML-RPC error: pass through unchanged.
            raise e
        except Exception, e:
            log.err(_why="Exception in xmlrpc_get_new_hosts")
            raise xmlrpc.Fault(105, "Error in get_new_hosts: {}".format(str(e)))
        returnValue( result)

An error has occurred

") 125 | else: 126 | request.write(result.encode('utf-8')) 127 | request.finish() 128 | def fail(err): 129 | request.processingFailed(err) 130 | stats.render_stats().addCallbacks(done, fail) 131 | return server.NOT_DONE_YET 132 | 133 | # vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4 134 | -------------------------------------------------------------------------------- /scripts/denyhosts-server: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | # denyhosts sync server 4 | # Copyright (C) 2015 Jan-Pascal van Best 5 | 6 | # This program is free software: you can redistribute it and/or modify 7 | # it under the terms of the GNU Affero General Public License as published 8 | # by the Free Software Foundation, either version 3 of the License, or 9 | # (at your option) any later version. 10 | 11 | # This program is distributed in the hope that it will be useful, 12 | # but WITHOUT ANY WARRANTY; without even the implied warranty of 13 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 | # GNU Affero General Public License for more details. 15 | 16 | # You should have received a copy of the GNU Affero General Public License 17 | # along with this program. If not, see . 
18 | 19 | 20 | import denyhosts_server.main 21 | 22 | if __name__ == '__main__': 23 | denyhosts_server.main.run_main() 24 | 25 | # vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4 26 | -------------------------------------------------------------------------------- /setup.cfg: -------------------------------------------------------------------------------- 1 | [minify_js] 2 | sources = static/js/bootstrap.js static/js/jquery.js 3 | output = static/js/%s.min.js 4 | 5 | [minify_css] 6 | sources = static/css/bootstrap.css 7 | output = static/css/%s.min.css 8 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | from setuptools import setup 4 | from glob import glob 5 | from denyhosts_server import version 6 | 7 | etcpath = "/etc" 8 | 9 | setup(name='denyhosts-server', 10 | version=version, 11 | description='DenyHosts Synchronisation Server', 12 | author='Jan-Pascal van Best', 13 | author_email='janpascal@vanbest.org', 14 | url='https://github.com/janpascal/denyhosts_sync', 15 | packages=['denyhosts_server'], 16 | install_requires=["Twisted", "twistar", "ipaddr", "jinja2", "numpy", "matplotlib", "GeoIP", "minify", "libnacl"], 17 | scripts=['scripts/denyhosts-server'], 18 | data_files=[ 19 | ('static/js', glob('static/js/*.min.js')), 20 | ('static/css', glob('static/css/*.min.css')), 21 | ('static/graph', glob('static/graph/README')), 22 | ('template', glob('template/*')), 23 | ('docs', [ 24 | 'README.md', 25 | 'LICENSE.md', 26 | 'changelog.txt', 27 | 'denyhosts-server.conf.example', 28 | 'denyhosts-server.service.example', 29 | 'denyhosts-server.init.example' 30 | ] 31 | ), 32 | ], 33 | license=""" 34 | Copyright (C) 2015-2017 Jan-Pascal van Best 35 | 36 | This program is free software: you can redistribute it and/or modify 37 | it under the terms of the GNU Affero General Public License as published 38 | by the 
Free Software Foundation, either version 3 of the License, or 39 | (at your option) any later version. 40 | 41 | This program is distributed in the hope that it will be useful, 42 | but WITHOUT ANY WARRANTY; without even the implied warranty of 43 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 44 | GNU Affero General Public License for more details. 45 | 46 | You should have received a copy of the GNU Affero General Public License 47 | along with this program. If not, see . 48 | """ 49 | ) 50 | -------------------------------------------------------------------------------- /static/graph/README: -------------------------------------------------------------------------------- 1 | This is a cache directory that will contain dynamically generate graphs 2 | -------------------------------------------------------------------------------- /template/stats.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | DenyHosts 8 | 9 | 10 | 11 | 12 |

DenyHosts synchronisation server statistics

13 | 14 | 15 | 16 | 17 |
18 |
19 |

Summary

20 |
21 |
22 |
23 |
24 |
25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 |
Number of clients contributing data{{ num_clients }}
Number of reported hosts{{ num_hosts }}
Total number of reports{{ num_reports }}
Reports in last 24 hours{{ daily_reports }}
New hosts in last 24 hours{{ daily_new_hosts }}
Synchronisation server version{{ server_version }}
45 |
46 |
47 | 74 |
75 |
76 | 77 |
78 |
79 | 80 |
81 |
82 | 83 |
84 |
85 | 86 |
87 |
88 | 89 |
90 |
91 |
92 |
93 |
94 |
95 |
96 | 97 | {% macro hostlist(title, hostlist) -%} 98 |
99 |
100 |

{{ title }}

101 |
102 |
103 | 104 | 105 | 106 | {% if has_hostnames %} 107 | 108 | {% endif %} 109 | 110 | 111 | 112 | 113 | 114 | {% for host in hostlist %} 115 | 116 | 117 | {% if has_hostnames %} 118 | 119 | {% endif %} 120 | 121 | 122 | 123 | 124 | 125 | {% endfor %} 126 |
IP addressHost nameCountryLatest reportFirst reportReport count
{{ host.ip_address }}{{ host.hostname }}{{ host.country }}{{ host.latest_time|datetime }}{{ host.first_time|datetime }}{{ host.total_reports }}
127 |
128 |
129 | {%- endmacro %} 130 | 131 | {{ hostlist("Recent reports (time in UTC)", recent_hosts) }} 132 | 133 | {{ hostlist("Most reported hosts (time in UTC)", most_reported_hosts) }} 134 | 135 |
136 | Page last updated at {{ last_updated|datetime }} 137 |
138 | 139 | 140 | -------------------------------------------------------------------------------- /tests/README: -------------------------------------------------------------------------------- 1 | Run these tests from the project root directory using 2 | 3 | $ trial tests 4 | 5 | Trial is the Twisted test runner. On Debian systems, it is part of the 6 | python-twisted-core package. 7 | 8 | This directory also contains some scripts that are not unit test and cannot be 9 | run using trial: test.py, fill_database.py and sim_clients.py. The latter two 10 | scripts are used for performance testing. 11 | -------------------------------------------------------------------------------- /tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/janpascal/denyhosts_sync/50482237cf8d3dd5662d3832ef98a2dc94b19968/tests/__init__.py -------------------------------------------------------------------------------- /tests/base.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import inspect 3 | import os 4 | import os.path 5 | 6 | from denyhosts_server import config 7 | from denyhosts_server import models 8 | from denyhosts_server import main 9 | from denyhosts_server import database 10 | 11 | from twisted.trial import unittest 12 | from twisted.enterprise import adbapi 13 | from twisted.internet.defer import inlineCallbacks, returnValue 14 | 15 | from twistar.registry import Registry 16 | 17 | class TestBase(unittest.TestCase): 18 | @inlineCallbacks 19 | def setUp(self, config_basename="test.conf"): 20 | configfile = os.path.join( 21 | os.path.dirname(inspect.getsourcefile(TestBase)), 22 | config_basename 23 | ) 24 | 25 | config.read_config(configfile) 26 | 27 | Registry.DBPOOL = adbapi.ConnectionPool(config.dbtype, **config.dbparams) 28 | Registry.register(models.Cracker, models.Report, models.Legacy) 29 | 30 | yield 
#!/usr/bin/env python
# Manual test script (Python 2): compares the host lists of two locally
# running denyhosts-server peers via their XML-RPC debug interface.
# Requires enable_debug_methods in both peer configurations; not a trial
# unit test.
import xmlrpclib
import time
import sys


# Peer 0: default local server port.
server = 'http://localhost:9911'
print("Connecting to server {}".format(server))
s = xmlrpclib.ServerProxy(server)

# Peer 1: second local server instance.
peer1 = 'http://localhost:9921'
print("Connecting to server {}".format(peer1))
s1 = xmlrpclib.ServerProxy(peer1)

print("peer0 all hosts:")
print s.debug.list_all_hosts()

print("peer1 all hosts:")
print s1.debug.list_all_hosts()

# Spot-check one known cracker IP on both peers; a Fault here usually
# means the host is unknown to that peer.
print("peer0 cracker info for 69.192.72.154:")
try:
    print s.debug.get_cracker_info("69.192.72.154")
except Exception, e:
    print e

print("peer1 cracker info for 69.192.72.154:")
try:
    print s1.debug.get_cracker_info("69.192.72.154")
except Exception, e:
    print e
threading.Thread(target=run, args=(server, count, known_crackers)) 23 | threads.append(thread) 24 | 25 | #print("Starting threads...") 26 | for i in xrange(num_threads): 27 | threads[i].start() 28 | 29 | #print("Waiting for threads...") 30 | for i in xrange(num_threads): 31 | threads[i].join() 32 | 33 | print("Inserting {} hosts {} times took {} seconds".format(count, num_threads, time.time() - start_time)) 34 | print("Average time per insert: {} seconds".format( (time.time() - start_time) / count / num_threads)) 35 | 36 | 37 | s = xmlrpclib.ServerProxy(server) 38 | for num_threads in xrange(20, 59): 39 | print("Inserting {} hosts {} times, please wait...".format(100, num_threads)) 40 | s.debug.clear_bulk_cracker_list() 41 | run_insert_test(server, num_threads, 100, True) 42 | time.sleep(3) 43 | 44 | print("==============================================") 45 | 46 | #s = xmlrpclib.ServerProxy(server) 47 | #for i in xrange(100): 48 | # s.clear_bulk_cracker_list() 49 | # print("Adding 20*5000=100,000 hosts...") 50 | # run_insert_test(server, 20, 5000, False) 51 | 52 | 53 | 54 | -------------------------------------------------------------------------------- /tests/peer0.conf: -------------------------------------------------------------------------------- 1 | # section database. All configuration items besides 'type' are passed as-is 2 | # to the database connect() function 3 | 4 | # Database settings. Depending on the database type, you can add several 5 | # parameters to connect to the database. 6 | 7 | # For sqlite3, just fill in the database filename as "database" 8 | # For mysqldb, see 9 | # http://mysql-python.sourceforge.net/MySQLdb.html#functions-and-attributes 10 | # for possible parameters. 11 | [database] 12 | # Type of database. 
Choice of sqlite3, MySQLdb, psycopg2 (PostgreSQL) 13 | # Default: sqlite3 14 | type: sqlite3 15 | database: ../_trial_temp/peer0.sqlite 16 | 17 | [sync] 18 | # Maximum number of cracker IP addresses reported back to 19 | # denyhosts clients per sync. Default: 50 20 | #max_reported_crackers: 50 21 | 22 | # TCP port to listen on. Default: 9911 23 | #listen_port: 9911 24 | 25 | # legacy server to use as a source of bad hosts 26 | #legacy_server: http://xmlrpc.denyhosts.net:9911 27 | #legacy_server: 28 | 29 | # How often (in seconds) to download hosts from legacy server. 30 | # Default: 300 seconds (5 minutes) 31 | legacy_frequency: 300 32 | #legacy_frequency: 60 33 | 34 | # Threshold value for legacy server 35 | legacy_threshold = 3 36 | 37 | # Resiliency value for legacy server (in seconds) 38 | legacy_resiliency = 18000 39 | 40 | enable_debug_methods: yes 41 | 42 | [maintenance] 43 | # Maintenance interval in seconds (3600 = one hour; 86400 = one day) 44 | interval_seconds: 3600 45 | #interval_seconds: 30 46 | 47 | # Number of days before reports are expired. Default: 30 48 | expiry_days: 30 49 | # For testing, around 2 minutes 50 | #expiry_days: 0.001 51 | 52 | [logging] 53 | # Location of the log file. Default: /var/log/denyhosts-sync/sync.log 54 | #logfile: /var/log/denyhosts-sync/sync.log 55 | logfile: ../_trial_temp/peer0.log 56 | #logfile: /home/janpascal/denyhosts_sync_server_twisted/dh_syncserver.log 57 | 58 | # Log level. One of CRITICAL, ERROR, WARNING, INFO of DEBUG 59 | # Default: INFO. 
Set to WARNING for high-volume server 60 | loglevel: DEBUG 61 | 62 | [stats] 63 | update_frequency: 60 64 | resolve_hostnames: off 65 | listen_port: 8800 66 | 67 | [peering] 68 | key_file: ../tests/peer0.key 69 | 70 | peer_1_url: http://localhost:9921 71 | peer_1_key: 4a4f9f0a7077e132bbae7eeb663b034d79e804bce25bd30c4a8471415f9d997d 72 | 73 | peer_2_url: http://localhost:9922 74 | peer_2_key: 9bc434efb1a2366af585c555cd69219f9ddad6f121bc02ba60d2187a1a1cf551 75 | 76 | 77 | -------------------------------------------------------------------------------- /tests/peer0.key: -------------------------------------------------------------------------------- 1 | {"pub": "680323e4df1f1d1a42d71e57d0c1275362117f6a14e188afbf62eb5641d9fe15", "priv": "9717d5f9d94e730a5b5ffb23d0a70aec21e5d4a00505e0f934790beeb19dd583"} -------------------------------------------------------------------------------- /tests/peer1.conf: -------------------------------------------------------------------------------- 1 | # section database. All configuration items besides 'type' are passed as-is 2 | # to the database connect() function 3 | 4 | # Database settings. Depending on the database type, you can add several 5 | # parameters to connect to the database. 6 | 7 | # For sqlite3, just fill in the database filename as "database" 8 | # For mysqldb, see 9 | # http://mysql-python.sourceforge.net/MySQLdb.html#functions-and-attributes 10 | # for possible parameters. 11 | [database] 12 | # Type of database. Choice of sqlite3, MySQLdb, psycopg2 (PostgreSQL) 13 | # Default: sqlite3 14 | type: sqlite3 15 | database: _trial_temp/peer1.sqlite 16 | 17 | [sync] 18 | # Maximum number of cracker IP addresses reported back to 19 | # denyhosts clients per sync. Default: 50 20 | #max_reported_crackers: 50 21 | 22 | # TCP port to listen on. 
Default: 9911 23 | listen_port: 9921 24 | 25 | # legacy server to use as a source of bad hosts 26 | #legacy_server: http://xmlrpc.denyhosts.net:9911 27 | #legacy_server: 28 | 29 | # How often (in seconds) to download hosts from legacy server. 30 | # Default: 300 seconds (5 minutes) 31 | legacy_frequency: 300 32 | #legacy_frequency: 60 33 | 34 | # Threshold value for legacy server 35 | legacy_threshold = 3 36 | 37 | # Resiliency value for legacy server (in seconds) 38 | legacy_resiliency = 18000 39 | 40 | enable_debug_methods: yes 41 | 42 | [maintenance] 43 | # Maintenance interval in seconds (3600 = one hour; 86400 = one day) 44 | interval_seconds: 3600 45 | #interval_seconds: 30 46 | 47 | # Number of days before reports are expired. Default: 30 48 | expiry_days: 30 49 | # For testing, around 2 minutes 50 | #expiry_days: 0.001 51 | 52 | [logging] 53 | # Location of the log file. Default: /var/log/denyhosts-sync/sync.log 54 | #logfile: /var/log/denyhosts-sync/sync.log 55 | logfile: _trial_temp/peer1.log 56 | #logfile: /home/janpascal/denyhosts_sync_server_twisted/dh_syncserver.log 57 | 58 | # Log level. One of CRITICAL, ERROR, WARNING, INFO of DEBUG 59 | # Default: INFO. 
Set to WARNING for high-volume server 60 | loglevel: DEBUG 61 | 62 | [stats] 63 | update_frequency: 60 64 | resolve_hostnames: off 65 | listen_port: 8801 66 | 67 | [peering] 68 | key_file: tests/peer1.key 69 | 70 | peer_0_url: http://localhost:9911 71 | peer_0_key: 680323e4df1f1d1a42d71e57d0c1275362117f6a14e188afbf62eb5641d9fe15 72 | 73 | peer_2_url: http://localhost:9922 74 | peer_2_key: 9bc434efb1a2366af585c555cd69219f9ddad6f121bc02ba60d2187a1a1cf551 75 | 76 | -------------------------------------------------------------------------------- /tests/peer1.key: -------------------------------------------------------------------------------- 1 | {"pub": "4a4f9f0a7077e132bbae7eeb663b034d79e804bce25bd30c4a8471415f9d997d", "priv": "fb295afe297679ce8c6498051a3a28d9c72003f01b962386d9254d52eae350b9"} -------------------------------------------------------------------------------- /tests/peer2.conf: -------------------------------------------------------------------------------- 1 | # section database. All configuration items besides 'type' are passed as-is 2 | # to the database connect() function 3 | 4 | # Database settings. Depending on the database type, you can add several 5 | # parameters to connect to the database. 6 | 7 | # For sqlite3, just fill in the database filename as "database" 8 | # For mysqldb, see 9 | # http://mysql-python.sourceforge.net/MySQLdb.html#functions-and-attributes 10 | # for possible parameters. 11 | [database] 12 | # Type of database. Choice of sqlite3, MySQLdb, psycopg2 (PostgreSQL) 13 | # Default: sqlite3 14 | type: sqlite3 15 | database: _trial_temp/peer2.sqlite 16 | 17 | [sync] 18 | # Maximum number of cracker IP addresses reported back to 19 | # denyhosts clients per sync. Default: 50 20 | #max_reported_crackers: 50 21 | 22 | # TCP port to listen on. 
Default: 9911 23 | listen_port: 9922 24 | 25 | # legacy server to use as a source of bad hosts 26 | #legacy_server: http://xmlrpc.denyhosts.net:9911 27 | #legacy_server: 28 | 29 | # How often (in seconds) to download hosts from legacy server. 30 | # Default: 300 seconds (5 minutes) 31 | legacy_frequency: 300 32 | #legacy_frequency: 60 33 | 34 | # Threshold value for legacy server 35 | legacy_threshold = 3 36 | 37 | # Resiliency value for legacy server (in seconds) 38 | legacy_resiliency = 18000 39 | 40 | enable_debug_methods: yes 41 | 42 | [maintenance] 43 | # Maintenance interval in seconds (3600 = one hour; 86400 = one day) 44 | interval_seconds: 3600 45 | #interval_seconds: 30 46 | 47 | # Number of days before reports are expired. Default: 30 48 | expiry_days: 30 49 | # For testing, around 2 minutes 50 | #expiry_days: 0.001 51 | 52 | [logging] 53 | # Location of the log file. Default: /var/log/denyhosts-sync/sync.log 54 | #logfile: /var/log/denyhosts-sync/sync.log 55 | logfile: _trial_temp/peer2.log 56 | #logfile: /home/janpascal/denyhosts_sync_server_twisted/dh_syncserver.log 57 | 58 | # Log level. One of CRITICAL, ERROR, WARNING, INFO of DEBUG 59 | # Default: INFO. 
Set to WARNING for high-volume server 60 | loglevel: DEBUG 61 | 62 | [stats] 63 | update_frequency: 60 64 | resolve_hostnames: off 65 | listen_port: 8802 66 | 67 | [peering] 68 | key_file: tests/peer2.key 69 | 70 | peer_0_url: http://localhost:9911 71 | peer_0_key: 680323e4df1f1d1a42d71e57d0c1275362117f6a14e188afbf62eb5641d9fe15 72 | 73 | peer_1_url: http://localhost:9921 74 | peer_1_key: 4a4f9f0a7077e132bbae7eeb663b034d79e804bce25bd30c4a8471415f9d997d 75 | 76 | -------------------------------------------------------------------------------- /tests/peer2.key: -------------------------------------------------------------------------------- 1 | {"pub": "9bc434efb1a2366af585c555cd69219f9ddad6f121bc02ba60d2187a1a1cf551", "priv": "72bc343dbe4c76e6395c76ee3350777a079ece02b4df1818c65514bc151b242a"} -------------------------------------------------------------------------------- /tests/peer_unknown.key: -------------------------------------------------------------------------------- 1 | {"pub": "892e6f5c7d0fdb18f4926674d58646e049bad94f719096e066dbcccacad6a034", "priv": "c47280e5eea5bd7d3a4b75f397461c6637e7694483c5f4e8fa189e7c239a6628"} -------------------------------------------------------------------------------- /tests/sim_clients.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | import xmlrpclib 3 | import time 4 | import threading 5 | import random 6 | import ipaddr 7 | 8 | def is_valid_ip_address(ip_address): 9 | try: 10 | ip = ipaddr.IPAddress(ip_address) 11 | except: 12 | return False 13 | if (ip.is_reserved or ip.is_private or ip.is_loopback or 14 | ip.is_unspecified or ip.is_multicast or 15 | ip.is_link_local): 16 | return False 17 | return True 18 | 19 | def random_ip_address(): 20 | while True: 21 | ip = ".".join(map(str, (random.randint(0, 255) for _ in range(4)))) 22 | if is_valid_ip_address(ip): 23 | return ip 24 | 25 | server = 'http://localhost:9911' 26 | 27 | def run(server, count): 28 | # 
Assuming every client will supply 1 new host every 8minutes 29 | # and ask for newly recognised crackers 30 | s = xmlrpclib.ServerProxy(server) 31 | for i in xrange(count): 32 | ip = random_ip_address() 33 | try: 34 | s.add_hosts([ip]) 35 | s.get_new_hosts(time.time()-480, 3, [ip], 3600) 36 | except Exception, e: 37 | print("Got exception {}".format(e)) 38 | 39 | def run_sim(server, num_threads, count): 40 | start_time = time.time() 41 | 42 | threads = [] 43 | print("Creating threads...") 44 | for i in xrange(num_threads): 45 | thread = threading.Thread(target=run, args=(server, count)) 46 | threads.append(thread) 47 | 48 | print("Starting threads...") 49 | for i in xrange(num_threads): 50 | threads[i].start() 51 | 52 | #print("Waiting for threads...") 53 | for i in xrange(num_threads): 54 | threads[i].join() 55 | 56 | end_time = time.time() 57 | print("Simulation {} clients {} times took {} seconds".format(count, num_threads, end_time - start_time)) 58 | print("Average time per client : {} seconds".format( (end_time - start_time) / count / num_threads)) 59 | print("Average requests per seconds: {}".format( 60 | count * num_threads / (end_time - start_time))) 61 | 62 | 63 | run_sim(server, 60, 100) 64 | 65 | 66 | 67 | -------------------------------------------------------------------------------- /tests/test-master.conf: -------------------------------------------------------------------------------- 1 | # Configuration file for unit tests. 
2 | 3 | [database] 4 | type: sqlite3 5 | database: unittest.sqlite 6 | 7 | [maintenance] 8 | 9 | [sync] 10 | 11 | [logging] 12 | logfile: unittest.log 13 | loglevel: DEBUG 14 | 15 | [stats] 16 | static_dir: static 17 | graph_dir: graph 18 | template_dir: template 19 | 20 | 21 | [peering] 22 | key_file: ../../test_master.key 23 | is_master: true 24 | 25 | # If is_master is true: configure _only_ slave servers 26 | slave_server_1: http://localhost:9921 27 | slave_key_1: 4a4f9f0a7077e132bbae7eeb663b034d79e804bce25bd30c4a8471415f9d997d 28 | 29 | slave_server_2: http://localhost:9922 30 | slave_key_2: 9bc434efb1a2366af585c555cd69219f9ddad6f121bc02ba60d2187a1a1cf551 31 | -------------------------------------------------------------------------------- /tests/test.conf: -------------------------------------------------------------------------------- 1 | # Configuration file for unit tests. 2 | 3 | [database] 4 | type: sqlite3 5 | database: unittest.sqlite 6 | 7 | [maintenance] 8 | 9 | [sync] 10 | 11 | [logging] 12 | logfile: unittest.log 13 | loglevel: DEBUG 14 | 15 | [stats] 16 | static_dir: static 17 | graph_dir: graph 18 | template_dir: template 19 | 20 | [peering] 21 | -------------------------------------------------------------------------------- /tests/test.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | import xmlrpclib 3 | import time 4 | import sys 5 | 6 | 7 | server = 'http://localhost:9911' 8 | print("Connecting to server {}".format(server)) 9 | s = xmlrpclib.ServerProxy(server) 10 | 11 | #print s.add_hosts(["127.0.0.3"]) 12 | #print s.add_hosts(["192.168.1.22"]) 13 | print("Adding one host (four times)") 14 | print s.add_hosts(["69.192.72.154"]) 15 | print s.add_hosts(["69.192.72.155"]) 16 | print s.add_hosts(["69.192.72.156"]) 17 | print s.add_hosts(["69.192.72.157"]) 18 | 19 | # Concurrency testing 20 | #for i in range(0, 100): 21 | # print("Running test {}".format(i)) 22 | # s.debug.test() 
23 | # print("Running maintenance") 24 | # s.debug.maintenance() 25 | # time.sleep(5) 26 | 27 | s.debug.test() 28 | s.debug.maintenance() 29 | 30 | #print s.add_hosts(["test4.example.org"]) 31 | 32 | #print("New crackers, resilience=3600") 33 | #print s.get_new_hosts(time.time()-3600, 3, ["old.example.net"], 3600) 34 | #print("New crackers, resilience=60") 35 | #print s.get_new_hosts(time.time()-3600, 3, ["old.example.net"], 60) 36 | #print("New crackers, resilience=60, min_reporters=2") 37 | #print s.get_new_hosts(time.time()-3600, 2, ["old.example.net"], 60) 38 | #print("New crackers, resilience=60, min_reporters=1") 39 | #print s.get_new_hosts(time.time()-3600, 1, ["old.example.net"], 60) 40 | print("Illegal arguments: ") 41 | try: 42 | print s.get_new_hosts("12312iasda", 1, ["old.example.net"], 60) 43 | except Exception, e: 44 | print(e) 45 | print("New crackers, resilience=60, min_reporters=1") 46 | print s.get_new_hosts(time.time()-3600, 1, ["69.192.72.154"], 60) 47 | 48 | peer1 = 'http://localhost:9921' 49 | print("Connecting to server {}".format(peer1)) 50 | s1 = xmlrpclib.ServerProxy(peer1) 51 | 52 | print("peer1 new crackers, resilience=60, min_reporters=1") 53 | print s1.get_new_hosts(time.time()-3600, 1, ["69.192.72.154"], 60) 54 | 55 | print("peer0 all hosts:") 56 | print s.debug.list_all_hosts() 57 | 58 | print("peer1 all hosts:") 59 | print s1.debug.list_all_hosts() 60 | 61 | #print("All hosts:") 62 | #print s.list_all_hosts() 63 | # #print s.dump_database() 64 | # 65 | #print("Cracker info for 69.192.72.154:") 66 | #try: 67 | # print s.get_cracker_info("69.192.72.154") 68 | #except Exception, e: 69 | # print e 70 | # 71 | -------------------------------------------------------------------------------- /tests/test_concurrency.py: -------------------------------------------------------------------------------- 1 | import time 2 | 3 | from denyhosts_server import models 4 | from denyhosts_server import controllers 5 | from denyhosts_server import views 
6 | from denyhosts_server.models import Cracker, Report 7 | 8 | from twisted.internet import reactor,defer,task 9 | from twisted.internet.defer import inlineCallbacks, returnValue 10 | 11 | import base 12 | 13 | def random_ip_address(): 14 | return ".".join(map(str, (random.randint(0, 255) for _ in range(4)))) 15 | 16 | def sleep(seconds): 17 | d = defer.Deferred() 18 | reactor.callLater(seconds, d.callback, seconds) 19 | return d 20 | 21 | class MockHeaders: 22 | def __init__(self, ip): 23 | self._ip = ip 24 | 25 | def getRawHeaders(self,key): 26 | return [self._ip,] 27 | 28 | class MockRequest: 29 | def __init__(self, ip): 30 | self._ip = ip 31 | self.received_headers = {} 32 | self.requestHeaders = MockHeaders(ip) 33 | 34 | def getClientIP(self): 35 | return self._ip 36 | 37 | class ConcurrencyTest(base.TestBase): 38 | 39 | @inlineCallbacks 40 | def test_try_and_confuse_server(self): 41 | self.view = views.Server() 42 | request = MockRequest("127.0.0.1") 43 | for i in range(0, 25): 44 | print("count:{}".format(i)) 45 | 46 | self.count = 0 47 | def called(result): 48 | self.count += 1 49 | 50 | for t in range(5): 51 | task.deferLater(reactor, 0.01, self.view.xmlrpc_add_hosts, request, ["1.1.1.1", "2.2.2.2"]).addCallback(called) 52 | self.count -= 1 53 | for t in range(5): 54 | task.deferLater(reactor, 0.01, self.view.xmlrpc_add_hosts, request, ["1.1.1.7", "2.2.2.8"]).addCallback(called) 55 | self.count -= 1 56 | task.deferLater(reactor, 0.01, controllers.perform_maintenance).addCallback(called) 57 | self.count -= 1 58 | 59 | while self.count < 0: 60 | yield sleep(0.1) 61 | """" 62 | def run(self, count): 63 | for i in xrange(count): 64 | ip = random_ip_address() 65 | try: 66 | self.view.add_hosts([ip]) 67 | self.view.get_new_hosts(time.time()-480, 3, [ip], 3600) 68 | except Exception, e: 69 | print("Got exception {}".format(e)) 70 | 71 | @inlineCallbacks 72 | def test_simulate_clients(self): 73 | num_threads = 20 74 | num_runs = 100 75 | threads = [] 76 | 77 | 
print("Creating threads...") 78 | for i in xrange(num_threads): 79 | thread = threading.Thread(target=run, args=(num_runs)) 80 | threads.append(thread) 81 | 82 | print("Starting threads...") 83 | for i in xrange(num_threads): 84 | threads[i].start() 85 | 86 | #print("Waiting for threads...") 87 | for i in xrange(num_threads): 88 | threads[i].join() 89 | """ 90 | -------------------------------------------------------------------------------- /tests/test_get_new_hosts.py: -------------------------------------------------------------------------------- 1 | # denyhosts sync server 2 | # Copyright (C) 2015 Jan-Pascal van Best 3 | 4 | # This program is free software: you can redistribute it and/or modify 5 | # it under the terms of the GNU Affero General Public License as published 6 | # by the Free Software Foundation, either version 3 of the License, or 7 | # (at your option) any later version. 8 | 9 | # This program is distributed in the hope that it will be useful, 10 | # but WITHOUT ANY WARRANTY; without even the implied warranty of 11 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 | # GNU Affero General Public License for more details. 13 | 14 | # You should have received a copy of the GNU Affero General Public License 15 | # along with this program. If not, see . 
16 | 17 | import time 18 | 19 | from denyhosts_server import models 20 | from denyhosts_server import controllers 21 | from denyhosts_server import database 22 | from denyhosts_server.models import Cracker, Report 23 | 24 | from twisted.internet.defer import inlineCallbacks, returnValue 25 | 26 | import base 27 | import logging 28 | 29 | class GetNewHostsTest(base.TestBase): 30 | 31 | @inlineCallbacks 32 | def test_get_qualifying_crackers(self): 33 | now = time.time() 34 | c = yield Cracker(ip_address="192.168.1.1", first_time=now, latest_time=now, total_reports=0, current_reports=0).save() 35 | 36 | # First test: cracker without reports should not be reported 37 | hosts = yield controllers.get_qualifying_crackers(1, 0, now, 50, []) 38 | self.assertEqual(len(hosts), 0, "Cracker without reports should not be returned") 39 | 40 | client_ip = "1.1.1.1" 41 | yield controllers.add_report_to_cracker(c, client_ip, when=now) 42 | 43 | hosts = yield controllers.get_qualifying_crackers(1, -1, now-1, 50, []) 44 | self.assertEqual(len(hosts), 1, "When one report, get_new_hosts with resilience <0 should return one host") 45 | 46 | hosts = yield controllers.get_qualifying_crackers(2, -1, now-1, 50, []) 47 | self.assertEqual(len(hosts), 0, "When one report, get_new_hosts with resilience 0 and threshold 2 should return empty list") 48 | 49 | client_ip = "1.1.1.2" 50 | yield controllers.add_report_to_cracker(c, client_ip, when=now+3600) 51 | 52 | hosts = yield controllers.get_qualifying_crackers(2, 3500, now-1, 50, []) 53 | self.assertEqual(len(hosts), 1, "Two reports should result in a result") 54 | 55 | hosts = yield controllers.get_qualifying_crackers(2, 3500, now-1, 50, [c.ip_address]) 56 | self.assertEqual(len(hosts), 0, "Two reports, remove reported host from list") 57 | 58 | hosts = yield controllers.get_qualifying_crackers(3, 3500, now-1, 50, []) 59 | self.assertEqual(len(hosts), 0, "Two reports, asked for three") 60 | 61 | hosts = yield 
controllers.get_qualifying_crackers(2, 4000, now-1, 50, []) 62 | self.assertEqual(len(hosts), 0, "Two reports, not enough resiliency") 63 | 64 | logging.debug("Testing d2") 65 | client_ip = "1.1.1.3" 66 | yield controllers.add_report_to_cracker(c, client_ip, when=now+7200) 67 | 68 | hosts = yield controllers.get_qualifying_crackers(2, 3500, now+3601, 50, []) 69 | self.assertEqual(len(hosts), 0, "Condition (d2)") 70 | 71 | logging.debug("Testing d1") 72 | client_ip = "1.1.1.3" 73 | yield controllers.add_report_to_cracker(c, client_ip, when=now+7200+24*3600+1) 74 | 75 | hosts = yield controllers.get_qualifying_crackers(2, 24*3600+1, now+3601, 50, []) 76 | self.assertEqual(len(hosts), 1, "Condition (d1)") 77 | 78 | 79 | # vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4 80 | -------------------------------------------------------------------------------- /tests/test_models.py: -------------------------------------------------------------------------------- 1 | # denyhosts sync server 2 | # Copyright (C) 2015 Jan-Pascal van Best 3 | 4 | # This program is free software: you can redistribute it and/or modify 5 | # it under the terms of the GNU Affero General Public License as published 6 | # by the Free Software Foundation, either version 3 of the License, or 7 | # (at your option) any later version. 8 | 9 | # This program is distributed in the hope that it will be useful, 10 | # but WITHOUT ANY WARRANTY; without even the implied warranty of 11 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 | # GNU Affero General Public License for more details. 13 | 14 | # You should have received a copy of the GNU Affero General Public License 15 | # along with this program. If not, see . 
16 | 17 | import time 18 | 19 | from denyhosts_server import models 20 | from denyhosts_server import controllers 21 | from denyhosts_server.models import Cracker, Report 22 | 23 | from twisted.internet.defer import inlineCallbacks, returnValue 24 | 25 | import base 26 | 27 | class ModelsTest(base.TestBase): 28 | 29 | @inlineCallbacks 30 | def test_add_cracker(self): 31 | cracker_ip = "127.0.0.1" 32 | now = time.time() 33 | cracker = Cracker(ip_address=cracker_ip, first_time=now, latest_time=now, total_reports=0, current_reports=0, resiliency=0) 34 | cracker = yield cracker.save() 35 | 36 | c2 = yield controllers.get_cracker(cracker_ip) 37 | yield self.assertIsNotNone(c2) 38 | self.assertEqual(c2.ip_address, cracker.ip_address, "Save and re-fetch cracker from database") 39 | self.assertEqual(c2.id, cracker.id, "Save and re-fetch cracker from database") 40 | self.assertEqual(c2.first_time, cracker.first_time, "Save and re-fetch cracker from database") 41 | self.assertEqual(c2.latest_time, cracker.latest_time, "Save and re-fetch cracker from database") 42 | self.assertEqual(c2.total_reports, cracker.total_reports, "Save and re-fetch cracker from database") 43 | self.assertEqual(c2.current_reports, cracker.current_reports, "Save and re-fetch cracker from database") 44 | self.assertEqual(c2.id, cracker.id, "Save and re-fetch cracker from database") 45 | 46 | @inlineCallbacks 47 | def test_add_report(self): 48 | now = time.time() 49 | yield Cracker(ip_address="192.168.1.1", first_time=now, latest_time=now, total_reports=0, current_reports=0).save() 50 | c = yield controllers.get_cracker("192.168.1.1") 51 | yield self.assertIsNotNone(c) 52 | yield controllers.add_report_to_cracker(c, "127.0.0.1") 53 | 54 | r = yield Report.find(where=["cracker_id=? 
and ip_address=?",c.id,"127.0.0.1"], limit=1) 55 | returnValue(self.assertIsNotNone(r, "Added report is in database")) 56 | 57 | @inlineCallbacks 58 | def test_add_multiple_reports(self): 59 | now = time.time() 60 | 61 | yield Cracker(ip_address="192.168.1.1", first_time=now, latest_time=now, total_reports=0, current_reports=0).save() 62 | c = yield controllers.get_cracker("192.168.1.1") 63 | yield self.assertIsNotNone(c) 64 | yield controllers.add_report_to_cracker(c, "127.0.0.1", now) 65 | 66 | reports = yield Report.find(where=["cracker_id=? and ip_address=?",c.id,"127.0.0.1"]) 67 | self.assertEqual(len(reports), 1, "First added report is in database") 68 | 69 | # Add second report shortly after first 70 | yield controllers.add_report_to_cracker(c, "127.0.0.1", now+1) 71 | reports = yield Report.find(where=["cracker_id=? and ip_address=?",c.id,"127.0.0.1"]) 72 | self.assertEqual(len(reports), 1, "Second added report should be ignored") 73 | 74 | # Add second report after 24 hours 75 | yield controllers.add_report_to_cracker(c, "127.0.0.1", now+24*3600+1) 76 | reports = yield Report.find(where=["cracker_id=? and ip_address=?",c.id,"127.0.0.1"]) 77 | self.assertEqual(len(reports), 2, "Second report should be added after 24 hours ") 78 | 79 | # Add third report shortly after second, should be ignored 80 | yield controllers.add_report_to_cracker(c, "127.0.0.1", now+24*3600+10) 81 | reports = yield Report.find(where=["cracker_id=? and ip_address=?",c.id,"127.0.0.1"]) 82 | self.assertEqual(len(reports), 2, "Third report shortly after second should be ignored") 83 | 84 | # Add third report after again 24 hours 85 | yield controllers.add_report_to_cracker(c, "127.0.0.1", now+2*24*3600+20) 86 | reports = yield Report.find(where=["cracker_id=? 
and ip_address=?",c.id,"127.0.0.1"]) 87 | self.assertEqual(len(reports), 3, "Third report after again 24 hours, should be added") 88 | 89 | # Add fourth report 90 | time_added = now + 2*24*3600 + 30 91 | yield controllers.add_report_to_cracker(c, "127.0.0.1", time_added) 92 | reports = yield Report.find( 93 | where=["cracker_id=? and ip_address=?",c.id,"127.0.0.1"], 94 | orderby='latest_report_time asc') 95 | self.assertEqual(len(reports), 3, "Fourth report, should be merged") 96 | self.assertEqual(reports[-1].latest_report_time, time_added, "Latest report time should be updated") 97 | 98 | self.assertEquals(c.current_reports, 1, "Only one unique reporter should be counted") 99 | self.assertEquals(c.total_reports, 6, "Cracker reported six timed in total") 100 | 101 | # Perform maintenance, expire original report 102 | yield controllers.perform_maintenance(limit = now+1) 103 | reports = yield Report.find(where=["cracker_id=? and ip_address=?",c.id,"127.0.0.1"]) 104 | self.assertEqual(len(reports), 2, "Maintenance should remove oldest report") 105 | yield c.refresh() 106 | self.assertEqual(c.current_reports, 1, "Maintenance should still leave one unique reporter") 107 | 108 | # Perform maintenance, expire second report 109 | yield controllers.perform_maintenance(limit = now+24*3600+11) 110 | reports = yield Report.find(where=["cracker_id=? and ip_address=?",c.id,"127.0.0.1"]) 111 | self.assertEqual(len(reports), 1, "Maintenance should remove one more report") 112 | yield c.refresh() 113 | self.assertEqual(c.current_reports, 1, "Maintenance should still leave one unique reporter") 114 | 115 | # Perform maintenance again, expire last report and cracker 116 | yield controllers.perform_maintenance(limit = now+2*24*3600+31) 117 | reports = yield Report.find(where=["cracker_id=? 
and ip_address=?",c.id,"127.0.0.1"]) 118 | self.assertEqual(len(reports), 0, "Maintenance should remove last report") 119 | cracker = yield controllers.get_cracker("192.168.1.1") 120 | self.assertIsNone(cracker, "Maintenance should remove cracker") 121 | 122 | # vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4 123 | -------------------------------------------------------------------------------- /tests/test_peering.py: -------------------------------------------------------------------------------- 1 | # denyhosts sync server 2 | # Copyright (C) 2015 Jan-Pascal van Best 3 | 4 | # This program is free software: you can redistribute it and/or modify 5 | # it under the terms of the GNU Affero General Public License as published 6 | # by the Free Software Foundation, either version 3 of the License, or 7 | # (at your option) any later version. 8 | 9 | # This program is distributed in the hope that it will be useful, 10 | # but WITHOUT ANY WARRANTY; without even the implied warranty of 11 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 | # GNU Affero General Public License for more details. 13 | 14 | # You should have received a copy of the GNU Affero General Public License 15 | # along with this program. If not, see . 
16 | 17 | import json 18 | import os 19 | import os.path 20 | import signal 21 | import time 22 | import subprocess 23 | import xmlrpclib 24 | from xmlrpclib import ServerProxy 25 | 26 | from denyhosts_server import config 27 | from denyhosts_server import models 28 | from denyhosts_server import controllers 29 | from denyhosts_server import database 30 | from denyhosts_server import version as server_version 31 | from denyhosts_server.models import Cracker, Report 32 | from denyhosts_server import peering 33 | 34 | from twisted.internet import reactor, task 35 | from twisted.internet.defer import inlineCallbacks, returnValue 36 | from twisted.python import log 37 | 38 | import libnacl.public 39 | import libnacl.utils 40 | 41 | import base 42 | import logging 43 | 44 | 45 | class TestPeering(base.TestBase): 46 | @inlineCallbacks 47 | def setUp(self): 48 | yield base.TestBase.setUp(self, "peer0.conf") 49 | peering.load_keys() 50 | 51 | # Start peer servers 52 | cwd = os.path.join(os.getcwd(), "..") 53 | log.msg("Cleaning peer servers...") 54 | peer1=subprocess.Popen("PYTHONPATH=. scripts/denyhosts-server -c tests/peer1.conf --recreate-database --force > /dev/null 2>&1", cwd=cwd, shell=True) 55 | peer2=subprocess.Popen("PYTHONPATH=. scripts/denyhosts-server -c tests/peer2.conf --recreate-database --force > /dev/null 2>&1", cwd=cwd, shell=True) 56 | peer1.wait() 57 | peer2.wait() 58 | 59 | log.msg("Starting peer servers...") 60 | self.peer1=subprocess.Popen("PYTHONPATH=. scripts/denyhosts-server -c tests/peer1.conf >/dev/null 2>&1", cwd=cwd, shell=True, preexec_fn=os.setsid) 61 | self.peer2=subprocess.Popen("PYTHONPATH=. 
scripts/denyhosts-server -c tests/peer2.conf >/dev/null 2>&1", cwd=cwd, shell=True, preexec_fn=os.setsid) 62 | 63 | log.msg("Waiting until peer servers are responsive...") 64 | for peer_url in config.peers: 65 | server = ServerProxy(peer_url) 66 | is_up = False 67 | start_time = time.time() 68 | while not is_up and time.time() - start_time < 10: 69 | try: 70 | server.get_new_hosts(time.time(), 1, [], 3600) 71 | is_up = True 72 | log.msg("Peer {} is up!".format(peer_url)) 73 | except: 74 | time.sleep(0.2) 75 | if not is_up: 76 | self.fail("Failed to start peer {}".format(peer_url)) 77 | 78 | def tearDown(self): 79 | log.msg("Shutting down peer1...") 80 | os.killpg(self.peer1.pid, signal.SIGTERM) 81 | log.msg("Shutting down peer2...") 82 | os.killpg(self.peer2.pid, signal.SIGTERM) 83 | 84 | log.msg("Waiting for peers to shut down...") 85 | self.peer1.wait() 86 | self.peer2.wait() 87 | 88 | log.msg("Peers shut down") 89 | 90 | @inlineCallbacks 91 | def test_send_update_to_peers(self): 92 | yield peering.send_update("11.11.11.11", time.time(), ["1.1.1.1"]) 93 | yield task.deferLater(reactor, 1.0, lambda _:0, 0) 94 | for peer_url in config.peers: 95 | server = ServerProxy(peer_url) 96 | response = server.get_new_hosts(time.time() - 15, 1, [], 0) 97 | log.msg("Peer {} has new hosts {}".format(peer_url, response)) 98 | self.assertIn("1.1.1.1", response["hosts"], "Peer did not receive hosts!") 99 | 100 | @inlineCallbacks 101 | def _test_with_key(self, keypair, public_key): 102 | box = libnacl.public.Box(keypair.sk, peering._own_key.pk) 103 | 104 | data = { 105 | "client_ip": "11.11.11.11", 106 | "timestamp": time.time(), 107 | "hosts": ["1.1.1.1"] 108 | } 109 | data_json = json.dumps(data) 110 | crypted = box.encrypt(data_json) 111 | base64 = crypted.encode('base64') 112 | yield peering.handle_update(public_key, crypted) 113 | 114 | def _insert_peer(self): 115 | peer_url = "http://test.peer:9911" 116 | peer_keypair = libnacl.public.SecretKey() 117 | config.peers[peer_url] 
= peer_keypair.pk 118 | peering._peer_boxes[peer_url] = libnacl.public.Box(peering._own_key.sk, libnacl.public.PublicKey(peer_keypair.pk)) 119 | return peer_keypair 120 | 121 | @inlineCallbacks 122 | def test_unknown_key(self): 123 | keypair = self._insert_peer() 124 | log.msg("Testing whether message from known keypair is accepted") 125 | yield self._test_with_key(keypair, keypair.pk) 126 | log.msg("Testing whether message from unknown keypair is rejected") 127 | unknown_keypair = libnacl.public.SecretKey() 128 | yield self.assertFailure(self._test_with_key(unknown_keypair, unknown_keypair.pk), Exception) 129 | log.msg("Testing whether message with seemingly valid key but signed by unknown keypair is rejected") 130 | unknown_keypair = libnacl.public.SecretKey() 131 | yield self.assertFailure(self._test_with_key(unknown_keypair, keypair.pk), Exception) 132 | 133 | @inlineCallbacks 134 | def test_list_peers(self): 135 | peer_keypair = self._insert_peer() 136 | box = libnacl.public.Box(peer_keypair.sk, peering._own_key.pk) 137 | please = box.encrypt("please") 138 | response = yield peering.list_peers(peer_keypair.pk, please) 139 | log.msg(response) 140 | peer_list = response["peers"] 141 | self.assertEqual(len(peer_list), len(config.peers), "Peer list not correct length") 142 | self.assertEqual(response["server_version"], server_version, "Incorrect server version in response") 143 | for peer_url in config.peers: 144 | self.assertIn(peer_url, peer_list, "Peer missing from received list") 145 | self.assertEqual(config.peers[peer_url].encode('hex'), peer_list[peer_url], "Wrong key in received peer list") 146 | 147 | def test_check_peers(self): 148 | result = peering.check_peers() 149 | self.assertEqual(result, True, "Check peers should be successful for default config") 150 | 151 | # TODO: check case where one of the peers doesn't know another peer, or has a wrong key for it 152 | 153 | def test_check_peers_missing(self): 154 | # check case where one of the peers has an 
extra peer that I don't know about 155 | # Remove one of the peers from my list 156 | peer_to_remove = config.peers.keys()[0] 157 | del config.peers[peer_to_remove] 158 | del peering._peer_boxes[peer_to_remove] 159 | 160 | result = peering.check_peers() 161 | self.assertEqual(result, False, "Check peers should fail if a peer is missing from my own config") 162 | 163 | def test_check_peers_extra(self): 164 | self._insert_peer() 165 | 166 | result = peering.check_peers() 167 | self.assertEqual(result, False, "Check peers should fail if one of my peers does not exist") 168 | 169 | def test_check_peers_wrong_own_key(self): 170 | config.key_file = "../tests/peer_unknown.key" 171 | peering.load_keys() 172 | 173 | result = peering.check_peers() 174 | self.assertEqual(result, False, "Check peers should fail if peers do not know me") 175 | 176 | # vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4 177 | -------------------------------------------------------------------------------- /tests/test_purge_methods.py: -------------------------------------------------------------------------------- 1 | # denyhosts sync server 2 | # Copyright (C) 2015 Jan-Pascal van Best 3 | 4 | # This program is free software: you can redistribute it and/or modify 5 | # it under the terms of the GNU Affero General Public License as published 6 | # by the Free Software Foundation, either version 3 of the License, or 7 | # (at your option) any later version. 8 | 9 | # This program is distributed in the hope that it will be useful, 10 | # but WITHOUT ANY WARRANTY; without even the implied warranty of 11 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 | # GNU Affero General Public License for more details. 13 | 14 | # You should have received a copy of the GNU Affero General Public License 15 | # along with this program. If not, see . 
16 | 17 | import time 18 | 19 | from denyhosts_server import models 20 | from denyhosts_server import controllers 21 | from denyhosts_server import database 22 | from denyhosts_server.models import Cracker, Report, Legacy 23 | 24 | from twisted.internet.defer import inlineCallbacks, returnValue 25 | 26 | import base 27 | 28 | class PurgingTest(base.TestBase): 29 | 30 | @inlineCallbacks 31 | def test_purge_reports(self): 32 | now = time.time() 33 | c = yield Cracker(ip_address="192.168.1.1", first_time=now, latest_time=now, total_reports=0, current_reports=0).save() 34 | c2 = yield Cracker(ip_address="192.168.1.2", first_time=now, latest_time=now, total_reports=0, current_reports=0).save() 35 | c3 = yield Cracker(ip_address="192.168.1.3", first_time=now, latest_time=now, total_reports=0, current_reports=0).save() 36 | 37 | yield controllers.add_report_to_cracker(c, "127.0.0.1", when=now) 38 | yield controllers.add_report_to_cracker(c, "127.0.0.2", when=now) 39 | yield controllers.add_report_to_cracker(c, "127.0.0.9", when=now) 40 | yield controllers.add_report_to_cracker(c2, "127.0.0.3", when=now) 41 | 42 | yield controllers.purge_legacy_addresses() 43 | 44 | crackers = yield Cracker.all() 45 | self.assertEqual(len(crackers), 3, "Should still have three crackers after purging legacy") 46 | 47 | reports = yield Report.all() 48 | self.assertEqual(len(reports), 4, "Should still have four reports after purging legacy") 49 | 50 | yield controllers.purge_reported_addresses() 51 | 52 | crackers = yield Cracker.all() 53 | self.assertEqual(len(crackers), 0, "Should have no crackers after purging reports") 54 | 55 | reports = yield Report.all() 56 | self.assertEqual(len(reports), 0, "Should have no reports after purging reports") 57 | 58 | @inlineCallbacks 59 | def test_purge_legacy(self): 60 | now = time.time() 61 | legacy = yield Legacy(ip_address="192.168.211.1", retrieved_time=now).save() 62 | legacy = yield Legacy(ip_address="192.168.211.2", retrieved_time=now).save() 63 
| 64 | legacy = yield Legacy.all() 65 | self.assertEqual(len(legacy), 2, "Should have two legacy reports") 66 | 67 | yield controllers.purge_reported_addresses() 68 | 69 | legacy = yield Legacy.all() 70 | self.assertEqual(len(legacy), 2, "Should still have two legacy reports after purging client reports") 71 | 72 | yield controllers.purge_legacy_addresses() 73 | 74 | legacy = yield Legacy.all() 75 | self.assertEqual(len(legacy), 0, "Should no legacy reports after purging legacy") 76 | 77 | rows = yield database.run_query('SELECT `value` FROM info WHERE `key`="last_legacy_sync"') 78 | self.assertEqual(rows[0][0], '0', "Purging legacy should reset last legacy sync time") 79 | 80 | @inlineCallbacks 81 | def test_purge_ip(self): 82 | now = time.time() 83 | c = yield Cracker(ip_address="192.168.211.1", first_time=now, latest_time=now, total_reports=0, current_reports=0).save() 84 | c2 = yield Cracker(ip_address="192.168.211.2", first_time=now, latest_time=now, total_reports=0, current_reports=0).save() 85 | c3 = yield Cracker(ip_address="192.168.211.3", first_time=now, latest_time=now, total_reports=0, current_reports=0).save() 86 | 87 | yield controllers.add_report_to_cracker(c, "127.0.0.1", when=now) 88 | yield controllers.add_report_to_cracker(c, "127.0.0.2", when=now) 89 | yield controllers.add_report_to_cracker(c, "127.0.0.9", when=now) 90 | yield controllers.add_report_to_cracker(c2, "127.0.0.3", when=now) 91 | 92 | legacy = yield Legacy(ip_address="192.168.211.1", retrieved_time=now).save() 93 | legacy = yield Legacy(ip_address="192.168.211.2", retrieved_time=now).save() 94 | 95 | yield controllers.purge_ip("192.168.211.1") 96 | 97 | crackers = yield Cracker.find(orderby='ip_address ASC') 98 | self.assertEqual(len(crackers), 2, "Should still have two crackers after purging one") 99 | self.assertEquals(crackers[0].ip_address, "192.168.211.2", "Should remove the right cracker") 100 | self.assertEquals(crackers[1].ip_address, "192.168.211.3", "Should remove the 
right cracker") 101 | 102 | reports = yield Report.all() 103 | self.assertEqual(len(reports), 1, "Should still have one report left after purging cracker with three reports") 104 | self.assertEquals(reports[0].ip_address, "127.0.0.3", "Should remove the right report") 105 | 106 | legacy = yield Legacy.all() 107 | self.assertEqual(len(legacy), 1, "Should still have one legacy reports after purging one") 108 | self.assertEquals(legacy[0].ip_address, "192.168.211.2", "Should remove the right legacy host") 109 | 110 | # vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4 111 | -------------------------------------------------------------------------------- /tests/test_stats.py: -------------------------------------------------------------------------------- 1 | # denyhosts sync server 2 | # Copyright (C) 2015 Jan-Pascal van Best 3 | 4 | # This program is free software: you can redistribute it and/or modify 5 | # it under the terms of the GNU Affero General Public License as published 6 | # by the Free Software Foundation, either version 3 of the License, or 7 | # (at your option) any later version. 8 | 9 | # This program is distributed in the hope that it will be useful, 10 | # but WITHOUT ANY WARRANTY; without even the implied warranty of 11 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 | # GNU Affero General Public License for more details. 13 | 14 | # You should have received a copy of the GNU Affero General Public License 15 | # along with this program. If not, see . 
16 | 17 | import datetime 18 | import inspect 19 | import os 20 | import os.path 21 | import time 22 | import traceback 23 | 24 | from denyhosts_server.models import Cracker, Report, Legacy 25 | from denyhosts_server import config 26 | from denyhosts_server import controllers 27 | from denyhosts_server import stats 28 | 29 | from twisted.internet.defer import inlineCallbacks, returnValue 30 | 31 | from twistar.registry import Registry 32 | 33 | import base 34 | 35 | class StatsTest(base.TestBase): 36 | 37 | @inlineCallbacks 38 | def test_fixup(self): 39 | now = time.time() 40 | c1 = yield Cracker(ip_address="194.109.6.92", first_time=now, latest_time=now, total_reports=0, current_reports=0).save() 41 | c2 = yield Cracker(ip_address="192.30.252.128", first_time=now, latest_time=now, total_reports=0, current_reports=0).save() 42 | 43 | hosts = [c1, c2] 44 | config.stats_resolve_hostnames = True 45 | stats.fixup_crackers(hosts) 46 | 47 | self.assertEqual(c1.hostname, "www.xs4all.nl", "Reverse DNS of www.xs4all.nl") 48 | self.assertEqual(c1.country, "Netherlands", "Testing geoip of www.xs4all.nl") 49 | 50 | self.assertEqual(c2.hostname, "github.com", "Reverse DNS of github.com") 51 | self.assertEqual(c2.country, "United States", "Testing geoip of github.com") 52 | 53 | def stats_settings(self): 54 | tests_dir = os.path.dirname(inspect.getsourcefile(self.__class__)) 55 | package_dir = os.path.dirname(tests_dir) 56 | config.static_dir = os.path.join(package_dir, "static") 57 | config.template_dir = os.path.join(package_dir, "template") 58 | config.graph_dir = os.path.join(os.getcwd(), "graph") 59 | try: 60 | os.mkdir(config.graph_dir) 61 | except OSError: 62 | pass 63 | config.stats_resolve_hostnames = False 64 | 65 | 66 | @inlineCallbacks 67 | def prepare_stats(self): 68 | self.stats_settings() 69 | 70 | now = time.time() 71 | c1 = yield Cracker(ip_address="192.168.1.1", first_time=now, latest_time=now, total_reports=0, current_reports=0).save() 72 | c2 = yield 
Cracker(ip_address="192.168.1.2", first_time=now, latest_time=now, total_reports=0, current_reports=0).save() 73 | 74 | yield controllers.add_report_to_cracker(c1, "127.0.0.1", when=now-25*3600) 75 | yield controllers.add_report_to_cracker(c1, "127.0.0.2", when=now) 76 | yield controllers.add_report_to_cracker(c1, "127.0.0.3", when=now) 77 | yield controllers.add_report_to_cracker(c2, "127.0.0.2", when=now) 78 | yield controllers.add_report_to_cracker(c2, "127.0.0.3", when=now+1) 79 | 80 | yield Registry.DBPOOL.runInteraction(stats.fixup_history_txn) 81 | yesterday = datetime.date.today() - datetime.timedelta(days=1) 82 | yield Registry.DBPOOL.runInteraction(stats.update_country_history_txn, yesterday, include_history=True) 83 | yield stats.update_stats_cache() 84 | 85 | @inlineCallbacks 86 | def test_empty_state(self): 87 | self.stats_settings() 88 | 89 | yield stats.update_stats_cache() 90 | self.assertIsNotNone(stats._cache, "Stats for empty database should not be None") 91 | 92 | cached = stats._cache["stats"] 93 | self.assertEqual(cached["num_hosts"], 0, "Number of hosts in empty database") 94 | self.assertEqual(cached["num_reports"], 0, "Number of reports in empty database") 95 | self.assertEqual(cached["num_clients"], 0, "Number of clients in empty database") 96 | 97 | html = yield stats.render_stats() 98 | #print(html) 99 | self.assertTrue("Number of clients" in html, "HTML should contain number of clients") 100 | self.assertTrue("../static/graphs/hourly.svg" in html, "HTML should contain path to hourly graph") 101 | 102 | @inlineCallbacks 103 | def test_stats_cache(self): 104 | yield self.prepare_stats() 105 | 106 | cached = stats._cache["stats"] 107 | print(cached) 108 | self.assertEqual(cached["num_hosts"], 2, "Number of hosts in database") 109 | self.assertEqual(cached["num_reports"], 5, "Number of reports in database") 110 | self.assertEqual(cached["num_clients"], 3, "Number of clients in database") 111 | 112 | 
self.assertEquals(cached["recent_hosts"][0].ip_address, "192.168.1.2", "Most recent host") 113 | self.assertEquals(len(cached["recent_hosts"]), 2, "Reported both hosts in most recent list") 114 | 115 | self.assertEquals(cached["recent_hosts"][0].ip_address, "192.168.1.2", "Most recent host") 116 | self.assertEquals(len(cached["recent_hosts"]), 2, "Reported both hosts in most recent list") 117 | 118 | self.assertEquals(cached["most_reported_hosts"][0].ip_address, "192.168.1.1", "Most reported host") 119 | self.assertEquals(len(cached["most_reported_hosts"]), 2, "Reported both hosts in most reported list") 120 | 121 | self.assertTrue(os.access(os.path.join(config.graph_dir, "hourly.svg"), os.R_OK), "Creation of hourly graph") 122 | self.assertTrue(os.access(os.path.join(config.graph_dir, "monthly.svg"), os.R_OK), "Creation of monthly graph") 123 | self.assertTrue(os.access(os.path.join(config.graph_dir, "contrib.svg"), os.R_OK), "Creation of contributors graph") 124 | 125 | @inlineCallbacks 126 | def test_stats_render(self): 127 | yield self.prepare_stats() 128 | 129 | html = yield stats.render_stats() 130 | #print(html) 131 | self.assertTrue("Number of clients" in html, "HTML should contain number of clients") 132 | self.assertTrue("../static/graphs/hourly.svg" in html, "HTML should contain path to hourly graph") 133 | self.assertFalse("127.0.0.1" in html, "HTML should not contain reported ip addresses") 134 | self.assertEqual(html.count("192.168.1.1"), 2, "HTML should contain ip address of hosts in tables") 135 | 136 | # vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4 137 | --------------------------------------------------------------------------------