├── LICENSE
├── README.md
├── args
│   ├── dateargs.go
│   └── listargs.go
├── code
│   ├── builder
│   │   ├── builder.go
│   │   └── builder_test.go
│   └── packer
│       ├── packer.go
│       └── packer_test.go
├── config
│   ├── config.go
│   ├── config_test.go
│   └── test
│       └── config.json
├── crypto
│   ├── check
│   │   ├── hash.go
│   │   └── hash_test.go
│   ├── data
│   │   └── data.go
│   └── file
│       └── file.go
├── datastructs
│   ├── bitset.go
│   ├── bitset_test.go
│   ├── datastructs.go
│   ├── fifo.go
│   ├── fifo_test.go
│   ├── hashmap.go
│   ├── hashmap_test.go
│   ├── rings.go
│   ├── rings_test.go
│   ├── set_test.go
│   ├── sets.go
│   ├── sortedslice.go
│   ├── sortedslice_test.go
│   ├── syncedmap.go
│   ├── syncmap_test.go
│   ├── utils.go
│   └── utils_test.go
├── dateutil
│   ├── dateutil_test.go
│   └── parser.go
├── encoding
│   ├── encoding.go
│   └── serialization_test.go
├── entropy
│   ├── entropy.go
│   └── entropy_test.go
├── fileutils
│   ├── fileutils.go
│   └── hash
│       ├── hash.go
│       └── hash_test.go
├── fsutil
│   ├── fsutil.go
│   ├── fswalker
│   │   ├── fswalker.go
│   │   ├── fswalker_test.go
│   │   └── test
│   │       └── test_dir
│   │           ├── loop
│   │           └── test.txt
│   ├── logfile
│   │   ├── logfile.go
│   │   └── logfile_test.go
│   └── shred
│       └── shred.go
├── go.mod
├── go.sum
├── log
│   ├── log.go
│   ├── log_test.go
│   └── test
│       └── test.log
├── net
│   └── sftp
│       ├── sftp.go
│       └── sftp_test.go
├── ngram
│   ├── ngram.go
│   └── ngram_test.go
├── progress
│   ├── progress_test.go
│   └── sprogress.go
├── readers
│   ├── readers.go
│   └── reverse_test.go
├── regexp
│   └── submatch
│       ├── submatch.go
│       └── submatch_test.go
├── runtime
│   └── systeminfo
│       ├── systeminfo.go
│       └── systeminfo_test.go
├── scanner
│   ├── scanner.go
│   └── scanner_test.go
├── stats
│   ├── stats.go
│   └── stats_test.go
└── sync
    └── semaphore
        ├── semaphore.go
        └── semaphore_test.go
/LICENSE:
--------------------------------------------------------------------------------
1 | GNU GENERAL PUBLIC LICENSE
2 | Version 3, 29 June 2007
3 |
4 | Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
5 | Everyone is permitted to copy and distribute verbatim copies
6 | of this license document, but changing it is not allowed.
7 |
8 | Preamble
9 |
10 | The GNU General Public License is a free, copyleft license for
11 | software and other kinds of works.
12 |
13 | The licenses for most software and other practical works are designed
14 | to take away your freedom to share and change the works. By contrast,
15 | the GNU General Public License is intended to guarantee your freedom to
16 | share and change all versions of a program--to make sure it remains free
17 | software for all its users. We, the Free Software Foundation, use the
18 | GNU General Public License for most of our software; it applies also to
19 | any other work released this way by its authors. You can apply it to
20 | your programs, too.
21 |
22 | When we speak of free software, we are referring to freedom, not
23 | price. Our General Public Licenses are designed to make sure that you
24 | have the freedom to distribute copies of free software (and charge for
25 | them if you wish), that you receive source code or can get it if you
26 | want it, that you can change the software or use pieces of it in new
27 | free programs, and that you know you can do these things.
28 |
29 | To protect your rights, we need to prevent others from denying you
30 | these rights or asking you to surrender the rights. Therefore, you have
31 | certain responsibilities if you distribute copies of the software, or if
32 | you modify it: responsibilities to respect the freedom of others.
33 |
34 | For example, if you distribute copies of such a program, whether
35 | gratis or for a fee, you must pass on to the recipients the same
36 | freedoms that you received. You must make sure that they, too, receive
37 | or can get the source code. And you must show them these terms so they
38 | know their rights.
39 |
40 | Developers that use the GNU GPL protect your rights with two steps:
41 | (1) assert copyright on the software, and (2) offer you this License
42 | giving you legal permission to copy, distribute and/or modify it.
43 |
44 | For the developers' and authors' protection, the GPL clearly explains
45 | that there is no warranty for this free software. For both users' and
46 | authors' sake, the GPL requires that modified versions be marked as
47 | changed, so that their problems will not be attributed erroneously to
48 | authors of previous versions.
49 |
50 | Some devices are designed to deny users access to install or run
51 | modified versions of the software inside them, although the manufacturer
52 | can do so. This is fundamentally incompatible with the aim of
53 | protecting users' freedom to change the software. The systematic
54 | pattern of such abuse occurs in the area of products for individuals to
55 | use, which is precisely where it is most unacceptable. Therefore, we
56 | have designed this version of the GPL to prohibit the practice for those
57 | products. If such problems arise substantially in other domains, we
58 | stand ready to extend this provision to those domains in future versions
59 | of the GPL, as needed to protect the freedom of users.
60 |
61 | Finally, every program is threatened constantly by software patents.
62 | States should not allow patents to restrict development and use of
63 | software on general-purpose computers, but in those that do, we wish to
64 | avoid the special danger that patents applied to a free program could
65 | make it effectively proprietary. To prevent this, the GPL assures that
66 | patents cannot be used to render the program non-free.
67 |
68 | The precise terms and conditions for copying, distribution and
69 | modification follow.
70 |
71 | TERMS AND CONDITIONS
72 |
73 | 0. Definitions.
74 |
75 | "This License" refers to version 3 of the GNU General Public License.
76 |
77 | "Copyright" also means copyright-like laws that apply to other kinds of
78 | works, such as semiconductor masks.
79 |
80 | "The Program" refers to any copyrightable work licensed under this
81 | License. Each licensee is addressed as "you". "Licensees" and
82 | "recipients" may be individuals or organizations.
83 |
84 | To "modify" a work means to copy from or adapt all or part of the work
85 | in a fashion requiring copyright permission, other than the making of an
86 | exact copy. The resulting work is called a "modified version" of the
87 | earlier work or a work "based on" the earlier work.
88 |
89 | A "covered work" means either the unmodified Program or a work based
90 | on the Program.
91 |
92 | To "propagate" a work means to do anything with it that, without
93 | permission, would make you directly or secondarily liable for
94 | infringement under applicable copyright law, except executing it on a
95 | computer or modifying a private copy. Propagation includes copying,
96 | distribution (with or without modification), making available to the
97 | public, and in some countries other activities as well.
98 |
99 | To "convey" a work means any kind of propagation that enables other
100 | parties to make or receive copies. Mere interaction with a user through
101 | a computer network, with no transfer of a copy, is not conveying.
102 |
103 | An interactive user interface displays "Appropriate Legal Notices"
104 | to the extent that it includes a convenient and prominently visible
105 | feature that (1) displays an appropriate copyright notice, and (2)
106 | tells the user that there is no warranty for the work (except to the
107 | extent that warranties are provided), that licensees may convey the
108 | work under this License, and how to view a copy of this License. If
109 | the interface presents a list of user commands or options, such as a
110 | menu, a prominent item in the list meets this criterion.
111 |
112 | 1. Source Code.
113 |
114 | The "source code" for a work means the preferred form of the work
115 | for making modifications to it. "Object code" means any non-source
116 | form of a work.
117 |
118 | A "Standard Interface" means an interface that either is an official
119 | standard defined by a recognized standards body, or, in the case of
120 | interfaces specified for a particular programming language, one that
121 | is widely used among developers working in that language.
122 |
123 | The "System Libraries" of an executable work include anything, other
124 | than the work as a whole, that (a) is included in the normal form of
125 | packaging a Major Component, but which is not part of that Major
126 | Component, and (b) serves only to enable use of the work with that
127 | Major Component, or to implement a Standard Interface for which an
128 | implementation is available to the public in source code form. A
129 | "Major Component", in this context, means a major essential component
130 | (kernel, window system, and so on) of the specific operating system
131 | (if any) on which the executable work runs, or a compiler used to
132 | produce the work, or an object code interpreter used to run it.
133 |
134 | The "Corresponding Source" for a work in object code form means all
135 | the source code needed to generate, install, and (for an executable
136 | work) run the object code and to modify the work, including scripts to
137 | control those activities. However, it does not include the work's
138 | System Libraries, or general-purpose tools or generally available free
139 | programs which are used unmodified in performing those activities but
140 | which are not part of the work. For example, Corresponding Source
141 | includes interface definition files associated with source files for
142 | the work, and the source code for shared libraries and dynamically
143 | linked subprograms that the work is specifically designed to require,
144 | such as by intimate data communication or control flow between those
145 | subprograms and other parts of the work.
146 |
147 | The Corresponding Source need not include anything that users
148 | can regenerate automatically from other parts of the Corresponding
149 | Source.
150 |
151 | The Corresponding Source for a work in source code form is that
152 | same work.
153 |
154 | 2. Basic Permissions.
155 |
156 | All rights granted under this License are granted for the term of
157 | copyright on the Program, and are irrevocable provided the stated
158 | conditions are met. This License explicitly affirms your unlimited
159 | permission to run the unmodified Program. The output from running a
160 | covered work is covered by this License only if the output, given its
161 | content, constitutes a covered work. This License acknowledges your
162 | rights of fair use or other equivalent, as provided by copyright law.
163 |
164 | You may make, run and propagate covered works that you do not
165 | convey, without conditions so long as your license otherwise remains
166 | in force. You may convey covered works to others for the sole purpose
167 | of having them make modifications exclusively for you, or provide you
168 | with facilities for running those works, provided that you comply with
169 | the terms of this License in conveying all material for which you do
170 | not control copyright. Those thus making or running the covered works
171 | for you must do so exclusively on your behalf, under your direction
172 | and control, on terms that prohibit them from making any copies of
173 | your copyrighted material outside their relationship with you.
174 |
175 | Conveying under any other circumstances is permitted solely under
176 | the conditions stated below. Sublicensing is not allowed; section 10
177 | makes it unnecessary.
178 |
179 | 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
180 |
181 | No covered work shall be deemed part of an effective technological
182 | measure under any applicable law fulfilling obligations under article
183 | 11 of the WIPO copyright treaty adopted on 20 December 1996, or
184 | similar laws prohibiting or restricting circumvention of such
185 | measures.
186 |
187 | When you convey a covered work, you waive any legal power to forbid
188 | circumvention of technological measures to the extent such circumvention
189 | is effected by exercising rights under this License with respect to
190 | the covered work, and you disclaim any intention to limit operation or
191 | modification of the work as a means of enforcing, against the work's
192 | users, your or third parties' legal rights to forbid circumvention of
193 | technological measures.
194 |
195 | 4. Conveying Verbatim Copies.
196 |
197 | You may convey verbatim copies of the Program's source code as you
198 | receive it, in any medium, provided that you conspicuously and
199 | appropriately publish on each copy an appropriate copyright notice;
200 | keep intact all notices stating that this License and any
201 | non-permissive terms added in accord with section 7 apply to the code;
202 | keep intact all notices of the absence of any warranty; and give all
203 | recipients a copy of this License along with the Program.
204 |
205 | You may charge any price or no price for each copy that you convey,
206 | and you may offer support or warranty protection for a fee.
207 |
208 | 5. Conveying Modified Source Versions.
209 |
210 | You may convey a work based on the Program, or the modifications to
211 | produce it from the Program, in the form of source code under the
212 | terms of section 4, provided that you also meet all of these conditions:
213 |
214 | a) The work must carry prominent notices stating that you modified
215 | it, and giving a relevant date.
216 |
217 | b) The work must carry prominent notices stating that it is
218 | released under this License and any conditions added under section
219 | 7. This requirement modifies the requirement in section 4 to
220 | "keep intact all notices".
221 |
222 | c) You must license the entire work, as a whole, under this
223 | License to anyone who comes into possession of a copy. This
224 | License will therefore apply, along with any applicable section 7
225 | additional terms, to the whole of the work, and all its parts,
226 | regardless of how they are packaged. This License gives no
227 | permission to license the work in any other way, but it does not
228 | invalidate such permission if you have separately received it.
229 |
230 | d) If the work has interactive user interfaces, each must display
231 | Appropriate Legal Notices; however, if the Program has interactive
232 | interfaces that do not display Appropriate Legal Notices, your
233 | work need not make them do so.
234 |
235 | A compilation of a covered work with other separate and independent
236 | works, which are not by their nature extensions of the covered work,
237 | and which are not combined with it such as to form a larger program,
238 | in or on a volume of a storage or distribution medium, is called an
239 | "aggregate" if the compilation and its resulting copyright are not
240 | used to limit the access or legal rights of the compilation's users
241 | beyond what the individual works permit. Inclusion of a covered work
242 | in an aggregate does not cause this License to apply to the other
243 | parts of the aggregate.
244 |
245 | 6. Conveying Non-Source Forms.
246 |
247 | You may convey a covered work in object code form under the terms
248 | of sections 4 and 5, provided that you also convey the
249 | machine-readable Corresponding Source under the terms of this License,
250 | in one of these ways:
251 |
252 | a) Convey the object code in, or embodied in, a physical product
253 | (including a physical distribution medium), accompanied by the
254 | Corresponding Source fixed on a durable physical medium
255 | customarily used for software interchange.
256 |
257 | b) Convey the object code in, or embodied in, a physical product
258 | (including a physical distribution medium), accompanied by a
259 | written offer, valid for at least three years and valid for as
260 | long as you offer spare parts or customer support for that product
261 | model, to give anyone who possesses the object code either (1) a
262 | copy of the Corresponding Source for all the software in the
263 | product that is covered by this License, on a durable physical
264 | medium customarily used for software interchange, for a price no
265 | more than your reasonable cost of physically performing this
266 | conveying of source, or (2) access to copy the
267 | Corresponding Source from a network server at no charge.
268 |
269 | c) Convey individual copies of the object code with a copy of the
270 | written offer to provide the Corresponding Source. This
271 | alternative is allowed only occasionally and noncommercially, and
272 | only if you received the object code with such an offer, in accord
273 | with subsection 6b.
274 |
275 | d) Convey the object code by offering access from a designated
276 | place (gratis or for a charge), and offer equivalent access to the
277 | Corresponding Source in the same way through the same place at no
278 | further charge. You need not require recipients to copy the
279 | Corresponding Source along with the object code. If the place to
280 | copy the object code is a network server, the Corresponding Source
281 | may be on a different server (operated by you or a third party)
282 | that supports equivalent copying facilities, provided you maintain
283 | clear directions next to the object code saying where to find the
284 | Corresponding Source. Regardless of what server hosts the
285 | Corresponding Source, you remain obligated to ensure that it is
286 | available for as long as needed to satisfy these requirements.
287 |
288 | e) Convey the object code using peer-to-peer transmission, provided
289 | you inform other peers where the object code and Corresponding
290 | Source of the work are being offered to the general public at no
291 | charge under subsection 6d.
292 |
293 | A separable portion of the object code, whose source code is excluded
294 | from the Corresponding Source as a System Library, need not be
295 | included in conveying the object code work.
296 |
297 | A "User Product" is either (1) a "consumer product", which means any
298 | tangible personal property which is normally used for personal, family,
299 | or household purposes, or (2) anything designed or sold for incorporation
300 | into a dwelling. In determining whether a product is a consumer product,
301 | doubtful cases shall be resolved in favor of coverage. For a particular
302 | product received by a particular user, "normally used" refers to a
303 | typical or common use of that class of product, regardless of the status
304 | of the particular user or of the way in which the particular user
305 | actually uses, or expects or is expected to use, the product. A product
306 | is a consumer product regardless of whether the product has substantial
307 | commercial, industrial or non-consumer uses, unless such uses represent
308 | the only significant mode of use of the product.
309 |
310 | "Installation Information" for a User Product means any methods,
311 | procedures, authorization keys, or other information required to install
312 | and execute modified versions of a covered work in that User Product from
313 | a modified version of its Corresponding Source. The information must
314 | suffice to ensure that the continued functioning of the modified object
315 | code is in no case prevented or interfered with solely because
316 | modification has been made.
317 |
318 | If you convey an object code work under this section in, or with, or
319 | specifically for use in, a User Product, and the conveying occurs as
320 | part of a transaction in which the right of possession and use of the
321 | User Product is transferred to the recipient in perpetuity or for a
322 | fixed term (regardless of how the transaction is characterized), the
323 | Corresponding Source conveyed under this section must be accompanied
324 | by the Installation Information. But this requirement does not apply
325 | if neither you nor any third party retains the ability to install
326 | modified object code on the User Product (for example, the work has
327 | been installed in ROM).
328 |
329 | The requirement to provide Installation Information does not include a
330 | requirement to continue to provide support service, warranty, or updates
331 | for a work that has been modified or installed by the recipient, or for
332 | the User Product in which it has been modified or installed. Access to a
333 | network may be denied when the modification itself materially and
334 | adversely affects the operation of the network or violates the rules and
335 | protocols for communication across the network.
336 |
337 | Corresponding Source conveyed, and Installation Information provided,
338 | in accord with this section must be in a format that is publicly
339 | documented (and with an implementation available to the public in
340 | source code form), and must require no special password or key for
341 | unpacking, reading or copying.
342 |
343 | 7. Additional Terms.
344 |
345 | "Additional permissions" are terms that supplement the terms of this
346 | License by making exceptions from one or more of its conditions.
347 | Additional permissions that are applicable to the entire Program shall
348 | be treated as though they were included in this License, to the extent
349 | that they are valid under applicable law. If additional permissions
350 | apply only to part of the Program, that part may be used separately
351 | under those permissions, but the entire Program remains governed by
352 | this License without regard to the additional permissions.
353 |
354 | When you convey a copy of a covered work, you may at your option
355 | remove any additional permissions from that copy, or from any part of
356 | it. (Additional permissions may be written to require their own
357 | removal in certain cases when you modify the work.) You may place
358 | additional permissions on material, added by you to a covered work,
359 | for which you have or can give appropriate copyright permission.
360 |
361 | Notwithstanding any other provision of this License, for material you
362 | add to a covered work, you may (if authorized by the copyright holders of
363 | that material) supplement the terms of this License with terms:
364 |
365 | a) Disclaiming warranty or limiting liability differently from the
366 | terms of sections 15 and 16 of this License; or
367 |
368 | b) Requiring preservation of specified reasonable legal notices or
369 | author attributions in that material or in the Appropriate Legal
370 | Notices displayed by works containing it; or
371 |
372 | c) Prohibiting misrepresentation of the origin of that material, or
373 | requiring that modified versions of such material be marked in
374 | reasonable ways as different from the original version; or
375 |
376 | d) Limiting the use for publicity purposes of names of licensors or
377 | authors of the material; or
378 |
379 | e) Declining to grant rights under trademark law for use of some
380 | trade names, trademarks, or service marks; or
381 |
382 | f) Requiring indemnification of licensors and authors of that
383 | material by anyone who conveys the material (or modified versions of
384 | it) with contractual assumptions of liability to the recipient, for
385 | any liability that these contractual assumptions directly impose on
386 | those licensors and authors.
387 |
388 | All other non-permissive additional terms are considered "further
389 | restrictions" within the meaning of section 10. If the Program as you
390 | received it, or any part of it, contains a notice stating that it is
391 | governed by this License along with a term that is a further
392 | restriction, you may remove that term. If a license document contains
393 | a further restriction but permits relicensing or conveying under this
394 | License, you may add to a covered work material governed by the terms
395 | of that license document, provided that the further restriction does
396 | not survive such relicensing or conveying.
397 |
398 | If you add terms to a covered work in accord with this section, you
399 | must place, in the relevant source files, a statement of the
400 | additional terms that apply to those files, or a notice indicating
401 | where to find the applicable terms.
402 |
403 | Additional terms, permissive or non-permissive, may be stated in the
404 | form of a separately written license, or stated as exceptions;
405 | the above requirements apply either way.
406 |
407 | 8. Termination.
408 |
409 | You may not propagate or modify a covered work except as expressly
410 | provided under this License. Any attempt otherwise to propagate or
411 | modify it is void, and will automatically terminate your rights under
412 | this License (including any patent licenses granted under the third
413 | paragraph of section 11).
414 |
415 | However, if you cease all violation of this License, then your
416 | license from a particular copyright holder is reinstated (a)
417 | provisionally, unless and until the copyright holder explicitly and
418 | finally terminates your license, and (b) permanently, if the copyright
419 | holder fails to notify you of the violation by some reasonable means
420 | prior to 60 days after the cessation.
421 |
422 | Moreover, your license from a particular copyright holder is
423 | reinstated permanently if the copyright holder notifies you of the
424 | violation by some reasonable means, this is the first time you have
425 | received notice of violation of this License (for any work) from that
426 | copyright holder, and you cure the violation prior to 30 days after
427 | your receipt of the notice.
428 |
429 | Termination of your rights under this section does not terminate the
430 | licenses of parties who have received copies or rights from you under
431 | this License. If your rights have been terminated and not permanently
432 | reinstated, you do not qualify to receive new licenses for the same
433 | material under section 10.
434 |
435 | 9. Acceptance Not Required for Having Copies.
436 |
437 | You are not required to accept this License in order to receive or
438 | run a copy of the Program. Ancillary propagation of a covered work
439 | occurring solely as a consequence of using peer-to-peer transmission
440 | to receive a copy likewise does not require acceptance. However,
441 | nothing other than this License grants you permission to propagate or
442 | modify any covered work. These actions infringe copyright if you do
443 | not accept this License. Therefore, by modifying or propagating a
444 | covered work, you indicate your acceptance of this License to do so.
445 |
446 | 10. Automatic Licensing of Downstream Recipients.
447 |
448 | Each time you convey a covered work, the recipient automatically
449 | receives a license from the original licensors, to run, modify and
450 | propagate that work, subject to this License. You are not responsible
451 | for enforcing compliance by third parties with this License.
452 |
453 | An "entity transaction" is a transaction transferring control of an
454 | organization, or substantially all assets of one, or subdividing an
455 | organization, or merging organizations. If propagation of a covered
456 | work results from an entity transaction, each party to that
457 | transaction who receives a copy of the work also receives whatever
458 | licenses to the work the party's predecessor in interest had or could
459 | give under the previous paragraph, plus a right to possession of the
460 | Corresponding Source of the work from the predecessor in interest, if
461 | the predecessor has it or can get it with reasonable efforts.
462 |
463 | You may not impose any further restrictions on the exercise of the
464 | rights granted or affirmed under this License. For example, you may
465 | not impose a license fee, royalty, or other charge for exercise of
466 | rights granted under this License, and you may not initiate litigation
467 | (including a cross-claim or counterclaim in a lawsuit) alleging that
468 | any patent claim is infringed by making, using, selling, offering for
469 | sale, or importing the Program or any portion of it.
470 |
471 | 11. Patents.
472 |
473 | A "contributor" is a copyright holder who authorizes use under this
474 | License of the Program or a work on which the Program is based. The
475 | work thus licensed is called the contributor's "contributor version".
476 |
477 | A contributor's "essential patent claims" are all patent claims
478 | owned or controlled by the contributor, whether already acquired or
479 | hereafter acquired, that would be infringed by some manner, permitted
480 | by this License, of making, using, or selling its contributor version,
481 | but do not include claims that would be infringed only as a
482 | consequence of further modification of the contributor version. For
483 | purposes of this definition, "control" includes the right to grant
484 | patent sublicenses in a manner consistent with the requirements of
485 | this License.
486 |
487 | Each contributor grants you a non-exclusive, worldwide, royalty-free
488 | patent license under the contributor's essential patent claims, to
489 | make, use, sell, offer for sale, import and otherwise run, modify and
490 | propagate the contents of its contributor version.
491 |
492 | In the following three paragraphs, a "patent license" is any express
493 | agreement or commitment, however denominated, not to enforce a patent
494 | (such as an express permission to practice a patent or covenant not to
495 | sue for patent infringement). To "grant" such a patent license to a
496 | party means to make such an agreement or commitment not to enforce a
497 | patent against the party.
498 |
499 | If you convey a covered work, knowingly relying on a patent license,
500 | and the Corresponding Source of the work is not available for anyone
501 | to copy, free of charge and under the terms of this License, through a
502 | publicly available network server or other readily accessible means,
503 | then you must either (1) cause the Corresponding Source to be so
504 | available, or (2) arrange to deprive yourself of the benefit of the
505 | patent license for this particular work, or (3) arrange, in a manner
506 | consistent with the requirements of this License, to extend the patent
507 | license to downstream recipients. "Knowingly relying" means you have
508 | actual knowledge that, but for the patent license, your conveying the
509 | covered work in a country, or your recipient's use of the covered work
510 | in a country, would infringe one or more identifiable patents in that
511 | country that you have reason to believe are valid.
512 |
513 | If, pursuant to or in connection with a single transaction or
514 | arrangement, you convey, or propagate by procuring conveyance of, a
515 | covered work, and grant a patent license to some of the parties
516 | receiving the covered work authorizing them to use, propagate, modify
517 | or convey a specific copy of the covered work, then the patent license
518 | you grant is automatically extended to all recipients of the covered
519 | work and works based on it.
520 |
521 | A patent license is "discriminatory" if it does not include within
522 | the scope of its coverage, prohibits the exercise of, or is
523 | conditioned on the non-exercise of one or more of the rights that are
524 | specifically granted under this License. You may not convey a covered
525 | work if you are a party to an arrangement with a third party that is
526 | in the business of distributing software, under which you make payment
527 | to the third party based on the extent of your activity of conveying
528 | the work, and under which the third party grants, to any of the
529 | parties who would receive the covered work from you, a discriminatory
530 | patent license (a) in connection with copies of the covered work
531 | conveyed by you (or copies made from those copies), or (b) primarily
532 | for and in connection with specific products or compilations that
533 | contain the covered work, unless you entered into that arrangement,
534 | or that patent license was granted, prior to 28 March 2007.
535 |
536 | Nothing in this License shall be construed as excluding or limiting
537 | any implied license or other defenses to infringement that may
538 | otherwise be available to you under applicable patent law.
539 |
540 | 12. No Surrender of Others' Freedom.
541 |
542 | If conditions are imposed on you (whether by court order, agreement or
543 | otherwise) that contradict the conditions of this License, they do not
544 | excuse you from the conditions of this License. If you cannot convey a
545 | covered work so as to satisfy simultaneously your obligations under this
546 | License and any other pertinent obligations, then as a consequence you may
547 | not convey it at all. For example, if you agree to terms that obligate you
548 | to collect a royalty for further conveying from those to whom you convey
549 | the Program, the only way you could satisfy both those terms and this
550 | License would be to refrain entirely from conveying the Program.
551 |
552 | 13. Use with the GNU Affero General Public License.
553 |
554 | Notwithstanding any other provision of this License, you have
555 | permission to link or combine any covered work with a work licensed
556 | under version 3 of the GNU Affero General Public License into a single
557 | combined work, and to convey the resulting work. The terms of this
558 | License will continue to apply to the part which is the covered work,
559 | but the special requirements of the GNU Affero General Public License,
560 | section 13, concerning interaction through a network will apply to the
561 | combination as such.
562 |
563 | 14. Revised Versions of this License.
564 |
565 | The Free Software Foundation may publish revised and/or new versions of
566 | the GNU General Public License from time to time. Such new versions will
567 | be similar in spirit to the present version, but may differ in detail to
568 | address new problems or concerns.
569 |
570 | Each version is given a distinguishing version number. If the
571 | Program specifies that a certain numbered version of the GNU General
572 | Public License "or any later version" applies to it, you have the
573 | option of following the terms and conditions either of that numbered
574 | version or of any later version published by the Free Software
575 | Foundation. If the Program does not specify a version number of the
576 | GNU General Public License, you may choose any version ever published
577 | by the Free Software Foundation.
578 |
579 | If the Program specifies that a proxy can decide which future
580 | versions of the GNU General Public License can be used, that proxy's
581 | public statement of acceptance of a version permanently authorizes you
582 | to choose that version for the Program.
583 |
584 | Later license versions may give you additional or different
585 | permissions. However, no additional obligations are imposed on any
586 | author or copyright holder as a result of your choosing to follow a
587 | later version.
588 |
589 | 15. Disclaimer of Warranty.
590 |
591 | THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
592 | APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
593 | HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
594 | OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
595 | THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
596 | PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
597 | IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
598 | ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
599 |
600 | 16. Limitation of Liability.
601 |
602 | IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
603 | WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
604 | THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
605 | GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
606 | USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
607 | DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
608 | PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
609 | EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
610 | SUCH DAMAGES.
611 |
612 | 17. Interpretation of Sections 15 and 16.
613 |
614 | If the disclaimer of warranty and limitation of liability provided
615 | above cannot be given local legal effect according to their terms,
616 | reviewing courts shall apply local law that most closely approximates
617 | an absolute waiver of all civil liability in connection with the
618 | Program, unless a warranty or assumption of liability accompanies a
619 | copy of the Program in return for a fee.
620 |
621 | END OF TERMS AND CONDITIONS
622 |
623 | How to Apply These Terms to Your New Programs
624 |
625 | If you develop a new program, and you want it to be of the greatest
626 | possible use to the public, the best way to achieve this is to make it
627 | free software which everyone can redistribute and change under these terms.
628 |
629 | To do so, attach the following notices to the program. It is safest
630 | to attach them to the start of each source file to most effectively
631 | state the exclusion of warranty; and each file should have at least
632 | the "copyright" line and a pointer to where the full notice is found.
633 |
634 | {one line to give the program's name and a brief idea of what it does.}
635 | Copyright (C) {year} {name of author}
636 |
637 | This program is free software: you can redistribute it and/or modify
638 | it under the terms of the GNU General Public License as published by
639 | the Free Software Foundation, either version 3 of the License, or
640 | (at your option) any later version.
641 |
642 | This program is distributed in the hope that it will be useful,
643 | but WITHOUT ANY WARRANTY; without even the implied warranty of
644 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
645 | GNU General Public License for more details.
646 |
647 | You should have received a copy of the GNU General Public License
648 | along with this program. If not, see <https://www.gnu.org/licenses/>.
649 |
650 | Also add information on how to contact you by electronic and paper mail.
651 |
652 | If the program does terminal interaction, make it output a short
653 | notice like this when it starts in an interactive mode:
654 |
655 | {project} Copyright (C) {year} {fullname}
656 | This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
657 | This is free software, and you are welcome to redistribute it
658 | under certain conditions; type `show c' for details.
659 |
660 | The hypothetical commands `show w' and `show c' should show the appropriate
661 | parts of the General Public License. Of course, your program's commands
662 | might be different; for a GUI interface, you would use an "about box".
663 |
664 | You should also get your employer (if you work as a programmer) or school,
665 | if any, to sign a "copyright disclaimer" for the program, if necessary.
666 | For more information on this, and how to apply and follow the GNU GPL, see
667 | <https://www.gnu.org/licenses/>.
668 |
669 | The GNU General Public License does not permit incorporating your program
670 | into proprietary programs. If your program is a subroutine library, you
671 | may consider it more useful to permit linking proprietary applications with
672 | the library. If this is what you want to do, use the GNU Lesser General
673 | Public License instead of this License. But first, please read
674 | <https://www.gnu.org/licenses/why-not-lgpl.html>.
675 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # golang-utils
2 | Utility Library
3 |
--------------------------------------------------------------------------------
/args/dateargs.go:
--------------------------------------------------------------------------------
1 | package args
2 |
3 | import (
4 | "time"
5 |
6 | "github.com/0xrawsec/golang-utils/dateutil"
7 | )
8 |
9 | // DateVar wraps time.Time to implement the flag.Value interface
10 | type DateVar time.Time
11 |
12 | // String argument implementation
13 | func (da *DateVar) String() string {
14 | return time.Time(*da).String()
15 | }
16 |
17 | // Set argument implementation
18 | func (da *DateVar) Set(input string) error {
19 | t, err := dateutil.Parse(input)
20 | (*da) = DateVar(t)
21 | return err
22 | }
23 |
24 | // DurationVar wraps time.Duration to implement the flag.Value interface
25 | type DurationVar time.Duration
26 |
27 | // String argument implementation
28 | func (da *DurationVar) String() string {
29 | return time.Duration(*da).String()
30 | }
31 |
32 | // Set argument implementation
33 | func (da *DurationVar) Set(input string) error {
34 | tda, err := time.ParseDuration(input)
35 | if err == nil {
36 | *da = DurationVar(tda)
37 | }
38 | return err
39 | }
40 |
--------------------------------------------------------------------------------
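
For illustration, here is a minimal sketch (not part of the repository; the flag names are made up) of how DateVar and DurationVar plug into the standard flag package, which is what the String/Set method pairs above implement:

    package main

    import (
    	"flag"
    	"fmt"
    	"time"

    	"github.com/0xrawsec/golang-utils/args"
    )

    func main() {
    	var since args.DateVar
    	var timeout args.DurationVar
    	// flag.Var accepts any value implementing String() string and Set(string) error
    	flag.Var(&since, "since", "only process items newer than this date")
    	flag.Var(&timeout, "timeout", "overall timeout, e.g. 30s or 5m")
    	flag.Parse()
    	fmt.Println(time.Time(since), time.Duration(timeout))
    }
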
/args/listargs.go:
--------------------------------------------------------------------------------
1 | package args
2 |
3 | import (
4 | "fmt"
5 | "strconv"
6 | "strings"
7 | )
8 |
9 | // ListVar is a []string flag value for use with the flag module. It parses
10 | // the raw argument as a comma-separated string.
11 | type ListVar []string
12 |
13 | // String interface implementation
14 | func (la *ListVar) String() string {
15 | return fmt.Sprintf("%s", *la)
16 | }
17 |
18 | // Set interface implementation
19 | func (la *ListVar) Set(input string) error {
20 | *la = strings.Split(input, ",")
21 | return nil
22 | }
23 | // ListIntVar parses a comma-separated list of integers (implements flag.Value)
24 | type ListIntVar []int
25 |
26 | // String interface implementation
27 | func (lia *ListIntVar) String() string {
28 | return fmt.Sprintf("%v", *lia)
29 | }
30 |
31 | // Set interface implementation
32 | func (lia *ListIntVar) Set(input string) error {
33 | lsa := strings.Split(input, ",")
34 | *lia = make([]int, len(lsa))
35 | for i, s := range lsa {
36 | iv, err := strconv.Atoi(s)
37 | if err != nil {
38 | return err
39 | }
40 | (*lia)[i] = iv
41 | }
42 | return nil
43 | }
44 |
--------------------------------------------------------------------------------
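
In the same spirit, a short sketch (not from the repository) of what Set does for the list types; the flag package calls Set with the raw argument string, and for ListIntVar any strconv.Atoi failure is propagated as the error:

    package main

    import (
    	"fmt"

    	"github.com/0xrawsec/golang-utils/args"
    )

    func main() {
    	var names args.ListVar
    	var ports args.ListIntVar

    	// Set is what flag.Var ends up calling with the command line value
    	_ = names.Set("foo,bar,baz")
    	fmt.Println(names) // [foo bar baz]

    	if err := ports.Set("80,443,not-a-number"); err != nil {
    		fmt.Println("parse error:", err)
    	}
    }
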
/code/builder/builder.go:
--------------------------------------------------------------------------------
1 | package builder
2 |
3 | import (
4 | "bytes"
5 | "fmt"
6 | "reflect"
7 | "strings"
8 |
9 | "golang.org/x/tools/imports"
10 | )
11 |
12 | /////////////////////////////// Utility function ///////////////////////////////
13 |
14 | func joinFold(ss []string, sep string, foldWidth int) string {
15 | tmpWidth := 0
16 | var out []string
17 | for _, s := range ss {
18 | tmpWidth += len(s)
19 | if tmpWidth >= foldWidth {
20 | out = append(out, fmt.Sprintf("%s%s\n", s, sep))
21 | tmpWidth = 0
22 | continue
23 | }
24 | out = append(out, fmt.Sprintf("%s%s ", s, sep))
25 | }
26 | return strings.Join(out, "")
27 | }
28 |
29 | func repr(i interface{}) string {
30 | v := reflect.ValueOf(i)
31 | switch v.Kind() {
32 | case reflect.Ptr:
33 | if v.IsZero() {
34 | return "nil"
35 | }
36 | return repr(v.Elem().Interface())
37 | case reflect.Struct:
38 | t := v.Type()
39 | structStr := make([]string, 0, v.NumField())
40 | for i := 0; i < v.NumField(); i++ {
41 | fieldName := t.Field(i).Name
42 | if string(fieldName[0]) == strings.ToUpper(string(fieldName[0])) {
43 | structStr = append(structStr, repr(v.Field(i).Interface()))
44 | }
45 | }
46 | return fmt.Sprintf("%T{%s}", v.Interface(), strings.Join(structStr, ","))
47 | case reflect.String:
48 | s := fmt.Sprintf("%s", v)
49 | if strings.Contains(s, `"`) {
50 | s = strings.Replace(s, "`", "'", -1)
51 | return fmt.Sprintf("`%s`", s)
52 | }
53 | return fmt.Sprintf("\"%s\"", s)
54 | case reflect.Slice:
55 | elements := make([]string, 0, v.Len())
56 | for i := 0; i < v.Len(); i++ {
57 | elements = append(elements, repr(v.Index(i).Interface()))
58 | }
59 | return fmt.Sprintf("%s{%v}", v.Type(), joinFold(elements, ", ", 80))
60 | case reflect.Map:
61 | elements := make([]string, 0, v.Len())
62 | for _, k := range v.MapKeys() {
63 | elements = append(elements, fmt.Sprintf("%s: %s", repr(k.Interface()), repr(v.MapIndex(k).Interface())))
64 | }
65 | return fmt.Sprintf("%s{\n%s}", v.Type(), strings.Join(elements, ",\n"))
66 | case reflect.Interface:
67 | return repr(v.Elem().Interface())
68 | default:
69 | return fmt.Sprintf("%v", v)
70 | }
71 | }
72 |
73 | type CodeBuilder struct {
74 | bytes.Buffer
75 | }
76 |
77 | func (cb *CodeBuilder) Package(packageName string) {
78 | cb.WriteString(fmt.Sprintf("package %s\n", packageName))
79 | }
80 |
81 | func (cb *CodeBuilder) DefVariable(name string, value interface{}) {
82 | cb.WriteString(fmt.Sprintf("var %s = %s\n", name, repr(value)))
83 | }
84 |
85 | func (cb *CodeBuilder) ResolveImports() {
86 | out, err := imports.Process("", cb.Bytes(), nil)
87 | if err != nil {
88 | panic(err)
89 | }
90 | cb.Reset()
91 | cb.Write(out)
92 | }
93 |
--------------------------------------------------------------------------------
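
A hedged sketch (not from the repository) of how the builder is typically driven: CodeBuilder accumulates Go source text in its embedded bytes.Buffer, and ResolveImports pipes the buffer through golang.org/x/tools/imports, which adds any missing import statements and gofmt-formats the result. The generated source shown in the trailing comment is approximate:

    package main

    import (
    	"fmt"

    	"github.com/0xrawsec/golang-utils/code/builder"
    )

    func main() {
    	cb := builder.CodeBuilder{}
    	cb.Package("resources")
    	cb.DefVariable("Names", []string{"foo", "bar"})
    	// ResolveImports resolves imports and gofmt-formats the buffer
    	cb.ResolveImports()
    	fmt.Println(cb.String())
    	// Roughly:
    	//   package resources
    	//
    	//   var Names = []string{"foo", "bar"}
    }
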
/code/builder/builder_test.go:
--------------------------------------------------------------------------------
1 | package builder
2 |
3 | import (
4 | "testing"
5 |
6 | "github.com/0xrawsec/golang-utils/log"
7 | )
8 |
9 | type Matcher struct {
10 | Level int
11 | Offset int
12 | Type byte
13 | Range int
14 | Mask []byte
15 | Value []byte
16 | Flags int
17 | }
18 |
19 | func init() {
20 | log.InitLogger(log.LDebug)
21 | }
22 |
23 | func NewMatcher() (m Matcher) {
24 | m.Mask = []byte("foo")
25 | m.Value = []byte("bar")
26 | return
27 | }
28 |
29 | func TestMapBuilder1(t *testing.T) {
30 | m := map[string]bool{
31 | "foo": true,
32 | "blop": false}
33 |
34 | cb := CodeBuilder{}
35 | cb.Package("bar")
36 | cb.DefVariable("test", m)
37 | cb.ResolveImports()
38 | t.Log(cb.String())
39 | }
40 |
41 | func TestMapBuilder2(t *testing.T) {
42 | m := map[string][]byte{
43 | "foo": []byte("bar")}
44 | cb := CodeBuilder{}
45 | cb.Package("blop")
46 | cb.DefVariable("test", m)
47 | cb.ResolveImports()
48 | t.Log(cb.String())
49 | }
50 |
51 | func TestStructBuilder(t *testing.T) {
52 | m := NewMatcher()
53 | cb := CodeBuilder{}
54 | cb.Package("foo")
55 | cb.DefVariable("bar", m)
56 | t.Log(cb.String())
57 | //cb.ResolveImports()
58 | }
59 |
--------------------------------------------------------------------------------
/code/packer/packer.go:
--------------------------------------------------------------------------------
1 | package packer
2 |
3 | import (
4 | "bytes"
5 | "compress/gzip"
6 | "fmt"
7 | "io"
8 | "io/ioutil"
9 | "os"
10 | "path/filepath"
11 |
12 | "github.com/0xrawsec/golang-utils/code/builder"
13 | )
14 |
15 | type ErrResourceNotFound struct {
16 | Name string
17 | }
18 |
19 | func (e ErrResourceNotFound) Error() string {
20 | return fmt.Sprintf("Resource %s not found", e.Name)
21 | }
22 |
23 | type Packer map[string][]byte
24 |
25 | func PackReader(reader io.Reader) []byte {
26 | buf := new(bytes.Buffer)
27 | all, err := ioutil.ReadAll(reader)
28 | if err != nil {
29 | panic(err)
30 | }
31 | packer := gzip.NewWriter(buf)
32 | packer.Write(all)
33 | packer.Close()
34 | return buf.Bytes()
35 | }
36 |
37 | func UnpackReader(reader io.Reader) []byte {
38 | unpacker, err := gzip.NewReader(reader)
39 | if err != nil {
40 | panic(err)
41 | }
42 | all, err := ioutil.ReadAll(unpacker)
43 | if err != nil {
44 | panic(err)
45 | }
46 | return all
47 | }
48 |
49 | func (p *Packer) AddResource(name string, data []byte) {
50 | buf := bytes.NewBuffer(data)
51 | (*p)[name] = PackReader(buf)
52 | }
53 |
54 | func (p *Packer) AddResourceReader(name string, reader io.Reader) {
55 | (*p)[name] = PackReader(reader)
56 |
57 | }
58 |
59 | func (p *Packer) GetResource(name string) ([]byte, error) {
60 | if data, ok := (*p)[name]; ok {
61 | buf := bytes.NewBuffer(data)
62 | return UnpackReader(buf), nil
63 | }
64 | return []byte{}, ErrResourceNotFound{name}
65 | }
66 |
67 | func (p *Packer) Dumps(packageName string) []byte {
68 | b := builder.CodeBuilder{}
69 | b.Package(packageName)
70 | b.DefVariable("Resources", *p)
71 | b.ResolveImports()
72 | return b.Bytes()
73 | }
74 |
75 | func (p *Packer) Dump(packageName, outfile string) {
76 | err := os.Mkdir(packageName, 0700)
77 | if !os.IsExist(err) && err != nil {
78 | panic(err)
79 | }
80 | out, err := os.Create(filepath.Join(packageName, outfile))
81 | if err != nil && !os.IsExist(err) {
82 | panic(err)
83 | }
84 | defer out.Close()
85 | out.Write(p.Dumps(packageName))
86 | }
87 |
--------------------------------------------------------------------------------
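
A hedged round-trip sketch (not part of the repository) of the Packer API: resources are gzip-compressed as they are added, GetResource transparently decompresses them, and Dump/Dumps turn the whole map into a generated Go source file via the builder package:

    package main

    import (
    	"bytes"
    	"fmt"

    	"github.com/0xrawsec/golang-utils/code/packer"
    )

    func main() {
    	p := packer.Packer{}
    	p.AddResource("greeting", []byte("hello world"))
    	p.AddResourceReader("motd", bytes.NewReader([]byte("message of the day")))

    	data, err := p.GetResource("greeting")
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(string(data)) // hello world

    	// Unknown resource names yield ErrResourceNotFound
    	if _, err := p.GetResource("missing"); err != nil {
    		fmt.Println(err)
    	}
    }
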
/code/packer/packer_test.go:
--------------------------------------------------------------------------------
1 | package packer
2 |
3 | import (
4 | "os"
5 | "testing"
6 | )
7 |
8 | var (
9 | dataFile = "/bin/ls"
10 | )
11 |
12 | func TestPacker(t *testing.T) {
13 | file, err := os.Open(dataFile)
14 | if err != nil {
15 | panic(err)
16 | }
17 | defer file.Close()
18 | p := Packer{}
19 | p.AddResourceReader("/bin/ls", file)
20 | p.Dump("resources", "resources.go")
21 | }
22 |
23 | func TestUnpacker(t *testing.T) {
24 | /*d, err := resources.Resources.GetResource(dataFile)
25 | if err != nil {
26 | panic(err)
27 | }
28 | if data.Md5(d) != file.Md5(dataFile) {
29 | t.Fail()
30 | }*/
31 | }
32 |
--------------------------------------------------------------------------------
/config/config.go:
--------------------------------------------------------------------------------
1 | package config
2 |
3 | import (
4 | "encoding/json"
5 | "errors"
6 | "fmt"
7 | "io/ioutil"
8 | "os"
9 | "unsafe"
10 |
11 | "github.com/0xrawsec/golang-utils/log"
12 | )
13 |
14 | // Config : configuration structure definition
15 | type Config map[string]Value
16 |
17 | // Value : stored in the configuration
18 | type Value interface{}
19 |
20 | var (
21 | ErrNoSuchKey = errors.New("No such key")
22 | )
23 |
24 | //////////////////////////////// Utils /////////////////////////////////////////
25 |
26 | func configErrorf(fmt string, i ...interface{}) {
27 | log.Errorf(fmt, i...)
28 | os.Exit(1)
29 | }
30 |
31 | func getRequiredError(key, ofType string, err error) {
32 | configErrorf("Cannot get mandatory parameter %s as %s: %s ", key, ofType, err)
33 | }
34 |
35 | ////////////////////////////////////////////////////////////////////////////////
36 |
37 | // Loads : loads a configuration structure from a data buffer
38 | // @data : buffer containing the configuration object
39 | // return (Config, error) : the Config struct filled from data, error code
40 | func Loads(data []byte) (c Config, err error) {
41 | err = json.Unmarshal(data, &c)
42 | if err != nil {
43 | return
44 | }
45 | return
46 | }
47 |
48 | // Load : loads a configuration structure from a file
49 | // @path : path where the configuration is stored as a json file
50 | // return (Config, error) : the Config struct parsed, error code
51 | func Load(path string) (c Config, err error) {
52 | data, err := ioutil.ReadFile(path)
53 | if err != nil {
54 | return
55 | }
56 | return Loads([]byte(data))
57 | }
58 |
59 | // Dumps : Dumps Config structure into a byte slice
60 | // return ([]byte, error) : byte slice and error code
61 | func (c *Config) Dumps() (dump []byte, err error) {
62 | dump, err = json.Marshal(c)
63 | if err != nil {
64 | return
65 | }
66 | return
67 | }
68 |
69 | // Debug : prints out the configuration in debug information
70 | func (c *Config) Debug() {
71 | for key, val := range *c {
72 | log.Debugf("config[%s] = %v", key, val)
73 | }
74 | }
75 |
76 | // Get : get the Value associated to a key found in Config structure
77 | // return (Value, error) : Value associated to key and error code
78 | func (c *Config) Get(key string) (Value, error) {
79 | val, ok := (*c)[key]
80 | if !ok {
81 | return val, ErrNoSuchKey
82 | }
83 | return val, nil
84 | }
85 |
86 | // GetString gets the value associated to a key as string
87 | // return (string, error)
88 | func (c *Config) GetString(key string) (string, error) {
89 | val, ok := (*c)[key]
90 | if !ok {
91 | return "", ErrNoSuchKey
92 | }
93 | s, ok := val.(string)
94 | if !ok {
95 | return s, fmt.Errorf("Wrong type for %s (Type:%T Expecting:%T)", key, val, s)
96 | }
97 | return val.(string), nil
98 | }
99 |
100 | // GetInt64 gets the value associated to a key as int64
101 | // return (int64, error)
102 | func (c *Config) GetInt64(key string) (i int64, err error) {
103 | val, ok := (*c)[key]
104 | if !ok {
105 | return 0, ErrNoSuchKey
106 | }
107 | switch val.(type) {
108 | case int8:
109 | return int64(val.(int8)), nil
110 | case int16:
111 | return int64(val.(int16)), nil
112 | case int:
113 | return int64(val.(int)), nil
114 | case int32:
115 | return int64(val.(int32)), nil
116 | case int64:
117 | return val.(int64), nil
118 | case float64:
119 | // json loads float64 so handle that case
120 | return int64(val.(float64)), nil
121 | default:
122 | return 0, fmt.Errorf("Wrong type for %s (Type:%T Expecting:%T)", key, val, i)
123 | }
124 | }
125 |
126 | // GetUint64 gets the value associated to a key as uint64
127 | // return (uint64, error)
128 | func (c *Config) GetUint64(key string) (u uint64, err error) {
129 | val, ok := (*c)[key]
130 | if !ok {
131 | return 0, ErrNoSuchKey
132 | }
133 | switch val.(type) {
134 | case uint8:
135 | return uint64(val.(uint8)), nil
136 | case uint16:
137 | return uint64(val.(uint16)), nil
138 | case uint32:
139 | return uint64(val.(uint32)), nil
140 | case uint:
141 | return uint64(val.(uint)), nil
142 | case uint64:
143 | return val.(uint64), nil
144 | case float64:
145 | // json loads float64 so handle that case
146 | return uint64(val.(float64)), nil
147 | default:
148 | return 0, fmt.Errorf("Wrong type for %s (Type:%T Expecting:%T)", key, val, u)
149 | }
150 | }
151 |
152 | // GetSubConfig : get a subconfig referenced by key
153 | // return (Config, error)
154 | func (c *Config) GetSubConfig(key string) (Config, error) {
155 | val, err := c.Get(key)
156 | if err != nil {
157 | return Config{}, err
158 | }
159 | sc, ok := val.(map[string]interface{})
160 | if !ok {
161 | return nil, fmt.Errorf("Wrong type for %s (Type:%T Expecting:%T)", key, val, sc)
162 | }
163 | return *(*Config)(unsafe.Pointer(&(sc))), nil
164 | }
165 |
166 | // GetRequiredSubConfig : get a subconfig referenced by key
167 | // return (Config)
168 | func (c *Config) GetRequiredSubConfig(key string) Config {
169 | sc, err := c.GetSubConfig(key)
170 | if err != nil {
171 | getRequiredError(key, "map[string]interface{}", err)
172 | }
173 | return sc
174 | }
175 |
176 | // GetRequired : get the Value associated to a key found in Config structure and exit if
177 | // not available
178 | // return (Value) : Value associated to key if it exists
179 | func (c *Config) GetRequired(key string) Value {
180 | val, err := c.Get(key)
181 | if err != nil {
182 | configErrorf("Configuration parameter %s is mandatory", key)
183 | }
184 | return val
185 | }
186 |
187 | func (c *Config) GetRequiredString(key string) string {
188 | s, err := c.GetString(key)
189 | if err != nil {
190 | getRequiredError(key, "string", err)
191 | }
192 | return s
193 | }
194 |
195 | func (c *Config) GetRequiredInt64(key string) int64 {
196 | val, err := c.GetInt64(key)
197 | if err != nil {
198 | getRequiredError(key, "int64", err)
199 | }
200 | return val
201 | }
202 |
203 | func (c *Config) GetRequiredUint64(key string) uint64 {
204 | val, err := c.GetUint64(key)
205 | if err != nil {
206 | getRequiredError(key, "uint64", err)
207 | }
208 | return val
209 | }
210 |
211 | func (c *Config) GetStringSlice(key string) (s []string, err error) {
212 | s = make([]string, 0)
213 | val, err := c.Get(key)
214 | if err != nil {
215 | return
216 | }
217 | ival, ok := val.([]interface{})
218 | if !ok {
219 | return s, fmt.Errorf("Wrong type for %s (Type:%T Expecting:%T)", key, val, []interface{}{})
220 | }
221 | for _, e := range ival {
222 | s = append(s, e.(string))
223 | }
224 | return
225 | }
226 |
227 | func (c *Config) GetRequiredStringSlice(key string) []string {
228 | ss, err := c.GetStringSlice(key)
229 | if err != nil {
230 | getRequiredError(key, "[]string", err)
231 | }
232 | return ss
233 | }
234 |
235 | func (c *Config) GetUint64Slice(key string) (u []uint64, err error) {
236 | u = make([]uint64, 0)
237 | val, err := c.Get(key)
238 | if err != nil {
239 | return
240 | }
241 | ival, ok := val.([]interface{})
242 | if !ok {
243 | return u, fmt.Errorf("Wrong type for %s (Type:%T Expecting:%T)", key, val, []interface{}{})
244 | }
245 | for _, e := range ival {
246 | u = append(u, e.(uint64))
247 | }
248 | return
249 | }
250 |
251 | func (c *Config) GetRequiredUint64Slice(key string) []uint64 {
252 | val, err := c.GetUint64Slice(key)
253 | if err != nil {
254 | getRequiredError(key, "[]uint64", err)
255 | }
256 | return val
257 | }
258 |
259 | func (c *Config) GetInt64Slice(key string) (i []int64, err error) {
260 | i = make([]int64, 0)
261 | val, err := c.Get(key)
262 | if err != nil {
263 | return
264 | }
265 | ival, ok := val.([]interface{})
266 | if !ok {
267 | return i, fmt.Errorf("Wrong type for %s (Type:%T Expecting:%T)", key, val, []interface{}{})
268 | }
269 | for _, e := range ival {
270 | i = append(i, e.(int64))
271 | }
272 | return
273 | }
274 |
275 | func (c *Config) GetRequiredInt64Slice(key string) []int64 {
276 | val, err := c.GetInt64Slice(key)
277 | if err != nil {
278 | getRequiredError(key, "[]int64", err)
279 | }
280 | return val
281 | }
282 |
283 | // Set : set parameter identified by key of the Config struct with a Value
284 | func (c *Config) Set(key string, value interface{}) {
285 | (*c)[key] = value
286 | }
287 |
288 | // HasKey returns true if the configuration has the given key
289 | func (c *Config) HasKey(key string) bool {
290 | _, ok := (*c)[key]
291 | return ok
292 | }
293 |
--------------------------------------------------------------------------------
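To make the getter semantics concrete, here is a minimal usage sketch written as an example test in the same package (so no module import path has to be assumed); it exercises Load, GetRequiredSubConfig, GetRequiredString and GetStringSlice against the test/config.json shown below under /config/test/config.json:

package config

import "fmt"

// ExampleConfig is a usage sketch for the getters above; the GetRequired*
// variants exit when a key is missing, the plain getters return an error
func ExampleConfig() {
	c, err := Load("./test/config.json")
	if err != nil {
		panic(err)
	}
	misp := c.GetRequiredSubConfig("misp")
	fmt.Println(misp.GetRequiredString("host"))
	if recipients, err := c.GetStringSlice("notification-recipients"); err == nil {
		fmt.Println(recipients)
	}
	// Output:
	// 10.12.23.43
	// [foo@bar.com]
}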
/config/config_test.go:
--------------------------------------------------------------------------------
1 | package config
2 |
3 | import (
4 | "testing"
5 | )
6 |
7 | var (
8 | configpath = "./test/config.json"
9 | conf = Config{
10 | "test": "foo",
11 | "foobar": 64,
12 | "array": []string{"this", "is", "an", "array"}}
13 | )
14 |
15 | func TestDumps(t *testing.T) {
16 | dumps, err := conf.Dumps()
17 | if err != nil {
18 | t.Error(err)
19 | }
20 | t.Log(string(dumps))
21 | }
22 |
23 | func TestLoads(t *testing.T) {
24 | dumps, err := conf.Dumps()
25 | if err != nil {
26 | t.Error(err)
27 | }
28 | loaded, err := Loads(dumps)
29 | if err != nil {
30 | t.Error(err)
31 | }
32 | t.Log(loaded)
33 | }
34 |
35 | func TestSetGet(t *testing.T) {
36 | conf.Set("Foo", map[string]string{"Foo": "bar"})
37 | val, err := conf.Get("Foo")
38 | if err == nil {
39 | t.Logf("%T, %[1]v", val)
40 | }
41 | }
42 |
43 | func TestAll(t *testing.T) {
44 | conf.Set("Foo", map[string]string{"foo": "bar"})
45 | dumps, err := conf.Dumps()
46 | if err != nil {
47 | t.Error(err)
48 | }
49 | loaded, err := Loads(dumps)
50 | if err != nil {
51 | t.Error(err)
52 | }
53 | t.Log(loaded)
54 | val, err := loaded.Get("Foo")
55 | if err == nil {
56 | t.Logf("%T, %[1]v", val)
57 | foo := val.(map[string]interface{})["foo"]
58 | t.Logf("%T, %[1]v", foo)
59 | }
60 | }
61 |
62 | func TestLoadJson(t *testing.T) {
63 | c, err := Load(configpath)
64 | if err != nil {
65 | t.Fatal(err)
66 | }
67 | m := c.GetRequiredSubConfig("misp")
68 | t.Log(m)
69 | l := c.GetRequiredSubConfig("log-search")
70 | t.Log(l)
71 | s := c.GetRequiredStringSlice("notification-recipients")
72 | t.Log(s)
73 |
74 | }
75 |
--------------------------------------------------------------------------------
/config/test/config.json:
--------------------------------------------------------------------------------
1 | {
2 | "misp":{
3 | "protocol" :"https",
4 | "host" : "10.12.23.43",
5 | "api-key" :"foobar",
6 | "api-url" : "/foo/bar"
7 | },
8 | "log-search": {
9 | "protocol":"https",
10 | "host":"135.111.155.258:5874"
11 | },
12 | "notifier-email": "foo@test.com",
13 | "notification-recipients": [
14 | "foo@bar.com"
15 | ]
16 | }
17 |
--------------------------------------------------------------------------------
/crypto/check/hash.go:
--------------------------------------------------------------------------------
1 | package check
2 |
3 | import "regexp"
4 |
5 | var (
6 | MD5ValRegexp = regexp.MustCompile("^(?i)([0-9a-f]{32})$")
7 | SHA1ValRegexp = regexp.MustCompile("^(?i)([0-9a-f]{40})$")
8 | SHA256ValRegexp = regexp.MustCompile("^(?i)([0-9a-f]{64})$")
9 | SHA512ValRegexp = regexp.MustCompile("^(?i)([0-9a-f]{128})$")
10 | )
11 |
12 | func IsMD5(hash string) bool {
13 | return MD5ValRegexp.MatchString(hash)
14 | }
15 |
16 | func IsSHA1(hash string) bool {
17 | return SHA1ValRegexp.MatchString(hash)
18 | }
19 |
20 | func IsSHA256(hash string) bool {
21 | return SHA256ValRegexp.MatchString(hash)
22 | }
23 |
24 | func IsSHA512(hash string) bool {
25 | return SHA512ValRegexp.MatchString(hash)
26 | }
27 |
28 | func IsValidHash(hash string) bool {
29 | switch {
30 | case IsMD5(hash), IsSHA1(hash), IsSHA256(hash), IsSHA512(hash):
31 | return true
32 | default:
33 | return false
34 | }
35 | }
36 |
--------------------------------------------------------------------------------
/crypto/check/hash_test.go:
--------------------------------------------------------------------------------
1 | package check
2 |
3 | import (
4 | "testing"
5 | )
6 |
7 | var (
8 | md5 = "68b329da9893e34099c7d8ad5cb9c940"
9 | sha1 = "adc83b19e793491b1c6ea0fd8b46cd9f32e592fc"
10 | sha256 = "01ba4719c80b6fe911b091a7c05124b64eeece964e09c058ef8f9805daca546b"
11 | sha512 = "be688838ca8686e5c90689bf2ab585cef1137c999b48c70b92f67a5c34dc15697b5d11c982ed6d71be1e1e7f7b4e0733884aa97c3f7a339a8ed03577cf74be09"
12 | )
13 |
14 | func TestHashVerification(t *testing.T) {
15 | switch {
16 | case !IsValidHash(md5):
17 | t.Log("MD5 not valid")
18 | t.Fail()
19 | case !IsValidHash(sha1):
20 | t.Log("SHA1 not valid")
21 | t.Fail()
22 | case !IsValidHash(sha256):
23 | t.Log("SHA256 not valid")
24 | t.Fail()
25 | case !IsValidHash(sha512):
26 | t.Log("SHA512 not valid")
27 | t.Fail()
28 | }
29 | }
30 |
--------------------------------------------------------------------------------
/crypto/data/data.go:
--------------------------------------------------------------------------------
1 | package data
2 |
3 | import (
4 | "crypto/md5"
5 | "crypto/sha1"
6 | "crypto/sha256"
7 | "crypto/sha512"
8 | "encoding/hex"
9 | )
10 |
11 | // Md5 returns the md5 sum of data
12 | func Md5(data []byte) string {
13 | md5 := md5.New()
14 | md5.Write(data)
15 | return hex.EncodeToString(md5.Sum(nil))
16 | }
17 |
18 | // Sha1 returns the sha1 sum of data
19 | func Sha1(data []byte) string {
20 | sha1 := sha1.New()
21 | sha1.Write(data)
22 | return hex.EncodeToString(sha1.Sum(nil))
23 | }
24 |
25 | // Sha256 returns the sha256 sum of data
26 | func Sha256(data []byte) string {
27 | sha256 := sha256.New()
28 | sha256.Write(data)
29 | return hex.EncodeToString(sha256.Sum(nil))
30 | }
31 |
32 | // Sha512 returns the sha512 sum of data
33 | func Sha512(data []byte) string {
34 | sha512 := sha512.New()
35 | sha512.Write(data)
36 | return hex.EncodeToString(sha512.Sum(nil))
37 | }
38 |
--------------------------------------------------------------------------------
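The data package ships without a test file, so here is a minimal sketch of how the helpers above are meant to be called; the expected digests are the single-newline-byte hashes also used as reference values in crypto/check/hash_test.go below:

package data

import "fmt"

// ExampleMd5 hashes a one-byte buffer with each helper
func ExampleMd5() {
	payload := []byte("\n")
	fmt.Println(Md5(payload))
	fmt.Println(Sha1(payload))
	fmt.Println(Sha256(payload))
	// Output:
	// 68b329da9893e34099c7d8ad5cb9c940
	// adc83b19e793491b1c6ea0fd8b46cd9f32e592fc
	// 01ba4719c80b6fe911b091a7c05124b64eeece964e09c058ef8f9805daca546b
}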
/crypto/file/file.go:
--------------------------------------------------------------------------------
1 | package file
2 |
3 | import (
4 | "crypto/md5"
5 | "crypto/sha1"
6 | "crypto/sha256"
7 | "crypto/sha512"
8 | "encoding/hex"
9 | "io"
10 | "os"
11 | )
12 |
13 | // Md5 returns the md5 sum of a file
14 | func Md5(path string) (string, error) {
15 | var buffer [4096]byte
16 | file, err := os.Open(path)
17 | if err != nil {
18 | return "", err
19 | }
20 | defer file.Close()
21 | md5 := md5.New()
22 |
23 | for read, err := file.Read(buffer[:]); err != io.EOF && read != 0; read, err = file.Read(buffer[:]) {
24 | if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
25 | return "", err
26 | }
27 | md5.Write(buffer[:read])
28 | }
29 |
30 | return hex.EncodeToString(md5.Sum(nil)), nil
31 | }
32 |
33 | // Sha1 returns the sha1 sum of a file
34 | func Sha1(path string) (string, error) {
35 | var buffer [4096]byte
36 | file, err := os.Open(path)
37 | if err != nil {
38 | return "", err
39 | }
40 | defer file.Close()
41 | sha1 := sha1.New()
42 |
43 | for read, err := file.Read(buffer[:]); err != io.EOF && read != 0; read, err = file.Read(buffer[:]) {
44 | if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
45 | return "", err
46 | }
47 | sha1.Write(buffer[:read])
48 | }
49 |
50 | return hex.EncodeToString(sha1.Sum(nil)), nil
51 | }
52 |
53 | // Sha256 returns the sha256 sum of a file
54 | func Sha256(path string) (string, error) {
55 | var buffer [4096]byte
56 | file, err := os.Open(path)
57 | if err != nil {
58 | return "", err
59 | }
60 | defer file.Close()
61 | sha256 := sha256.New()
62 |
63 | for read, err := file.Read(buffer[:]); err != io.EOF && read != 0; read, err = file.Read(buffer[:]) {
64 | if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
65 | return "", err
66 | }
67 | sha256.Write(buffer[:read])
68 | }
69 |
70 | return hex.EncodeToString(sha256.Sum(nil)), nil
71 | }
72 |
73 | // Sha512 returns the sha512 sum of a file
74 | func Sha512(path string) (string, error) {
75 | var buffer [4096]byte
76 | file, err := os.Open(path)
77 | if err != nil {
78 | return "", err
79 | }
80 | defer file.Close()
81 | sha512 := sha512.New()
82 |
83 | for read, err := file.Read(buffer[:]); err != io.EOF && read != 0; read, err = file.Read(buffer[:]) {
84 | if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
85 | return "", err
86 | }
87 | sha512.Write(buffer[:read])
88 | }
89 |
90 | return hex.EncodeToString(sha512.Sum(nil)), nil
91 | }
92 |
--------------------------------------------------------------------------------
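Like data, the file package has no test of its own; the following sketch (assuming a Go toolchain with os.CreateTemp, i.e. 1.16 or later) hashes a temporary file containing a single newline, so the digest matches the one used in crypto/check/hash_test.go:

package file

import (
	"fmt"
	"os"
)

// ExampleSha256 writes a temporary one-byte file and hashes it
func ExampleSha256() {
	tmp, err := os.CreateTemp("", "hash-example")
	if err != nil {
		panic(err)
	}
	defer os.Remove(tmp.Name())
	tmp.WriteString("\n")
	tmp.Close()

	sum, err := Sha256(tmp.Name())
	if err != nil {
		panic(err)
	}
	fmt.Println(sum)
	// Output: 01ba4719c80b6fe911b091a7c05124b64eeece964e09c058ef8f9805daca546b
}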
/datastructs/bitset.go:
--------------------------------------------------------------------------------
1 | package datastructs
2 |
3 | // BitSet structure definition
4 | type BitSet struct {
5 | size int
6 | set []uint8
7 | }
8 |
9 | // NewBitSet creates a new bitset
10 | func NewBitSet(size int) (bs *BitSet) {
11 | bs = &BitSet{}
12 | bs.size = size
13 | if size%8 == 0 {
14 | bs.set = make([]uint8, size/8)
15 | } else {
16 | bs.set = make([]uint8, (size/8)+1)
17 | }
18 | return
19 | }
20 |
21 | // Set bit at offset o
22 | func (b *BitSet) Set(o int) {
23 | bucketID := o / 8
24 | oInBucket := uint8(o % 8)
25 | if o >= b.size {
26 | return
27 | }
28 | b.set[bucketID] = (b.set[bucketID] | 0x1<<oInBucket)
29 | }
30 |
31 | // Get returns true if the bit at offset o is set
32 | func (b *BitSet) Get(o int) bool {
33 | bucketID := o / 8
34 | oInBucket := uint8(o % 8)
35 | if o >= b.size {
36 | return false
37 | }
38 | return (b.set[bucketID]&(0x1<<oInBucket))>>oInBucket == 0x1
39 | }
40 |
41 | // Len returns the length of the BitSet
42 | func (b *BitSet) Len() int {
43 | return b.size
44 | }
45 |
--------------------------------------------------------------------------------
/datastructs/bitset_test.go:
--------------------------------------------------------------------------------
1 | package datastructs
2 |
3 | import (
4 | "math/rand"
5 | "testing"
6 | )
7 |
8 | func TestBitSetBasic(t *testing.T) {
9 | bs := NewBitSet(255)
10 | offset := 10
11 |
12 | bs.Set(offset)
13 | if !bs.Get(offset) {
14 | t.Logf("Failed to retrieve bit at offset: %d", offset)
15 | t.FailNow()
16 | }
17 | }
18 |
19 | func TestBitSetRookie(t *testing.T) {
20 | bs := NewBitSet(1013)
21 | for i := 0; i < bs.Len(); i++ {
22 | if i%2 == 0 {
23 | bs.Set(i)
24 | }
25 | }
26 |
27 | for i := 0; i < bs.Len(); i++ {
28 | if i%2 == 0 {
29 | if !bs.Get(i) {
30 | t.Logf("Failed to retrieve bit at offset: %d", i)
31 | t.FailNow()
32 | }
33 | }
34 | }
35 | }
36 |
37 | func TestBitSetHardcore(t *testing.T) {
38 | for i := 0; i < 10000; i++ {
39 | size := rand.Uint32() % (10 * 1024)
40 | bs := NewBitSet(int(size))
41 | for i := 0; i < bs.Len(); i++ {
42 | if i%2 == 0 {
43 | bs.Set(i)
44 | }
45 | }
46 |
47 | for i := 0; i < bs.Len(); i++ {
48 | if i%2 == 0 {
49 | if !bs.Get(i) {
50 | t.Logf("Failed to retrieve bit at offset: %d", i)
51 | t.FailNow()
52 | }
53 | }
54 | }
55 | }
56 | }
57 |
--------------------------------------------------------------------------------
/datastructs/datastructs.go:
--------------------------------------------------------------------------------
1 | package datastructs
2 |
3 | type Hashable interface {
4 | Hash() string
5 | }
6 |
--------------------------------------------------------------------------------
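Hashable is the only contract the datastructs package imposes on HashMap keys: any type that can produce a stable string identity satisfies it. A minimal sketch (the same idea as the IntHashable and TestHashable helpers used in hashmap_test.go below):

package datastructs

import "fmt"

// Key is a sketch of a Hashable implementation; its Hash is a stable
// string built from the fields that define the key's identity
type Key struct {
	Namespace string
	ID        int
}

// Hash implements the Hashable interface
func (k Key) Hash() string {
	return fmt.Sprintf("%s/%d", k.Namespace, k.ID)
}

With such a type, NewHashMap().Add(Key{"users", 1}, "some value") stores and retrieves the value under that string identity.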
/datastructs/fifo.go:
--------------------------------------------------------------------------------
1 | package datastructs
2 |
3 | import (
4 | "fmt"
5 | "strings"
6 | "sync"
7 | )
8 |
9 | type Element struct {
10 | Value interface{}
11 | Prev *Element
12 | Next *Element
13 | }
14 |
15 | func (e *Element) String() string {
16 | return fmt.Sprintf("(%T(%v), %p, %p)", e.Value, e.Value, e.Prev, e.Next)
17 | }
18 |
19 | type Fifo struct {
20 | sync.RWMutex
21 | e *Element
22 | last *Element
23 | size int
24 | }
25 |
26 | func (f *Fifo) Push(i interface{}) {
27 | f.Lock()
28 | defer f.Unlock()
29 | e := Element{Value: i}
30 | if f.e == nil {
31 | f.e = &e
32 | f.last = &e
33 | } else {
34 | e.Next = f.e
35 | f.e.Prev = &e
36 | f.e = &e
37 | }
38 | f.size++
39 | }
40 |
41 | func (f *Fifo) String() string {
42 | f.RLock()
43 | defer f.RUnlock()
44 | out := make([]string, 0)
45 | for e := f.e; e != nil; e = e.Next {
46 | out = append(out, e.String())
47 | }
48 | return strings.Join(out, "->")
49 | }
50 |
51 | func (f *Fifo) Empty() bool {
52 | f.RLock()
53 | defer f.RUnlock()
54 | return f.size == 0
55 | }
56 |
57 | func (f *Fifo) Pop() *Element {
58 | f.Lock()
59 | defer f.Unlock()
60 | if f.last == nil {
61 | return nil
62 | }
63 |
64 | popped := f.last
65 | f.last = f.last.Prev
66 | if f.last != nil {
67 | f.last.Next = nil
68 | }
69 | // we have to nil out f.e if we pop
70 | // the last element of the Fifo
71 | if f.size == 1 {
72 | f.e = nil
73 | }
74 | f.size--
75 | return popped
76 | }
77 |
78 | func (f *Fifo) Len() int {
79 | f.RLock()
80 | defer f.RUnlock()
81 | return f.size
82 | }
83 |
--------------------------------------------------------------------------------
/datastructs/fifo_test.go:
--------------------------------------------------------------------------------
1 | package datastructs
2 |
3 | import "testing"
4 |
5 | func TestFifoBasic(t *testing.T) {
6 | f := &Fifo{}
7 | for i := 0; i < 10; i++ {
8 | f.Push(i)
9 | t.Logf("Size: %d", f.Len())
10 | }
11 | for p := f.Pop(); p != nil; p = f.Pop() {
12 | t.Logf("popped: %s", p)
13 | t.Logf("Size: %d", f.Len())
14 | }
15 | // second loop
16 | for i := 0; i < 10; i++ {
17 | f.Push(i)
18 | t.Logf("Size: %d", f.Len())
19 | }
20 |
21 | t.Logf("Fifo: %s", f)
22 | for p := f.Pop(); p != nil; p = f.Pop() {
23 | t.Logf("popped: %s", p)
24 | t.Logf("Size: %d", f.Len())
25 | }
26 |
27 | t.Logf("Size: %d", f.Len())
28 | if !f.Empty() {
29 | t.Error("Fifo should be empty")
30 | }
31 | }
32 |
--------------------------------------------------------------------------------
/datastructs/hashmap.go:
--------------------------------------------------------------------------------
1 | package datastructs
2 |
3 | import "sync"
4 |
5 | type HashMap struct {
6 | keys map[string]Hashable
7 | values map[string]interface{}
8 | }
9 |
10 | type Item struct {
11 | Key Hashable
12 | Value interface{}
13 | }
14 |
15 | func NewHashMap() (hm *HashMap) {
16 | return &HashMap{
17 | make(map[string]Hashable),
18 | make(map[string]interface{}),
19 | }
20 | }
21 |
22 | // Contains returns true if the HashMap contains element referenced by key
23 | func (hm *HashMap) Contains(h Hashable) bool {
24 | if _, ok := (*hm).keys[h.Hash()]; ok {
25 | return ok
26 | }
27 | return false
28 | }
29 |
30 | // Get the element referenced by key in the HashMap
31 | func (hm *HashMap) Get(h Hashable) (interface{}, bool) {
32 | if _, ok := (*hm).keys[h.Hash()]; ok {
33 | v, ok := (*hm).values[h.Hash()]
34 | return v, ok
35 | }
36 | return nil, false
37 | }
38 |
39 | // Add sets key, value in the map
40 | func (hm *HashMap) Add(key Hashable, value interface{}) {
41 | (*hm).keys[key.Hash()] = key
42 | (*hm).values[key.Hash()] = value
43 | }
44 |
45 | // Del deletes the key and its associated value
46 | func (hm *HashMap) Del(key Hashable) {
47 | delete((*hm).keys, key.Hash())
48 | delete((*hm).values, key.Hash())
49 | }
50 |
51 | // Keys returns a channel of Keys used by the HashMap
52 | func (hm *HashMap) Keys() (ch chan Hashable) {
53 | ch = make(chan Hashable)
54 | go func() {
55 | defer close(ch)
56 | for _, v := range hm.keys {
57 | ch <- v
58 | }
59 | }()
60 | return
61 | }
62 |
63 | // Values returns a channel of Values contained in the HashMap
64 | func (hm *HashMap) Values() (ci chan interface{}) {
65 | ci = make(chan interface{})
66 | go func() {
67 | defer close(ci)
68 | for _, v := range hm.values {
69 | ci <- v
70 | }
71 | }()
72 | return
73 | }
74 |
75 | // Items returns a channel of Item contained in the HashMap
76 | func (hm *HashMap) Items() (ci chan Item) {
77 | ci = make(chan Item)
78 | go func() {
79 | defer close(ci)
80 | for k := range hm.keys {
81 | i := Item{(*hm).keys[k], (*hm).values[k]}
82 | ci <- i
83 | }
84 | }()
85 | return
86 | }
87 |
88 | // Len returns the length of the HashMap
89 | func (hm *HashMap) Len() int {
90 | return len(hm.keys)
91 | }
92 |
93 | // SyncedHashMap is a thread safe HashMap
94 | type SyncedHashMap struct {
95 | sync.RWMutex
96 | m *HashMap
97 | }
98 |
99 | // NewSyncedHashMap SyncedHashMap constructor
100 | func NewSyncedHashMap() (hm *SyncedHashMap) {
101 | hm = &SyncedHashMap{m: NewHashMap()}
102 | return hm
103 | }
104 |
105 | // Contains returns true if the HashMap contains element referenced by key
106 | func (hm *SyncedHashMap) Contains(key Hashable) bool {
107 | hm.RLock()
108 | defer hm.RUnlock()
109 | return hm.m.Contains(key)
110 | }
111 |
112 | // Get the element referenced by key in the HashMap
113 | func (hm *SyncedHashMap) Get(key Hashable) (interface{}, bool) {
114 | hm.RLock()
115 | defer hm.RUnlock()
116 | return hm.m.Get(key)
117 | }
118 |
119 | // Add sets key, value in the map
120 | func (hm *SyncedHashMap) Add(key Hashable, value interface{}) {
121 | hm.Lock()
122 | defer hm.Unlock()
123 | hm.m.Add(key, value)
124 | }
125 |
126 | // Del deletes the key and its associated value
127 | func (hm *SyncedHashMap) Del(key Hashable) {
128 | hm.Lock()
129 | defer hm.Unlock()
130 | hm.m.Del(key)
131 | }
132 |
133 | // Keys returns a channel of Keys used by the HashMap
134 | func (hm *SyncedHashMap) Keys() (ch chan Hashable) {
135 | hm.RLock()
136 | defer hm.RUnlock()
137 | return hm.m.Keys()
138 | }
139 |
140 | // Values returns a channel of Values contained in the HashMap
141 | func (hm *SyncedHashMap) Values() (ci chan interface{}) {
142 | hm.RLock()
143 | defer hm.RUnlock()
144 | return hm.m.Values()
145 | }
146 |
147 | // Items returns a channel of Item contained in the HashMap
148 | func (hm *SyncedHashMap) Items() (ci chan Item) {
149 | hm.RLock()
150 | defer hm.RUnlock()
151 | return hm.m.Items()
152 | }
153 |
154 | // Len returns the length of the HashMap
155 | func (hm *SyncedHashMap) Len() int {
156 | hm.RLock()
157 | defer hm.RUnlock()
158 | return hm.m.Len()
159 | }
160 |
--------------------------------------------------------------------------------
/datastructs/hashmap_test.go:
--------------------------------------------------------------------------------
1 | package datastructs
2 |
3 | import (
4 | "fmt"
5 | "math/rand"
6 | "testing"
7 | )
8 |
9 | type IntHashable int
10 |
11 | func (i IntHashable) Hash() string {
12 | return fmt.Sprintf("%d", i)
13 | }
14 |
15 | type TestHashable struct {
16 | ID string
17 | Map map[string]interface{}
18 | }
19 |
20 | func (t TestHashable) Hash() string {
21 | return t.ID
22 | }
23 |
24 | func TestBasicHashMap(t *testing.T) {
25 | hm := NewSyncedHashMap()
26 | altered := TestHashable{"altered", make(map[string]interface{})}
27 | hm.Add(TestHashable{"it", make(map[string]interface{})}, "blop")
28 | hm.Add(TestHashable{"works", make(map[string]interface{})}, 42.0)
29 | hm.Add(TestHashable{"very", make(map[string]interface{})}, int64(42))
30 | hm.Add(TestHashable{"nice", make(map[string]interface{})}, uint(2))
31 | t.Log("Printing values refered by keys")
32 | for k := range hm.Keys() {
33 | t.Log(hm.Get(k))
34 | k = altered
35 | }
36 |
37 | t.Log("Look for altered key")
38 | if hm.Contains(altered) {
39 | t.Log("Keys are modifiable and it is not good")
40 | t.Fail()
41 | }
42 |
43 | t.Log("Printing only values")
44 | for v := range hm.Values() {
45 | t.Log(v)
46 | }
47 | }
48 |
49 | func TestStressHashMap(t *testing.T) {
50 | size := 1000
51 | hm := NewSyncedHashMap()
52 |
53 | for i := 0; i < size; i++ {
54 | hm.Add(IntHashable(i), i)
55 | }
56 |
57 | if hm.Len() != size {
58 | t.Error("Hashmap has wrong size")
59 | }
60 |
61 | del := 0
62 | for item := range hm.Items() {
63 | if item.Key.(IntHashable) != IntHashable(item.Value.(int)) {
64 | t.Error("Wrong item")
65 | }
66 | // deleting item
67 | if rand.Int()%2 == 0 {
68 | hm.Del(item.Key)
69 | del++
70 | }
71 | }
72 |
73 | if hm.Len() != size-del {
74 | t.Error("Hashmap has wrong size after deletions")
75 | }
76 |
77 | }
78 |
--------------------------------------------------------------------------------
/datastructs/rings.go:
--------------------------------------------------------------------------------
1 | package datastructs
2 |
3 | import (
4 | "encoding/json"
5 | "fmt"
6 | )
7 |
8 | type RingSet struct {
9 | rslice *RingSlice
10 | set *Set
11 | }
12 |
13 | func NewRingSet(len int) *RingSet {
14 | rs := RingSet{NewRingSlice(len), NewSet()}
15 | return &rs
16 | }
17 |
18 | func (r RingSet) String() string {
19 | return r.rslice.String()
20 | }
21 |
22 | func (r *RingSet) Contains(item ...interface{}) bool {
23 | return r.set.Contains(item...)
24 | }
25 |
26 | func (r *RingSet) Add(item interface{}) {
27 | // we add item only if not already there
28 | if !r.Contains(item) {
29 | // we delete item only if RingSet is full
30 | if r.rslice.full {
31 | // delete the item which is going to be erased
32 | r.set.Del(r.rslice.GetItem(r.rslice.cursor))
33 | }
34 | r.rslice.Add(item)
35 | r.set.Add(item)
36 | }
37 | }
38 |
39 | func (r *RingSet) Len() int {
40 | return r.rslice.Len()
41 | }
42 |
43 | func (r *RingSet) GetItem(i int) interface{} {
44 | return r.rslice.GetItem(i)
45 | }
46 |
47 | func (r *RingSet) SetItem(i int, item interface{}) {
48 | r.rslice.SetItem(i, item)
49 | r.set.Add(item)
50 | }
51 |
52 | func (r *RingSet) Slice() []interface{} {
53 | return r.rslice.Slice()
54 | }
55 |
56 | func (r *RingSet) Copy() *RingSet {
57 | new := NewRingSet(r.Len())
58 | new.rslice = r.rslice.Copy()
59 | new.set = r.set.Copy()
60 | return new
61 | }
62 |
63 | func (r *RingSet) RingSlice() *RingSlice {
64 | return r.rslice.Copy()
65 | }
66 |
67 | func (r *RingSet) Set() *Set {
68 | return r.set.Copy()
69 | }
70 |
71 | func (r *RingSet) UnmarshalJSON(data []byte) (err error) {
72 | if err = json.Unmarshal(data, &r.rslice); err != nil {
73 | return
74 | }
75 | r.set = NewInitSet(r.rslice.Slice()...)
76 | return
77 | }
78 |
79 | func (r *RingSet) MarshalJSON() ([]byte, error) {
80 | return json.Marshal(&(r.rslice))
81 | }
82 |
83 | type RingSlice struct {
84 | ring []interface{}
85 | cursor int
86 | full bool
87 | }
88 |
89 | func NewRingSlice(len int) *RingSlice {
90 | return &RingSlice{make([]interface{}, len), 0, false}
91 | }
92 |
93 | func (r *RingSlice) incCursor() {
94 | r.cursor = r.nextCursor()
95 | if r.cursor == 0 {
96 | r.full = true
97 | }
98 | }
99 |
100 | func (r *RingSlice) nextCursor() int {
101 | if r.cursor < len(r.ring)-1 {
102 | return r.cursor + 1
103 | }
104 | return 0
105 | }
106 |
107 | func (r RingSlice) String() string {
108 | return fmt.Sprintf("%v", r.ring)
109 | }
110 |
111 | func (r *RingSlice) Add(item interface{}) {
112 | if r.cursor < len(r.ring) {
113 | r.ring[r.cursor] = item
114 | r.incCursor()
115 | }
116 | }
117 |
118 | func (r *RingSlice) Len() int {
119 | return len(r.ring)
120 | }
121 |
122 | func (r *RingSlice) GetItem(i int) interface{} {
123 | return r.ring[i]
124 | }
125 |
126 | func (r *RingSlice) SetItem(i int, item interface{}) {
127 | r.ring[i] = item
128 | }
129 |
130 | func (r *RingSlice) Slice() []interface{} {
131 | l := make([]interface{}, len(r.ring))
132 | copy(l, r.ring)
133 | return l
134 | }
135 |
136 | func (r *RingSlice) Copy() *RingSlice {
137 | new := NewRingSlice(r.Len())
138 | copy(new.ring, r.ring)
139 | new.cursor, new.full = r.cursor, r.full
140 | return new
141 | }
142 |
143 | func (r *RingSlice) UnmarshalJSON(data []byte) (err error) {
144 | r.cursor = 0
145 | return json.Unmarshal(data, &r.ring)
146 | }
147 |
148 | func (r *RingSlice) MarshalJSON() ([]byte, error) {
149 | s := make([]interface{}, len(r.ring))
150 | if len(s) > 1 {
151 | p1 := r.ring[r.cursor:len(r.ring)]
152 | p2 := r.ring[0:r.cursor]
153 | copy(s, p1)
154 | copy(s[len(p1):], p2)
155 | } else {
156 | copy(s, r.ring)
157 | }
158 | return json.Marshal(s)
159 | }
160 |
--------------------------------------------------------------------------------
/datastructs/rings_test.go:
--------------------------------------------------------------------------------
1 | package datastructs
2 |
3 | import (
4 | "encoding/json"
5 | "math/rand"
6 | "testing"
7 | )
8 |
9 | func TestRingSlice(t *testing.T) {
10 | r := NewRingSlice(10)
11 | for i := 0; i < 11; i++ {
12 | r.Add(i)
13 | }
14 | if r.GetItem(0).(int) != 10 {
15 | t.Error("Bad item at index 0")
16 | }
17 | if r.GetItem(r.Len()-1) != 9 {
18 | t.Error("Bad last item 0")
19 | }
20 | t.Log(r)
21 | }
22 |
23 | func TestRingSliceJSON(t *testing.T) {
24 | r := NewRingSlice(10)
25 | for i := 0; i < 11; i++ {
26 | r.Add(i)
27 | }
28 |
29 | b, err := json.Marshal(&r)
30 | if err != nil {
31 | t.Errorf("JSON marshalling failed")
32 | } else {
33 | t.Log(string(b))
34 | }
35 |
36 | r = NewRingSlice(0)
37 | if err := json.Unmarshal(b, &r); err != nil {
38 | t.Errorf("JSON unmarshalling failed")
39 | }
40 | // items have been reordered by json marshaling
41 | // and cast to another type (float64)
42 | if r.GetItem(0).(float64) != 1 {
43 | t.Error("Bad item at index 0")
44 | }
45 | if r.GetItem(r.Len()-1).(float64) != 10 {
46 | t.Error("Bad last item 0")
47 | }
48 | t.Log(r)
49 | }
50 |
51 | func TestRingSliceEmpty(t *testing.T) {
52 | for size := 0; size < 2; size++ {
53 | r := NewRingSlice(size)
54 | for i := 0; i < 100; i++ {
55 | r.Add(i)
56 | }
57 |
58 | b, err := json.Marshal(&r)
59 | if err != nil {
60 | t.Errorf("JSON marshalling failed")
61 | } else {
62 | t.Log(string(b))
63 | }
64 |
65 | if err := json.Unmarshal(b, &r); err != nil {
66 | t.Errorf("JSON unmarshalling failed")
67 | }
68 | t.Log(r)
69 | }
70 | }
71 |
72 | func TestRingSet(t *testing.T) {
73 | r := NewRingSet(10)
74 | for i := 0; i < 100; i++ {
75 | if r.Contains(i) {
76 | t.Errorf("RingSet should not contain value before being added: %d", i)
77 | }
78 | r.Add(i)
79 | if !r.Contains(i) {
80 | t.Errorf("RingSet should contain value just after being added: %d", i)
81 | }
82 | }
83 | t.Log(r)
84 |
85 | // we modify the copies; the checks below would only fail
86 | // if those modifications leaked into the original structures
87 | rs := r.RingSlice()
88 | set := r.Set()
89 | for i := 0; i < 10; i++ {
90 | rs.Add(i)
91 | set.Add(i)
92 | }
93 |
94 | for i := 0; i < 100; i++ {
95 | if i < 90 {
96 | // ring set should not contain those values
97 | if r.Contains(i) {
98 | t.Errorf("RingSet should not contain value: %d", i)
99 | }
100 | } else {
101 | if !r.Contains(i) {
102 | t.Errorf("RingSet should contain value: %d", i)
103 | }
104 | }
105 | }
106 |
107 | if r.rslice.Len() != r.set.Len() {
108 | t.Errorf("RingSlice and Set must have the same size")
109 | }
110 |
111 | if r.set.Len() != r.Len() {
112 | t.Errorf("Inconsistent size")
113 | }
114 |
115 | b, err := json.Marshal(&r)
116 | if err != nil {
117 | t.Errorf("Failed to marshal RingSet: %s", err)
118 | t.FailNow()
119 | }
120 | t.Log(string(b))
121 |
122 | new := NewRingSet(0)
123 | if err = json.Unmarshal(b, &new); err != nil {
124 | t.Errorf("Failed to unmarshall RingSet: %s", err)
125 | t.FailNow()
126 | }
127 | t.Log(new)
128 |
129 | // json unmarshals integers as float64
130 | for i := float64(0); i < 100; i++ {
131 | if i < 90 {
132 | // ring set should not contain those values
133 | if new.Contains(i) {
134 | t.Errorf("RingSet should not contain value: %f", i)
135 | }
136 | } else {
137 | if !new.Contains(i) {
138 | t.Errorf("RingSet should contain value: %f", i)
139 | }
140 | }
141 | }
142 |
143 | if new.rslice.Len() != new.set.Len() {
144 | t.Errorf("RingSlice and Set must have the same size, even after json un/marshalling")
145 | }
146 | }
147 |
148 | func TestRingSetNestedJSON(t *testing.T) {
149 | type T struct {
150 | R *RingSet `json:"r"`
151 | }
152 |
153 | ts := T{NewRingSet(10)}
154 | data, err := json.Marshal(&ts)
155 | if err != nil {
156 | t.Errorf("Failed to marshal nested structure: %s", err)
157 | }
158 | t.Log(string(data))
159 | }
160 |
161 | func TestRingGetSet(t *testing.T) {
162 | size := 100
163 | r := NewRingSet(100)
164 | for i := 0; i < r.Len(); i++ {
165 | r.SetItem(i, rand.Int())
166 | }
167 |
168 | s := r.Slice()
169 | for i := 0; i < size; i++ {
170 | item := r.GetItem(i)
171 | if item.(int) != s[i].(int) {
172 | t.Error("Wrong value returned by GetItem")
173 | }
174 | }
175 | }
176 |
177 | func TestRingSetCopy(t *testing.T) {
178 | size := 100
179 | r := NewRingSet(100)
180 | for i := 0; i < r.Len(); i++ {
181 | r.SetItem(i, rand.Int())
182 | }
183 |
184 | copy := r.Copy()
185 | for i := 0; i < size; i++ {
186 | if r.GetItem(i).(int) != copy.GetItem(i).(int) {
187 | t.Error("Wrong value returned by GetItem")
188 | }
189 | }
190 | }
191 |
--------------------------------------------------------------------------------
/datastructs/set_test.go:
--------------------------------------------------------------------------------
1 | package datastructs
2 |
3 | import (
4 | "encoding/json"
5 | "testing"
6 | )
7 |
8 | func TestSets(t *testing.T) {
9 | s1 := NewSyncedSet()
10 | s2 := NewSyncedSet()
11 | s1.Add("This", "is", "foo", "!!", "!!!!")
12 | s2.Add("This", "is", "bar", "!!!", "!!!!!!!")
13 | s1copy := NewSyncedSet(s1)
14 |
15 | intersection := s1.Intersect(s2)
16 | union := s1.Union(s2)
17 |
18 | t.Logf("s1.Slice: %v", s1.Slice())
19 | t.Logf("s2.Slice: %v", s2.Slice())
20 | t.Logf("s1.Intersect(s2).Slice: %v", intersection.Slice())
21 | t.Logf("s1.Union(s2).Slice: %v", union.Slice())
22 |
23 | if !s1.Contains("This", "is", "foo") {
24 | t.Error("string missing")
25 | }
26 | if !intersection.Contains("This", "is") {
27 | t.Error("string missing")
28 | }
29 | if !union.Contains("This", "is", "foo", "bar", "!!", "!!!", "!!!!", "!!!!!!!") {
30 | t.Error("string missing")
31 | }
32 | union.Del("This", "is", "foo", "!!", "!!!!")
33 | if union.Contains("This") || union.Contains("is") || union.Contains("foo") {
34 | t.Error("string should be missing")
35 | }
36 | t.Logf("union after delete: %v", union.Slice())
37 |
38 | if !s1.Equal(s1copy) {
39 | t.Error("equality test failed")
40 | }
41 |
42 | if s1.Len() != s1copy.Len() {
43 | t.Error("length is not equal between original and copy")
44 | }
45 |
46 | for it := range union.Items() {
47 | t.Logf("%v", it)
48 | }
49 | }
50 | func TestSetJSON(t *testing.T) {
51 | var data []byte
52 | var err error
53 |
54 | s1 := NewSyncedSet()
55 | s1.Add("This", "is", "bar", "!!!", "!!!!!!!")
56 |
57 | if data, err = json.Marshal(&s1); err != nil {
58 | t.Error("Failed to marshal JSON")
59 | } else {
60 | t.Log(string(data))
61 | }
62 |
63 | s2 := NewSyncedSet()
64 | if err = json.Unmarshal(data, &s2); err != nil {
65 | t.Errorf("Failed to unmarshal JSON: %s", err)
66 | t.FailNow()
67 | }
68 |
69 | if !s2.Contains("This", "is", "bar", "!!!", "!!!!!!!") {
70 | t.Error("Set does not contain expected data")
71 | }
72 | }
73 |
74 | func TestSetOrder(t *testing.T) {
75 | size := 10000
76 | s := NewSet()
77 | for i := 0; i < size; i++ {
78 | s.Add(i)
79 | }
80 |
81 | ss := s.SortSlice()
82 | for i := 0; i < size; i++ {
83 | if ss[i].(int) != i {
84 | t.Error("Bad set order")
85 | }
86 | }
87 | }
88 |
89 | func TestJSONMarshalStability(t *testing.T) {
90 | // aims at testing that order of elements in serialization is stable
91 | var data, prev []byte
92 | var err error
93 |
94 | s1 := NewSyncedSet()
95 | s1.Add("This", "is", "bar", "!!!", "!!!!!!!")
96 |
97 | for i := 0; i < 100; i++ {
98 | if data, err = json.Marshal(&s1); err != nil {
99 | t.Error("Failed to marshal JSON")
100 | }
101 | if prev == nil {
102 | goto copy
103 | }
104 | if string(data) != string(prev) {
105 | t.Error("JSON serialization is not stable")
106 | }
107 | copy:
108 | prev = make([]byte, len(data))
109 | copy(prev, data)
110 | }
111 | }
112 |
--------------------------------------------------------------------------------
/datastructs/sets.go:
--------------------------------------------------------------------------------
1 | package datastructs
2 |
3 | import (
4 | "encoding/json"
5 | "sync"
6 | )
7 |
8 | // Set datastruct representing a set; not thread safe on its own (see SyncedSet)
9 | type Set struct {
10 | i uint
11 | set map[interface{}]uint
12 | }
13 |
14 | // NewSet constructs a new Set
15 | func NewSet(sets ...*Set) *Set {
16 | s := &Set{0, make(map[interface{}]uint)}
17 | for _, set := range sets {
18 | pDatas := set.Slice()
19 | s.Add(pDatas...)
20 | }
21 | return s
22 | }
23 |
24 | // NewInitSet constructs a new Set initialized with data
25 | func NewInitSet(data ...interface{}) *Set {
26 | s := NewSet()
27 | s.Add(data...)
28 | return s
29 | }
30 |
31 | // Equal returns true if both sets are equal
32 | func (s *Set) Equal(other *Set) bool {
33 | if s.Len() != other.Len() {
34 | return false
35 | }
36 | for key := range s.set {
37 | if !other.Contains(key) {
38 | return false
39 | }
40 | }
41 | return true
42 | }
43 |
44 | // Copy returns a copy of the current set
45 | func (s *Set) Copy() *Set {
46 | return NewSet(s)
47 | }
48 |
49 | // Add adds data to the set
50 | func (s *Set) Add(data ...interface{}) {
51 | for _, data := range data {
52 | s.set[data] = s.i
53 | s.i++
54 | }
55 | }
56 |
57 | // Del deletes data from the set
58 | func (s *Set) Del(data ...interface{}) {
59 | for _, data := range data {
60 | delete(s.set, data)
61 | }
62 | }
63 |
64 | // Intersect returns a pointer to a new set containing the intersection of current
65 | // set and other
66 | func (s *Set) Intersect(other *Set) *Set {
67 | newSet := NewSet()
68 | for k := range s.set {
69 | if other.Contains(k) {
70 | newSet.Add(k)
71 | }
72 | }
73 | return newSet
74 | }
75 |
76 | // Union returns a pointer to a new set containing the union of current set and other
77 | func (s *Set) Union(other *Set) *Set {
78 | newSet := NewSet()
79 | for elt := range s.set {
80 | newSet.Add(elt)
81 | }
82 | for elt := range other.set {
83 | newSet.Add(elt)
84 | }
85 | return newSet
86 | }
87 |
88 | // Contains returns true if the set contains all the data
89 | func (s *Set) Contains(data ...interface{}) bool {
90 | for _, data := range data {
91 | if _, ok := s.set[data]; !ok {
92 | return false
93 | }
94 | }
95 | return true
96 | }
97 |
98 | type sortSetItem struct {
99 | order uint
100 | item interface{}
101 | }
102 |
103 | func (i sortSetItem) Less(o Sortable) bool {
104 | return i.order < o.(sortSetItem).order
105 | }
106 |
107 | // SortSlice returns a new slice containing the data in the set
108 | // sorted by order of insertion.
109 | func (s *Set) SortSlice() []interface{} {
110 | l := NewSortedSlice()
111 | for k := range s.set {
112 | l.Insert(sortSetItem{s.set[k], k})
113 | }
114 | out := make([]interface{}, 0, s.Len())
115 | slice := l.Slice()
116 | for i := len(slice) - 1; i >= 0; i-- {
117 | out = append(out, slice[i].(sortSetItem).item)
118 | }
119 | return out
120 | }
121 |
122 | // Slice returns a new slice containing the data in the set
123 | func (s *Set) Slice() []interface{} {
124 | out := make([]interface{}, 0, s.Len())
125 | for key := range s.set {
126 | out = append(out, key)
127 | }
128 | return out
129 | }
130 |
131 | // Items returns a channel with all the elements contained in the set
132 | func (s *Set) Items() (c chan interface{}) {
133 | c = make(chan interface{})
134 | go func() {
135 | defer close(c)
136 | for k := range s.set {
137 | c <- k
138 | }
139 | }()
140 | return c
141 |
142 | }
143 |
144 | // Len returns the length of the set
145 | func (s *Set) Len() int {
146 | return len(s.set)
147 | }
148 |
149 | // UnmarshalJSON implements json.Unmarshaler interface
150 | func (s *Set) UnmarshalJSON(data []byte) (err error) {
151 | tmp := make([]interface{}, 0)
152 | s.i = 0
153 | s.set = make(map[interface{}]uint)
154 | if err = json.Unmarshal(data, &tmp); err != nil {
155 | return err
156 | }
157 | for _, data := range tmp {
158 | s.Add(data)
159 | }
160 | return
161 | }
162 |
163 | // MarshalJSON implements json.Marshaler interface
164 | func (s *Set) MarshalJSON() (data []byte, err error) {
165 | return json.Marshal(s.SortSlice())
166 | }
167 |
168 | // SyncedSet datastruct that represents a thread safe set
169 | type SyncedSet struct {
170 | sync.RWMutex
171 | set *Set
172 | }
173 |
174 | // NewSyncedSet constructs a new SyncedSet
175 | func NewSyncedSet(sets ...*SyncedSet) *SyncedSet {
176 | ss := &SyncedSet{}
177 | ss.set = NewSet()
178 | for _, set := range sets {
179 | ss.Add(set.Slice()...)
180 | }
181 | return ss
182 | }
183 |
184 | // NewInitSyncedSet constructs a new SyncedSet initialized with data
185 | func NewInitSyncedSet(data ...interface{}) *SyncedSet {
186 | ss := &SyncedSet{}
187 | ss.set = NewSet()
188 | ss.Add(data...)
189 | return ss
190 | }
191 |
192 | // Equal returns true if both sets are equal
193 | func (s *SyncedSet) Equal(other *SyncedSet) bool {
194 | s.RLock()
195 | defer s.RUnlock()
196 | test := s.set.Equal(other.set)
197 | return test
198 | }
199 |
200 | // Add adds data to the set
201 | func (s *SyncedSet) Add(data ...interface{}) {
202 | s.Lock()
203 | defer s.Unlock()
204 | s.set.Add(data...)
205 | }
206 |
207 | // Del deletes data from the set
208 | func (s *SyncedSet) Del(data ...interface{}) {
209 | s.Lock()
210 | defer s.Unlock()
211 | s.set.Del(data...)
212 | }
213 |
214 | // Intersect returns a pointer to a new set containing the intersection of current
215 | // set and other
216 | func (s *SyncedSet) Intersect(other *SyncedSet) *SyncedSet {
217 | s.RLock()
218 | defer s.RUnlock()
219 | newSet := NewInitSyncedSet(s.set.Intersect(other.set).Slice()...)
220 | return newSet
221 | }
222 |
223 | // Union returns a pointer to a new set containing the union of current set and other
224 | func (s *SyncedSet) Union(other *SyncedSet) *SyncedSet {
225 | s.RLock()
226 | defer s.RUnlock()
227 | newSet := NewInitSyncedSet(s.set.Union(other.set).Slice()...)
228 | return newSet
229 | }
230 |
231 | // Contains returns true if the syncedset contains all the data
232 | func (s *SyncedSet) Contains(data ...interface{}) bool {
233 | s.RLock()
234 | defer s.RUnlock()
235 | return s.set.Contains(data...)
236 | }
237 |
238 | // Slice returns a new slice containing the data in the set
239 | func (s *SyncedSet) Slice() []interface{} {
240 | s.RLock()
241 | defer s.RUnlock()
242 | return s.set.Slice()
243 | }
244 |
245 | // Items returns a channel with all the elements contained in the set
246 | func (s *SyncedSet) Items() (c chan interface{}) {
247 | s.RLock()
248 | defer s.RUnlock()
249 | return s.set.Items()
250 |
251 | }
252 |
253 | // Len returns the length of the syncedset
254 | func (s *SyncedSet) Len() int {
255 | s.RLock()
256 | defer s.RUnlock()
257 | return s.set.Len()
258 | }
259 |
260 | // UnmarshalJSON implements json.Unmarshaler interface
261 | func (s *SyncedSet) UnmarshalJSON(data []byte) (err error) {
262 | s.Lock()
263 | defer s.Unlock()
264 | return s.set.UnmarshalJSON(data)
265 | }
266 |
267 | // MarshalJSON implements json.Marshaler interface
268 | func (s *SyncedSet) MarshalJSON() (data []byte, err error) {
269 | s.RLock()
270 | defer s.RUnlock()
271 | return json.Marshal(&s.set)
272 | }
273 |
--------------------------------------------------------------------------------
/datastructs/sortedslice.go:
--------------------------------------------------------------------------------
1 | package datastructs
2 |
3 | import (
4 | "fmt"
5 | "reflect"
6 | )
7 |
8 | // Sortable interface definition
9 | type Sortable interface {
10 | Less(Sortable) bool
11 | }
12 |
13 | // SortedSlice structure
14 | // by convention the smallest value is at the end
15 | type SortedSlice struct {
16 | s []Sortable
17 | }
18 |
19 | // NewSortedSlice returns an empty initialized slice. Opts takes len and cap in
20 | // order to initialize the underlying slice
21 | func NewSortedSlice(opts ...int) *SortedSlice {
22 | l, c := 0, 0
23 | if len(opts) >= 1 {
24 | l = opts[0]
25 | }
26 | if len(opts) >= 2 {
27 | c = opts[1]
28 | }
29 | return &SortedSlice{make([]Sortable, l, c)}
30 | }
31 |
32 | // searchLessThan recursively searches for the index of the first element less than e
33 | func (ss *SortedSlice) searchLessThan(e Sortable, i, j int) int {
34 | pivot := ((j + 1 - i) / 2) + i
35 | if j-i == 1 {
36 | if ss.s[i].Less(e) {
37 | return i
38 | }
39 | return j
40 | }
41 | if ss.s[pivot].Less(e) {
42 | return ss.searchLessThan(e, i, pivot)
43 | }
44 | return ss.searchLessThan(e, pivot, j)
45 | }
46 |
47 | // RangeLessThan returns the indexes of the objects Less than Sortable
48 | func (ss *SortedSlice) RangeLessThan(e Sortable) (int, int) {
49 | i := ss.searchLessThan(e, 0, len(ss.s)-1)
50 | return i, len(ss.s) - 1
51 | }
52 |
53 | // Insert inserts a structure implementing Sortable into the slice
54 | func (ss *SortedSlice) Insert(e Sortable) {
55 | switch {
56 | // Particular cases
57 | case len(ss.s) == 0, !ss.s[len(ss.s)-1].Less(e):
58 | ss.s = append(ss.s, e)
59 | case len(ss.s) == 1 && ss.s[0].Less(e):
60 | ss.s = append(ss.s, e)
61 | ss.s[1] = ss.s[0]
62 | ss.s[0] = e
63 | default:
64 | //log.Printf("want to insert v=%v into %v", e, ss.s)
65 | i := ss.searchLessThan(e, 0, len(ss.s)-1)
66 | //log.Printf("insert v=%v @ i=%d in ss=%v", e, i, ss.s)
67 | // Avoid creating intermediary slices
68 | ss.s = append(ss.s, e)
69 | copy(ss.s[i+1:], ss.s[i:])
70 | ss.s[i] = e
71 | }
72 | }
73 |
74 | // Iter returns a chan of Sortable in the slice. Start and Stop indexes can be
75 | // specified via optional parameters
76 | func (ss *SortedSlice) Iter(idx ...int) (c chan Sortable) {
77 | c = make(chan Sortable)
78 | i, j := 0, len(ss.s)-1
79 | if len(idx) >= 1 {
80 | i = idx[0]
81 | }
82 | if len(idx) >= 2 {
83 | j = idx[1]
84 | }
85 | if i < len(ss.s) && j < len(ss.s) && i <= j && i >= 0 {
86 | go func() {
87 | defer close(c)
88 | //for _, v := range ss.s {
89 | for ; i <= j; i++ {
90 | v := ss.s[i]
91 | c <- v
92 | }
93 | }()
94 | } else {
95 | close(c)
96 | }
97 | return c
98 | }
99 |
100 | // ReversedIter returns a chan of Sortable in the slice but in reverse order. Start and
101 | // Stop indexes can be specified via optional parameters
102 | func (ss *SortedSlice) ReversedIter(idx ...int) (c chan Sortable) {
103 | c = make(chan Sortable)
104 | i, j := 0, len(ss.s)-1
105 | if len(idx) >= 1 {
106 | i = idx[0]
107 | }
108 | if len(idx) >= 2 {
109 | j = idx[1]
110 | }
111 | if i < len(ss.s) && j < len(ss.s) && i <= j && i >= 0 {
112 | go func() {
113 | defer close(c)
114 | for k := len(ss.s) - 1 - i; k >= len(ss.s)-1-j; k-- {
115 | v := ss.s[k]
116 | c <- v
117 | }
118 | }()
119 | } else {
120 | close(c)
121 | }
122 | return c
123 | }
124 |
125 | // Slice returns the underlying slice
126 | func (ss *SortedSlice) Slice() []Sortable {
127 | return ss.s
128 | }
129 |
130 | // Control checks that the slice is properly ordered. A return value of
131 | // true means it is in good order
132 | func (ss *SortedSlice) Control() bool {
133 | v := ss.s[0]
134 | for _, tv := range ss.s {
135 | if !reflect.DeepEqual(v, tv) && !tv.Less(v) {
136 | return false
137 | }
138 | }
139 | return true
140 | }
141 |
142 | // String fmt helper
143 | func (ss *SortedSlice) String() string {
144 | return fmt.Sprintf("%v", ss.s)
145 | }
146 |
--------------------------------------------------------------------------------
/datastructs/sortedslice_test.go:
--------------------------------------------------------------------------------
1 | package datastructs
2 |
3 | import (
4 | "math/rand"
5 | "testing"
6 | "time"
7 | )
8 |
9 | type MyInt int
10 |
11 | func (m MyInt) Less(other Sortable) bool {
12 | return m < other.(MyInt)
13 | }
14 |
15 | type MyTime struct {
16 | t time.Time
17 | }
18 |
19 | func (m MyTime) Less(other Sortable) bool {
20 | return m.t.Before(other.(MyTime).t)
21 | }
22 |
23 | func (m MyTime) String() string {
24 | return m.t.Format(time.RFC3339Nano)
25 | }
26 |
27 | var (
28 | ints = [...]int{10, 13, 1, 2, 12, 99, 100, 102, 103, 103, 100, 1100, -2, -4}
29 | )
30 |
31 | func TestInsert(t *testing.T) {
32 | s := NewSortedSlice()
33 | for _, i := range ints {
34 | s.Insert(MyInt(i))
35 | t.Log(s)
36 | if !s.Control() {
37 | t.Fail()
38 | }
39 | }
40 | t.Log(s)
41 | }
42 |
43 | func TestRandom(t *testing.T) {
44 | s := NewSortedSlice()
45 | for i := 0; i < 1000; i++ {
46 | s.Insert(MyInt(rand.Int()))
47 | }
48 | if !s.Control() {
49 | t.Fail()
50 | }
51 | }
52 |
53 | func TestFail(t *testing.T) {
54 | s := NewSortedSlice()
55 | fail := [...]int{937, 821, 551, 410, 51, 320}
56 | for _, i := range fail {
57 | s.Insert(MyInt(i))
58 | }
59 | t.Log(s)
60 | }
61 |
62 | func TestTime(t *testing.T) {
63 | now := time.Now()
64 | s := NewSortedSlice()
65 | for i := 0; i < 50; i++ {
66 | mt := now.Add(time.Minute * time.Duration((rand.Int63() % 60)))
67 | mt = mt.Add(time.Second * time.Duration(rand.Int63()%60))
68 |
69 | s.Insert(MyTime{mt})
70 | }
71 | if !s.Control() {
72 | t.Fail()
73 | }
74 | for mt := range s.ReversedIter() {
75 | t.Logf("%s", mt)
76 | }
77 | }
78 |
79 | func TestSearchRange(t *testing.T) {
80 | s := NewSortedSlice()
81 | for _, i := range ints {
82 | s.Insert(MyInt(i))
83 | t.Log(s)
84 | if !s.Control() {
85 | t.Fail()
86 | }
87 | }
88 | t.Log(s)
89 | t.Log(s.RangeLessThan(MyInt(0)))
90 | }
91 |
92 | func TestIter(t *testing.T) {
93 | s := NewSortedSlice()
94 | for _, i := range ints {
95 | s.Insert(MyInt(i))
96 | t.Log(s)
97 | if !s.Control() {
98 | t.Fail()
99 | }
100 | }
101 | t.Log(s)
102 | t.Log("Iter")
103 | for m := range s.Iter(0, 3) {
104 | t.Log(m)
105 | }
106 | t.Log("ReversedIter")
107 | for m := range s.ReversedIter(0, 3) {
108 | t.Log(m)
109 | }
110 | }
111 |
--------------------------------------------------------------------------------
/datastructs/syncedmap.go:
--------------------------------------------------------------------------------
1 | package datastructs
2 |
3 | import "sync"
4 |
5 | type SyncedMap struct {
6 | sync.RWMutex
7 | m map[interface{}]interface{}
8 | }
9 |
10 | func NewSyncedMap() *SyncedMap {
11 | return &SyncedMap{m: make(map[interface{}]interface{})}
12 | }
13 |
14 | func (s *SyncedMap) Add(key, value interface{}) {
15 | s.Lock()
16 | defer s.Unlock()
17 | s.m[key] = value
18 | }
19 |
20 | func (s *SyncedMap) Del(key interface{}) {
21 | s.Lock()
22 | defer s.Unlock()
23 | delete(s.m, key)
24 | }
25 |
26 | func (s *SyncedMap) Contains(key interface{}) (ok bool) {
27 | s.RLock()
28 | defer s.RUnlock()
29 | _, ok = s.m[key]
30 | return
31 | }
32 |
33 | func (s *SyncedMap) Get(key interface{}) (value interface{}, ok bool) {
34 | s.RLock()
35 | defer s.RUnlock()
36 | value, ok = s.m[key]
37 | return
38 | }
39 |
40 | func (s *SyncedMap) Keys() (keys []interface{}) {
41 | s.RLock()
42 | defer s.RUnlock()
43 | keys = make([]interface{}, 0, len(s.m))
44 | for k := range s.m {
45 | keys = append(keys, k)
46 | }
47 | return
48 | }
49 |
50 | func (s *SyncedMap) Values() (values []interface{}) {
51 | s.RLock()
52 | defer s.RUnlock()
53 | values = make([]interface{}, 0, len(s.m))
54 | for _, v := range s.m {
55 | values = append(values, v)
56 | }
57 | return
58 | }
59 |
60 | func (s *SyncedMap) Len() int {
61 | return len(s.m)
62 | }
63 |
--------------------------------------------------------------------------------
/datastructs/syncmap_test.go:
--------------------------------------------------------------------------------
1 | package datastructs
2 |
3 | import (
4 | "math/rand"
5 | "testing"
6 | )
7 |
8 | func TestBasicSyncedMap(t *testing.T) {
9 | sm := NewSyncedMap()
10 | sm.Add("foo", 2)
11 | sm.Add(2, "foo")
12 | if !sm.Contains(2) {
13 | t.Error("Map should contain 2")
14 | }
15 | if !sm.Contains("foo") {
16 | t.Error("Map should contain foo")
17 | }
18 |
19 | sm.Del(2)
20 | sm.Del("foo")
21 | if sm.Contains(2) {
22 | t.Error("Map should not contain 2")
23 | }
24 | if sm.Contains("foo") {
25 | t.Error("Map should not contain foo")
26 | }
27 | }
28 |
29 | func TestAdvancedSyncedMap(t *testing.T) {
30 | size := 10000
31 | sm := NewSyncedMap()
32 |
33 | for i := 0; i < size; i++ {
34 | sm.Add(i, "foo")
35 | }
36 |
37 | // expected size ?
38 | if sm.Len() != size {
39 | t.Error("SyncedMap is of wrong size")
40 | }
41 |
42 | // changing the data in the map
43 | for i := 0; i < size; i++ {
44 | sm.Add(i, "bar")
45 | }
46 |
47 | // re-testing that size did not change
48 | if sm.Len() != size {
49 | t.Error("SyncedMap is of wrong size")
50 | }
51 |
52 | // Testing that modification worked
53 | for i := 0; i < size; i++ {
54 | if v, ok := sm.Get(i); !ok {
55 | t.Error("This key must be there")
56 | } else if v.(string) != "bar" {
57 | // the value check must run only when the key exists
58 | t.Error("Wrong value")
59 | }
60 | }
61 |
62 | ndel := 0
63 | for k := range sm.Keys() {
64 | if rand.Int()%2 == 0 {
65 | sm.Del(k)
66 | ndel++
67 | }
68 | }
69 |
70 | // testing size after deletions
71 | if sm.Len() != size-ndel {
72 | t.Error("Syncedmap has wrong size")
73 | }
74 |
75 | for _, v := range sm.Values() {
76 | if v.(string) != "bar" {
77 | t.Error("Wrong value")
78 | }
79 | }
80 |
81 | }
82 |
--------------------------------------------------------------------------------
/datastructs/utils.go:
--------------------------------------------------------------------------------
1 | package datastructs
2 |
3 | import (
4 | "reflect"
5 | )
6 |
7 | // ToInterfaceSlice converts any slice to a slice of interface{}
8 | // it can be useful to initialize some datastructs
9 | func ToInterfaceSlice(slice interface{}) (is []interface{}) {
10 | v := reflect.ValueOf(slice)
11 | if v.Kind() == reflect.Slice {
12 | is = make([]interface{}, 0, v.Len())
13 | for i := 0; i < v.Len(); i++ {
14 | is = append(is, v.Index(i).Interface())
15 | }
16 | } else {
17 | panic("parameter must be a slice")
18 | }
19 | return is
20 | }
21 |
--------------------------------------------------------------------------------
/datastructs/utils_test.go:
--------------------------------------------------------------------------------
1 | package datastructs
2 |
3 | import "testing"
4 |
5 | func shouldPanic(t *testing.T, f func()) {
6 | defer func() { recover() }()
7 | f()
8 | t.Errorf("should have panicked")
9 | }
10 |
11 | func TestToInterfaceSlice(t *testing.T) {
12 | size := 1000
13 | intSlice := make([]int, 0)
14 |
15 | for i := 0; i < size; i++ {
16 | intSlice = append(intSlice, i)
17 | }
18 |
19 | is := ToInterfaceSlice(intSlice)
20 | if len(is) != size {
21 | t.Error("Interface slice has wrong size")
22 | }
23 |
24 | shouldPanic(t, func() { ToInterfaceSlice(42) })
25 | }
26 |
--------------------------------------------------------------------------------
/dateutil/dateutil_test.go:
--------------------------------------------------------------------------------
1 | package dateutil
2 |
3 | import (
4 | "testing"
5 | )
6 |
7 | func TestDateStringANSIC(t *testing.T) {
8 | shouldMatch := [...]string{
9 | "Mon Jan 2 15:04:05 2006",
10 | "Mon Jan 12 15:04:05 2006"}
11 | ds := ANSIC
12 | for _, s := range shouldMatch {
13 | if ok := ds.Match(s); !ok {
14 | t.Logf("Does not match: %s", s)
15 | t.Fail()
16 | }
17 | if _, err := ds.Parse(s); err != nil {
18 | t.Logf("Cannot parse %s: %s", s, err)
19 | t.Fail()
20 | }
21 | t.Logf("Valid : %s", s)
22 | }
23 | }
24 |
25 | func TestDateStringUnix(t *testing.T) {
26 | shouldMatch := [...]string{
27 | "Mon Jan 2 15:04:05 MST 2006",
28 | "Mon Jan 12 15:04:05 MST 2006"}
29 | ds := UnixDate
30 | for _, s := range shouldMatch {
31 | if ok := ds.Match(s); !ok {
32 | t.Logf("Does not match: %s", s)
33 | t.Fail()
34 | }
35 | if _, err := ds.Parse(s); err != nil {
36 | t.Logf("Cannot parse %s: %s", s, err)
37 | t.Fail()
38 | }
39 | t.Logf("Valid : %s", s)
40 | }
41 | }
42 |
43 | func TestDateRuby(t *testing.T) {
44 | shouldMatch := [...]string{
45 | "Mon Jan 02 15:04:05 -0700 2006",
46 | "Mon Jan 12 15:04:05 +0000 2006"}
47 | ds := RubyDate
48 | for _, s := range shouldMatch {
49 | if ok := ds.Match(s); !ok {
50 | t.Logf("Does not match: %s", s)
51 | t.Fail()
52 | }
53 | if _, err := ds.Parse(s); err != nil {
54 | t.Logf("Cannot parse %s: %s", s, err)
55 | t.Fail()
56 | }
57 | t.Logf("Valid : %s", s)
58 | }
59 | }
60 |
61 | func TestRFC822(t *testing.T) {
62 | shouldMatch := [...]string{
63 | "02 Jan 06 15:04 MST",
64 | "12 Jan 06 15:04 MST"}
65 | ds := RFC822
66 | for _, s := range shouldMatch {
67 | if ok := ds.Match(s); !ok {
68 | t.Logf("Does not match: %s", s)
69 | t.Fail()
70 | }
71 | if _, err := ds.Parse(s); err != nil {
72 | t.Logf("Cannot parse %s: %s", s, err)
73 | t.Fail()
74 | }
75 | t.Logf("Valid : %s", s)
76 | }
77 | }
78 |
79 | func TestRFC822Z(t *testing.T) {
80 | shouldMatch := [...]string{
81 | "02 Jan 06 15:04 -0700",
82 | "12 Jan 06 15:04 +0700"}
83 | ds := RFC822Z
84 | for _, s := range shouldMatch {
85 | if ok := ds.Match(s); !ok {
86 | t.Logf("Does not match: %s", s)
87 | t.Fail()
88 | }
89 | if _, err := ds.Parse(s); err != nil {
90 | t.Logf("Cannot parse %s: %s", s, err)
91 | t.Fail()
92 | }
93 | t.Logf("Valid : %s", s)
94 | }
95 | }
96 |
97 | func TestRFC850(t *testing.T) {
98 | shouldMatch := [...]string{
99 | "Monday, 02-Jan-06 15:04:05 MST",
100 | "Friday, 02-Jan-99 15:04:05 MST"}
101 | ds := RFC850
102 | for _, s := range shouldMatch {
103 | if ok := ds.Match(s); !ok {
104 | t.Logf("Does not match: %s", s)
105 | t.Fail()
106 | }
107 | if _, err := ds.Parse(s); err != nil {
108 | t.Logf("Cannot parse %s: %s", s, err)
109 | t.Fail()
110 | }
111 | t.Logf("Valid : %s", s)
112 | }
113 | }
114 |
115 | func TestRFC1123(t *testing.T) {
116 | shouldMatch := [...]string{
117 | "Mon, 02 Jan 2006 15:04:05 MST",
118 | "Fri, 08 Jan 2006 15:04:05 CET"}
119 | ds := RFC1123
120 | for _, s := range shouldMatch {
121 | if ok := ds.Match(s); !ok {
122 | t.Logf("Does not match: %s", s)
123 | t.Fail()
124 | }
125 | if _, err := ds.Parse(s); err != nil {
126 | t.Logf("Cannot parse %s: %s", s, err)
127 | t.Fail()
128 | }
129 | t.Logf("Valid : %s", s)
130 | }
131 | }
132 |
133 | func TestRFC1123Z(t *testing.T) {
134 | shouldMatch := [...]string{
135 | "Mon, 02 Jan 2006 15:04:05 +1200",
136 | "Fri, 08 Jan 2006 15:04:05 -0700"}
137 | ds := RFC1123Z
138 | for _, s := range shouldMatch {
139 | if ok := ds.Match(s); !ok {
140 | t.Logf("Does not match: %s", s)
141 | t.Fail()
142 | }
143 | if _, err := ds.Parse(s); err != nil {
144 | t.Logf("Cannot parse %s: %s", s, err)
145 | t.Fail()
146 | }
147 | t.Logf("Valid : %s", s)
148 | }
149 | }
150 |
151 | func TestRFC3339(t *testing.T) {
152 | shouldMatch := [...]string{
153 | "2006-01-02T15:04:05-07:00",
154 | "2006-01-02T15:04:00Z",
155 | "2006-01-02T15:04:05+07:00"}
156 | ds := RFC3339
157 | for _, s := range shouldMatch {
158 | if ok := ds.Match(s); !ok {
159 | t.Logf("Does not match: %s", s)
160 | t.Fail()
161 | }
162 | if _, err := ds.Parse(s); err != nil {
163 | t.Logf("Cannot parse %s: %s", s, err)
164 | t.Fail()
165 | }
166 | t.Logf("Valid : %s", s)
167 | }
168 | }
169 |
170 | func TestRFC3339Nano(t *testing.T) {
171 | shouldMatch := [...]string{
172 | "2006-01-02T15:04:05.999999999Z",
173 | "2006-01-02T15:04:05.999999999-07:00",
174 | "2006-01-02T15:04:05.999999999+07:00"}
175 | ds := RFC3339Nano
176 | for _, s := range shouldMatch {
177 | if ok := ds.Match(s); !ok {
178 | t.Logf("Does not match: %s", s)
179 | t.Fail()
180 | }
181 | if _, err := ds.Parse(s); err != nil {
182 | t.Logf("Cannot parse %s: %s", s, err)
183 | t.Fail()
184 | }
185 | t.Logf("Valid : %s", s)
186 | }
187 | }
188 |
189 | func TestParse(t *testing.T) {
190 | shouldMatch := [...]string{
191 | "2006-01-02T15:04:05-07:00",
192 | "2006-01-02T15:04:05+07:00",
193 | "Mon, 02 Jan 2006 15:04:05 +1200",
194 | "Fri, 08 Jan 2006 15:04:05 -0700",
195 | "Mon Jan 2 15:04:05 2006",
196 | "Mon Jan 12 15:04:05 2006",
197 | "2006-01-02T15:04:05.999999999-07:00",
198 | "2006-01-02T15:04:05.999999999+07:00"}
199 | for _, s := range shouldMatch {
200 | if _, err := Parse(s); err != nil {
201 | t.Logf("Cannot parse %s: %s", s, err)
202 | t.Fail()
203 | } else {
204 | t.Logf("Valid : %s", s)
205 | }
206 | }
207 | }
208 |
--------------------------------------------------------------------------------
/dateutil/parser.go:
--------------------------------------------------------------------------------
1 | package dateutil
2 |
3 | import (
4 | "fmt"
5 | "regexp"
6 | "time"
7 | )
8 |
9 | var (
10 | layouts = [...]string{
11 | time.ANSIC,
12 | time.UnixDate,
13 | time.RubyDate,
14 | time.RFC1123,
15 | time.RFC1123Z,
16 | time.RFC3339,
17 | time.RFC3339Nano,
18 | time.RFC822,
19 | time.RFC822Z,
20 | time.RFC850}
21 | )
22 |
23 | const (
24 | // ANSICRe regexp for ANSIC format
25 | // "Mon Jan _2 15:04:05 2006"
26 | ANSICRe = "[A-Z][a-z]{2} [A-Z][a-z]{2} [0-1]{0,1}[0-9] [0-2][0-9]:[0-6][0-9]:[0-6][0-9] [0-9]{4}"
27 | // UnixDateRe regexp for UnixDate format
28 | // "Mon Jan _2 15:04:05 MST 2006"
29 | 	UnixDateRe = "[A-Z][a-z]{2} [A-Z][a-z]{2} [ 0-3]{0,1}[0-9] [0-2][0-9]:[0-6][0-9]:[0-6][0-9] [A-Z]+ [0-9]{4}"
30 | // RubyDateRe regexp for RubyDate format
31 | // "Mon Jan 02 15:04:05 -0700 2006"
32 | 	RubyDateRe = "[A-Z][a-z]{2} [A-Z][a-z]{2} [0-3][0-9] [0-2][0-9]:[0-6][0-9]:[0-6][0-9] [+-][0-9]{4} [0-9]{4}"
33 | 	// RFC822Re RFC822 regexp
34 | 	RFC822Re = "[0-3][0-9] [A-Z][a-z]{2} [0-9]{2} [0-2][0-9]:[0-6][0-9] [A-Z]+"
35 | 	// RFC822ZRe RFC822Z regexp
36 | 	RFC822ZRe = "[0-3][0-9] [A-Z][a-z]{2} [0-9]{2} [0-2][0-9]:[0-6][0-9] [+-][0-9]{4}"
37 | 	// RFC850Re RFC850 regexp
38 | 	// "Monday, 02-Jan-06 15:04:05 MST"
39 | 	RFC850Re = "[A-Z][a-z]*, [0-3][0-9]-[A-Z][a-z]*-[0-9]{2} [0-2][0-9]:[0-6][0-9]:[0-6][0-9] [A-Z]+"
40 | 	// RFC1123Re RFC1123 regexp
41 | 	RFC1123Re = "[A-Z][a-z]{2}, [0-3][0-9] [A-Z][a-z]* [0-9]{4} [0-2][0-9]:[0-6][0-9]:[0-6][0-9] [A-Z]+"
42 | 	// RFC1123ZRe RFC1123Z regexp
43 | 	RFC1123ZRe = "[A-Z][a-z]{2}, [0-3][0-9] [A-Z][a-z]* [0-9]{4} [0-2][0-9]:[0-6][0-9]:[0-6][0-9] [+-][0-9]{4}"
44 | // RFC3339Re RFC3339 regexp
45 | RFC3339Re = "[0-9]{4}-[0-1][0-9]-[0-3][0-9]T[0-2][0-9]:[0-6][0-9]:[0-6][0-9](Z|[+-][0-1][0-9]:[0-6][0-9])"
46 | // RFC3339NanoRe RFC3339Nano regexp
47 | RFC3339NanoRe = `[0-9]{4}-[0-1][0-9]-[0-3][0-9]T[0-2][0-9]:[0-6][0-9]:[0-6][0-9]\.[0-9]{9}(Z|[+-][0-1][0-9]:[0-6][0-9])`
48 | )
49 |
50 | var (
51 | // ANSIC DateString
52 | ANSIC = NewDateString(ANSICRe, time.ANSIC)
53 | // UnixDate DateString
54 | UnixDate = NewDateString(UnixDateRe, time.UnixDate)
55 | // RubyDate DateString
56 | RubyDate = NewDateString(RubyDateRe, time.RubyDate)
57 | // RFC822 DateString
58 | RFC822 = NewDateString(RFC822Re, time.RFC822)
59 | // RFC822Z DateString
60 | RFC822Z = NewDateString(RFC822ZRe, time.RFC822Z)
61 | // RFC850 DateString
62 | RFC850 = NewDateString(RFC850Re, time.RFC850)
63 | // RFC1123 DateString
64 | RFC1123 = NewDateString(RFC1123Re, time.RFC1123)
65 | // RFC1123Z DateString
66 | RFC1123Z = NewDateString(RFC1123ZRe, time.RFC1123Z)
67 | // RFC3339 DateString
68 | RFC3339 = NewDateString(RFC3339Re, time.RFC3339)
69 | // RFC3339Nano DateString
70 | RFC3339Nano = NewDateString(RFC3339NanoRe, time.RFC3339Nano)
71 |
72 | allDateStrings = []*DateString{
73 | &ANSIC,
74 | &UnixDate,
75 | &RubyDate,
76 | &RFC822,
77 | &RFC822Z,
78 | &RFC850,
79 | &RFC1123,
80 | &RFC1123Z,
81 | &RFC3339,
82 | &RFC3339Nano}
83 | )
84 |
85 | // DateString structure
86 | type DateString struct {
87 | Regexp *regexp.Regexp
88 | Layout string
89 | }
90 |
91 | // UnknownDateFormatError error
92 | type UnknownDateFormatError struct {
93 | DateStr string
94 | }
95 |
96 | // Error error implementation
97 | func (u *UnknownDateFormatError) Error() string {
98 | return fmt.Sprintf("Unknown date format: %s", u.DateStr)
99 | }
100 |
101 | // AddDateString adds a DateString to the list of default DateStrings used by Parse
102 | func AddDateString(ds DateString) {
103 | allDateStrings = append(allDateStrings, &ds)
104 | }
105 |
106 | // Parse attempts to parse a time string with all the known DateStrings (see the sketch at the end of this file)
107 | func Parse(value string) (time.Time, error) {
108 | for _, ds := range allDateStrings {
109 | if ds.Match(value) {
110 | return ds.Parse(value)
111 | }
112 | }
113 | return time.Time{}, &UnknownDateFormatError{value}
114 | }
115 |
116 | // NewDateString creates a DateString structure
117 | func NewDateString(dateRe, layout string) DateString {
118 | return DateString{regexp.MustCompile(dateRe), layout}
119 | }
120 |
121 | // Match returns true if the DateString Regexp matches value
122 | func (d *DateString) Match(value string) bool {
123 | return d.Regexp.Match([]byte(value))
124 | }
125 |
126 | // Parse parses value and returns the corresponding time.Time
127 | func (d *DateString) Parse(value string) (time.Time, error) {
128 | return time.Parse(d.Layout, value)
129 | }
130 |
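131 | // usageSketch is an illustrative sketch added for documentation purposes and
132 | // is not part of the original API: it shows how a custom format can be
133 | // registered with AddDateString and how Parse then recognizes it in addition
134 | // to the built-in formats. The "DD/MM/YYYY" regexp and layout are hypothetical.
135 | func usageSketch() (time.Time, error) {
136 | 	custom := NewDateString(`[0-3][0-9]/[0-1][0-9]/[0-9]{4} [0-2][0-9]:[0-6][0-9]:[0-6][0-9]`, "02/01/2006 15:04:05")
137 | 	AddDateString(custom)
138 | 	// Parse tries every known DateString until one matches the input
139 | 	return Parse("24/12/2021 10:30:00")
140 | }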
--------------------------------------------------------------------------------
/encoding/encoding.go:
--------------------------------------------------------------------------------
1 | package encoding
2 |
3 | import (
4 | "bytes"
5 | "encoding/binary"
6 | "errors"
7 | "fmt"
8 | "io"
9 | "os"
10 | "reflect"
11 | )
12 |
13 | // Endianness specifies the byte order used to encode and decode data
14 | type Endianness binary.ByteOrder
15 |
16 | var (
17 | // ErrSeeking error definition
18 | ErrSeeking = errors.New("Error seeking")
19 | // ErrMultipleOffsets error definition
20 | ErrMultipleOffsets = errors.New("Only one offset argument is allowed")
21 | 	// ErrInvalidNilPointer error definition
22 | 	ErrInvalidNilPointer = errors.New("Nil pointer is invalid")
23 | 	// ErrNoPointerInterface error definition
24 | 	ErrNoPointerInterface = errors.New("Interface is expected to be a pointer")
25 | )
26 |
27 | // Unpack reads a data type from a reader. An optional offset to seek to can be specified (see the sketch at the end of this file).
28 | func Unpack(reader io.ReadSeeker, endianness Endianness, data interface{}, offsets ...int64) error {
29 |
30 | switch {
31 | // No offset to deal with
32 | case len(offsets) == 0:
33 | if err := binary.Read(reader, endianness, data); err != nil {
34 | return err
35 | }
36 | // An offset to deal with
37 | 	case len(offsets) == 1:
38 | 		soughtOffset, err := reader.Seek(offsets[0], os.SEEK_SET)
39 | 		switch {
40 | 		case err != nil:
41 | 			return err
42 | 		case soughtOffset != offsets[0]:
43 | 			return ErrSeeking
44 | 		}
45 | 		// the seek succeeded, read the data located at the requested offset
46 | 		if err = binary.Read(reader, endianness, data); err != nil {
47 | 			return err
48 | 		}
49 | 
50 | // Error if more than one offset
51 | default:
52 | return ErrMultipleOffsets
53 | }
54 | return nil
55 | }
56 |
57 | func marshalArray(data interface{}, endianness Endianness) ([]byte, error) {
58 | var out []byte
59 | val := reflect.ValueOf(data)
60 | if val.IsNil() {
61 | return out, ErrInvalidNilPointer
62 | }
63 | elem := val.Elem()
64 | if elem.Kind() != reflect.Array {
65 | return out, fmt.Errorf("Not an Array structure")
66 | }
67 | for k := 0; k < elem.Len(); k++ {
68 | buff, err := Marshal(elem.Index(k).Addr().Interface(), endianness)
69 | if err != nil {
70 | return out, err
71 | }
72 | out = append(out, buff...)
73 | }
74 | return out, nil
75 | }
76 |
77 | func marshalSlice(data interface{}, endianness Endianness) ([]byte, error) {
78 | var out []byte
79 | val := reflect.ValueOf(data)
80 | if val.IsNil() {
81 | return out, ErrInvalidNilPointer
82 | }
83 | elem := val.Elem()
84 | if elem.Kind() != reflect.Slice {
85 | return out, fmt.Errorf("Not a Slice object")
86 | }
87 | s := elem
88 | 	// We first serialize the slice length as an int64
89 | sliceLen := int64(s.Len())
90 | buff, err := Marshal(&sliceLen, endianness)
91 | if err != nil {
92 | return out, err
93 | }
94 | out = append(out, buff...)
95 | for k := 0; k < s.Len(); k++ {
96 | buff, err := Marshal(s.Index(k).Addr().Interface(), endianness)
97 | if err != nil {
98 | return out, err
99 | }
100 | out = append(out, buff...)
101 | }
102 | return out, nil
103 | }
104 | // Marshal serializes the data pointed to by data into a byte slice using the given endianness
105 | func Marshal(data interface{}, endianness Endianness) ([]byte, error) {
106 | var out []byte
107 | val := reflect.ValueOf(data)
108 | if val.Kind() != reflect.Ptr {
109 | return out, ErrNoPointerInterface
110 | }
111 | if val.IsNil() {
112 | return out, ErrInvalidNilPointer
113 | }
114 | elem := val.Elem()
115 | typ := elem.Type()
116 | switch typ.Kind() {
117 | case reflect.Struct:
118 | for i := 0; i < typ.NumField(); i++ {
119 | tField := typ.Field(i)
120 | 			// Marshal recursively if the field is itself a struct
121 | switch tField.Type.Kind() {
122 | case reflect.Struct:
123 | buff, err := Marshal(elem.Field(i).Addr().Interface(), endianness)
124 | if err != nil {
125 | return out, err
126 | }
127 | out = append(out, buff...)
128 | case reflect.Array:
129 | buff, err := marshalArray(elem.Field(i).Addr().Interface(), endianness)
130 | if err != nil {
131 | return out, err
132 | }
133 | out = append(out, buff...)
134 | case reflect.Slice:
135 | buff, err := marshalSlice(elem.Field(i).Addr().Interface(), endianness)
136 | if err != nil {
137 | return out, err
138 | }
139 | out = append(out, buff...)
140 | default:
141 | buff, err := Marshal(elem.Field(i).Addr().Interface(), endianness)
142 | if err != nil {
143 | return out, err
144 | }
145 | out = append(out, buff...)
146 | }
147 | }
148 | case reflect.Array:
149 | buff, err := marshalArray(elem.Addr().Interface(), endianness)
150 | if err != nil {
151 | return out, err
152 | }
153 | out = append(out, buff...)
154 |
155 | case reflect.Slice:
156 | buff, err := marshalSlice(elem.Addr().Interface(), endianness)
157 | if err != nil {
158 | return out, err
159 | }
160 | out = append(out, buff...)
161 |
162 | default:
163 | 		writer := new(bytes.Buffer)
164 | 		if err := binary.Write(writer, endianness, elem.Interface()); err != nil {
165 | 			return out, err
166 | 		}
167 | 		out = append(out, writer.Bytes()...)
168 | }
169 | return out, nil
170 | }
171 | // UnmarshaInitSlice unmarshals data from reader into an already initialized (non zero length) slice
172 | func UnmarshaInitSlice(reader io.Reader, data interface{}, endianness Endianness) error {
173 | val := reflect.ValueOf(data)
174 | if val.IsNil() {
175 | return ErrInvalidNilPointer
176 | }
177 | slice := val.Elem()
178 | if slice.Kind() != reflect.Slice {
179 | return fmt.Errorf("Not a slice object")
180 | }
181 | if slice.Len() == 0 {
182 | return fmt.Errorf("Not initialized slice")
183 | }
184 | for k := 0; k < slice.Len(); k++ {
185 | err := Unmarshal(reader, slice.Index(k).Addr().Interface(), endianness)
186 | if err != nil {
187 | return err
188 | }
189 | }
190 | return nil
191 | }
192 |
193 | func unmarshalArray(reader io.Reader, data interface{}, endianness Endianness) error {
194 | val := reflect.ValueOf(data)
195 | if val.IsNil() {
196 | return ErrInvalidNilPointer
197 | }
198 | array := val.Elem()
199 | if array.Kind() != reflect.Array {
200 | return fmt.Errorf("Not an Array structure")
201 | }
202 | for k := 0; k < array.Len(); k++ {
203 | err := Unmarshal(reader, array.Index(k).Addr().Interface(), endianness)
204 | if err != nil {
205 | return err
206 | }
207 | }
208 | return nil
209 | }
210 |
211 | func unmarshalSlice(reader io.Reader, data interface{}, endianness Endianness) error {
212 | var sliceLen int64
213 | val := reflect.ValueOf(data)
214 | if val.IsNil() {
215 | return ErrInvalidNilPointer
216 | }
217 | elem := val.Elem()
218 | if elem.Kind() != reflect.Slice {
219 | return fmt.Errorf("Not a Slice object")
220 | }
221 | err := Unmarshal(reader, &sliceLen, endianness)
222 | if err != nil {
223 | return err
224 | }
225 | s := elem
226 | newS := reflect.MakeSlice(s.Type(), int(sliceLen), int(sliceLen))
227 | s.Set(newS)
228 | //return UnmarshaInitSlice(reader, newS.Interface(), endianness)
229 |
230 | for k := 0; k < s.Len(); k++ {
231 | err := Unmarshal(reader, s.Index(k).Addr().Interface(), endianness)
232 | if err != nil {
233 | return err
234 | }
235 | }
236 | return nil
237 | }
238 | // Unmarshal deserializes bytes read from reader into the structure pointed to by data using the given endianness
239 | func Unmarshal(reader io.Reader, data interface{}, endianness Endianness) error {
240 | val := reflect.ValueOf(data)
241 | if val.IsNil() {
242 | return ErrInvalidNilPointer
243 | }
244 | elem := val.Elem()
245 | typ := elem.Type()
246 | switch typ.Kind() {
247 | case reflect.Struct:
248 | for i := 0; i < typ.NumField(); i++ {
249 | tField := typ.Field(i)
250 | // Unmarshal recursively if field of struct is a struct
251 | switch tField.Type.Kind() {
252 | case reflect.Struct:
253 | err := Unmarshal(reader, elem.Field(i).Addr().Interface(), endianness)
254 | if err != nil {
255 | return err
256 | }
257 | case reflect.Array:
258 | err := unmarshalArray(reader, elem.Field(i).Addr().Interface(), endianness)
259 | if err != nil {
260 | return err
261 | }
262 | case reflect.Slice:
263 | err := unmarshalSlice(reader, elem.Field(i).Addr().Interface(), endianness)
264 | if err != nil {
265 | return err
266 | }
267 | default:
268 | if err := Unmarshal(reader, elem.Field(i).Addr().Interface(), endianness); err != nil {
269 | return err
270 | }
271 | }
272 | }
273 |
274 | case reflect.Array:
275 | err := unmarshalArray(reader, elem.Addr().Interface(), endianness)
276 | if err != nil {
277 | return err
278 | }
279 |
280 | case reflect.Slice:
281 | err := unmarshalSlice(reader, elem.Addr().Interface(), endianness)
282 | if err != nil {
283 | return err
284 | }
285 |
286 | default:
287 | if err := binary.Read(reader, endianness, data); err != nil {
288 | return err
289 | }
290 | }
291 | return nil
292 | }
293 |
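294 | // unpackSketch is an illustrative sketch added for documentation purposes and
295 | // is not part of the original API: it shows how Unpack reads a fixed-size
296 | // value located at an arbitrary offset of a ReadSeeker. The sample buffer and
297 | // offset are hypothetical.
298 | func unpackSketch() (uint16, error) {
299 | 	var v uint16
300 | 	// 4 bytes of padding followed by 0xBEEF encoded in little endian
301 | 	raw := []byte{0x00, 0x00, 0x00, 0x00, 0xEF, 0xBE}
302 | 	// seek to offset 4 and decode the uint16 stored there
303 | 	err := Unpack(bytes.NewReader(raw), binary.LittleEndian, &v, 4)
304 | 	return v, err
305 | }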
--------------------------------------------------------------------------------
/encoding/serialization_test.go:
--------------------------------------------------------------------------------
1 | package encoding
2 |
3 | import (
4 | "bytes"
5 | "encoding/binary"
6 | "fmt"
7 | "reflect"
8 | "testing"
9 |
10 | "github.com/0xrawsec/golang-utils/log"
11 | )
12 |
13 | type OptionalHeader struct {
14 | This float32
15 | Is float64
16 | Just uint8
17 | A uint32
18 | Test int64
19 | }
20 |
21 | type Header struct {
22 | Magic []byte
23 | Array [8]byte
24 | TableLen uint64
25 | OptionalHeader OptionalHeader
26 | }
27 |
28 | func (h Header) String() string {
29 | return fmt.Sprintf("Magic: %s Array: %s TableLen: %d", h.Magic, h.Array, h.TableLen)
30 | }
31 |
32 | type OffsetTable []uint64
33 |
34 | func init() {
35 | log.InitLogger(log.LDebug)
36 | }
37 |
38 | func TestPack(t *testing.T) {
39 | b := [8]byte{}
40 | copy(b[:], "Fooobaar")
41 | h := Header{Magic: []byte("Foobar"), Array: b, TableLen: 1337}
42 | 	writer := new(bytes.Buffer)
43 | 	if err := binary.Write(writer, binary.LittleEndian, h.TableLen); err != nil {
44 | 		t.Error(err)
45 | 	}
46 | 	t.Logf("Successfully encoded: %q", writer.Bytes())
47 | }
48 |
49 | func TestMarshal(t *testing.T) {
50 | oh := OptionalHeader{3, 4, 1, 0, 10}
51 | b := [8]byte{}
52 | copy(b[:], "Fooobaar")
53 | h := Header{Magic: []byte("Foobar"), TableLen: 1337, Array: b, OptionalHeader: oh}
54 | enc, err := Marshal(&h, binary.LittleEndian)
55 | if err != nil {
56 | t.Error(err)
57 | }
58 | t.Logf("Successfully encoded: %q", enc)
59 | }
60 |
61 | func TestUnmarshal(t *testing.T) {
62 | oh := OptionalHeader{3, 4, 1, 0, 10}
63 | b := [8]byte{}
64 | copy(b[:], "Fooobaar")
65 | h := Header{Magic: []byte("Foobar"), TableLen: 1337, Array: b, OptionalHeader: oh}
66 | enc, err := Marshal(&h, binary.LittleEndian)
67 | if err != nil {
68 | t.Error(err)
69 | }
70 | reader := bytes.NewReader(enc)
71 | nh := Header{Magic: []byte("aaa"), TableLen: 42}
72 | err = Unmarshal(reader, &nh, binary.LittleEndian)
73 | if err != nil {
74 | t.Error(err)
75 | }
76 | t.Log(nh)
77 | 	if !reflect.DeepEqual(nh, h) {
78 | 		t.Errorf("Decoded header %v does not match original %v", nh, h)
79 | 	}
80 | }
81 |
82 | func TestUnmarshalInitSlice(t *testing.T) {
83 | array := []byte{41, 42, 43}
84 | slice := make([]byte, 3)
85 | data, err := Marshal(&array, binary.LittleEndian)
86 | if err != nil {
87 | t.Error(err)
88 | }
89 | t.Logf("data: %q\n", data)
90 | reader := bytes.NewReader(data)
91 | 	sliceLen := int64(0)
92 | 	err = Unmarshal(reader, &sliceLen, binary.LittleEndian)
93 | if err != nil {
94 | t.Error(err)
95 | }
96 | err = UnmarshaInitSlice(reader, &slice, binary.LittleEndian)
97 | if err != nil {
98 | t.Error(err)
99 | }
100 | if !reflect.DeepEqual(array[:], slice) {
101 | t.Error("Test failed")
102 | }
103 | }
104 |
--------------------------------------------------------------------------------
/entropy/entropy.go:
--------------------------------------------------------------------------------
1 | package entropy
2 |
3 | import (
4 | "bufio"
5 | "bytes"
6 | "io"
7 | "math"
8 | )
9 |
10 | func readerEntropy(r io.Reader) float64 {
11 | var b byte
12 | var err error
13 | var sum, entropy float64
14 | counter := [256]float64{}
15 | buff := bufio.NewReader(r)
16 | for {
17 | b, err = buff.ReadByte()
18 | if err == io.EOF {
19 | break
20 | }
21 | counter[b]++
22 | sum++
23 | }
24 | if sum > 0 {
25 | for _, count := range counter {
26 | if count > 0 {
27 | p := count / sum
28 | entropy += p * math.Log2(p)
29 | }
30 | }
31 | }
32 | if entropy == 0 {
33 | return 0
34 | }
35 | return -entropy
36 | }
37 |
38 | // ReaderEntropy computes the entropy of a reader
39 | func ReaderEntropy(r io.Reader) float64 {
40 | return readerEntropy(r)
41 | }
42 |
43 | // BytesEntropy computes the entropy of a buffer
44 | func BytesEntropy(b []byte) float64 {
45 | return readerEntropy(bytes.NewReader(b))
46 | }
47 |
48 | // StringEntropy computes the entropy of a string
49 | func StringEntropy(s string) float64 {
50 | return readerEntropy(bytes.NewReader([]byte(s)))
51 | }
52 |
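53 | // entropySketch is an illustrative sketch added for documentation purposes and
54 | // is not part of the original API: a buffer containing every byte value exactly
55 | // once reaches the maximum entropy of 8 bits per byte, while a constant buffer
56 | // has an entropy of 0.
57 | func entropySketch() (maxEnt, minEnt float64) {
58 | 	uniform := make([]byte, 256)
59 | 	for i := range uniform {
60 | 		uniform[i] = byte(i)
61 | 	}
62 | 	maxEnt = BytesEntropy(uniform)           // 8.0
63 | 	minEnt = BytesEntropy(make([]byte, 256)) // 0.0
64 | 	return
65 | }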
--------------------------------------------------------------------------------
/entropy/entropy_test.go:
--------------------------------------------------------------------------------
1 | package entropy
2 |
3 | import (
4 | "testing"
5 | )
6 |
7 | const (
8 | high = "XZsVLgrqzy1wMabsl8TO9SuiKmOhWsz6qbBo6u8WhMDiLysEAG"
9 | expectedHigh = 4.963856189774723
10 | low = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
11 | )
12 |
13 | func TestLowEntropy(t *testing.T) {
14 | t.Logf("Low entropy: %f", StringEntropy(low))
15 | }
16 |
17 | func TestHighEntropy(t *testing.T) {
18 | eh := StringEntropy(high)
19 | t.Logf("High entropy: %f", eh)
20 | if eh != expectedHigh {
21 | t.Logf("Entropy %f != %f", eh, expectedHigh)
22 | t.Fail()
23 | }
24 | }
25 |
--------------------------------------------------------------------------------
/fileutils/fileutils.go:
--------------------------------------------------------------------------------
1 | package fileutils
2 |
3 | import (
4 | "compress/gzip"
5 | "fmt"
6 | "io"
7 | "os"
8 | )
9 |
10 | // GzipFile compresses a file to gzip and deletes the original file
11 | func GzipFile(path string) (err error) {
12 | 	// open the source file for reading
13 | f, err := os.Open(path)
14 | if err != nil {
15 | return
16 | }
17 |
18 | fname := fmt.Sprintf("%s.gz", path)
19 | partname := fmt.Sprintf("%s.part", fname)
20 |
21 | // to keep permission of compressed file
22 | stat, err := os.Stat(path)
23 | if err != nil {
24 | return
25 | }
26 |
27 | of, err := os.OpenFile(partname, os.O_CREATE|os.O_WRONLY, stat.Mode())
28 | if err != nil {
29 | return
30 | }
31 |
32 | w := gzip.NewWriter(of)
33 | 	// io.Copy stops and reports the first read or write error encountered
34 | 	if _, err = io.Copy(w, f); err != nil {
35 | 		return
36 | 	}
37 | 	w.Flush()
38 | 	// gzip writer (Close also writes the gzip footer)
39 | 	w.Close()
40 | // original file
41 | f.Close()
42 | // part file
43 | of.Close()
44 | if err = os.Remove(path); err != nil {
45 | return err
46 | }
47 | // rename the file to its final name
48 | return os.Rename(partname, fname)
49 | }
50 |
--------------------------------------------------------------------------------
/fileutils/hash/hash.go:
--------------------------------------------------------------------------------
1 | package hash
2 |
3 | import (
4 | "crypto/md5"
5 | "crypto/sha1"
6 | "crypto/sha256"
7 | "crypto/sha512"
8 | "encoding/hex"
9 | "fmt"
10 | "io"
11 | "os"
12 | )
13 |
14 | func openFile(path string, pan bool) *os.File {
15 | file, err := os.Open(path)
16 | if err != nil {
17 | file.Close()
18 | switch pan {
19 | case true:
20 | panic(err)
21 | default:
22 | fmt.Println(err)
23 | }
24 | }
25 | return file
26 | }
27 |
28 | // Hashes structure definition
29 | type Hashes struct {
30 | Path string `json:"path"`
31 | Md5 string `json:"md5"`
32 | Sha1 string `json:"sha1"`
33 | Sha256 string `json:"sha256"`
34 | Sha512 string `json:"sha512"`
35 | }
36 |
37 | // New Hashes structure
38 | func New() Hashes {
39 | return Hashes{}
40 | }
41 |
42 | // Update the current Hashes structure
43 | func (h *Hashes) Update(path string) (err error) {
44 | var buffer [4096]byte
45 | file, err := os.Open(path)
46 | if err != nil {
47 | return err
48 | }
49 | defer file.Close()
50 | md5 := md5.New()
51 | sha1 := sha1.New()
52 | sha256 := sha256.New()
53 | sha512 := sha512.New()
54 |
55 | for read, err := file.Read(buffer[:]); err != io.EOF && read != 0; read, err = file.Read(buffer[:]) {
56 | if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
57 | return err
58 | }
59 | md5.Write(buffer[:read])
60 | sha1.Write(buffer[:read])
61 | sha256.Write(buffer[:read])
62 | sha512.Write(buffer[:read])
63 | }
64 |
65 | h.Path = path
66 | h.Md5 = hex.EncodeToString(md5.Sum(nil))
67 | h.Sha1 = hex.EncodeToString(sha1.Sum(nil))
68 | h.Sha256 = hex.EncodeToString(sha256.Sum(nil))
69 | h.Sha512 = hex.EncodeToString(sha512.Sum(nil))
70 | return nil
71 | }
72 |
--------------------------------------------------------------------------------
/fileutils/hash/hash_test.go:
--------------------------------------------------------------------------------
1 | package hash
2 |
3 | import (
4 | "path/filepath"
5 | "sync"
6 | "testing"
7 |
8 | "github.com/0xrawsec/golang-utils/fsutil/fswalker"
9 | "github.com/0xrawsec/golang-utils/log"
10 | )
11 |
12 | func TestHash(t *testing.T) {
13 | log.InitLogger(log.LDebug)
14 | wg := sync.WaitGroup{}
15 | for wi := range fswalker.Walk("../../") {
16 | if wi.Err != nil {
17 | //panic(wi.Err)
18 | }
19 | for _, fileInfo := range wi.Files {
20 | path := filepath.Join(wi.Dirpath, fileInfo.Name())
21 | wg.Add(1)
22 | go func() {
23 | h := New()
24 | log.Info(path)
25 | h.Update(path)
26 | log.Debug(h)
27 | wg.Done()
28 | }()
29 | }
30 | }
31 | wg.Wait()
32 | }
33 |
--------------------------------------------------------------------------------
/fsutil/fsutil.go:
--------------------------------------------------------------------------------
1 | package fsutil
2 |
3 | import (
4 | "errors"
5 | "io"
6 | "os"
7 | "path/filepath"
8 | )
9 |
10 | var (
11 | // ErrSrcNotRegularFile : src not a regular file
12 | ErrSrcNotRegularFile = errors.New("Source file is not a regular file")
13 | // ErrDstNotRegularFile : dst not a regular file
14 | ErrDstNotRegularFile = errors.New("Destination file is not a regular file")
15 | )
16 |
17 | // CopyFile : copies src file to dst file
18 | func CopyFile(src, dst string) (err error) {
19 | srcStats, err := os.Stat(src)
20 | if err != nil {
21 | return
22 | }
23 | if !srcStats.Mode().IsRegular() {
24 | return ErrSrcNotRegularFile
25 | }
26 | dstStats, err := os.Stat(dst)
27 | if err != nil {
28 | if !os.IsNotExist(err) {
29 | return
30 | }
31 | } else {
32 | // The file already exists
33 | if !dstStats.Mode().IsRegular() {
34 | return ErrDstNotRegularFile
35 | }
36 | }
37 | return copyFileContents(src, dst)
38 | }
39 |
40 | func copyFileContents(src, dst string) (err error) {
41 | in, err := os.Open(src)
42 | if err != nil {
43 | return
44 | }
45 | defer in.Close()
46 |
47 | out, err := os.Create(dst)
48 | if err != nil {
49 | return
50 | }
51 | defer out.Close()
52 |
53 | if _, err = io.Copy(out, in); err != nil {
54 | return
55 | }
56 | err = out.Sync()
57 | return
58 | }
59 |
60 | // AbsFromRelativeToBin : function that returns the absolute path to a file/directory
61 | // computed with the directory of the binary as the root
62 | // Example: if the binary is /opt/program, the returned path is computed relative to /opt/ (see the sketch at the end of this file)
63 | // @relPath : the parts of the path you want to Join in your final path
64 | // return (string, error) : the absolute path and an error if necessary
65 | func AbsFromRelativeToBin(relPath ...string) (string, error) {
66 | rootDirname := filepath.Dir(os.Args[0])
67 | absRootDirname, err := filepath.Abs(rootDirname)
68 | if err != nil {
69 | return "", err
70 | }
71 | return filepath.Join(absRootDirname, filepath.Join(relPath...)), nil
72 | }
73 |
74 | /////////////////////////////// Common utilities ///////////////////////////////
75 |
76 | // IsFile returns true if path is a file
77 | func IsFile(path string) bool {
78 | s, err := os.Stat(path)
79 | if err != nil {
80 | return false
81 | }
82 | return s.Mode().IsRegular()
83 | }
84 |
85 | // IsDir returns true if path is a directory
86 | func IsDir(path string) bool {
87 | s, err := os.Stat(path)
88 | if err != nil {
89 | return false
90 | }
91 | return s.IsDir()
92 | }
93 |
94 | // IsLink returns true if path is a Symlink
95 | func IsLink(path string) bool {
96 | 	s, err := os.Lstat(path) // Lstat does not follow the link
97 | if err != nil {
98 | return false
99 | }
100 | return (s.Mode()&os.ModeSymlink == os.ModeSymlink)
101 | }
102 |
103 | // Exists returns true if file at path exists
104 | func Exists(path string) bool {
105 | 	// Stat does not require the file to be readable, unlike Open
106 | 	_, err := os.Stat(path)
107 | 	if err != nil {
108 | 		return false
109 | 	}
110 | 	return true
111 | }
112 |
113 | // ResolveLink resolves the link if path is a symlink or returns the original path
114 | func ResolveLink(path string) (string, error) {
115 | if IsLink(path) {
116 | return os.Readlink(path)
117 | }
118 | return path, nil
119 | }
120 |
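121 | // configPathSketch is an illustrative sketch added for documentation purposes
122 | // and is not part of the original API: it shows how AbsFromRelativeToBin and
123 | // CopyFile combine to install a file relative to the running binary. The file
124 | // names are hypothetical.
125 | func configPathSketch() error {
126 | 	// if the binary is /opt/program/bin, dst resolves to /opt/program/conf/config.json
127 | 	dst, err := AbsFromRelativeToBin("conf", "config.json")
128 | 	if err != nil {
129 | 		return err
130 | 	}
131 | 	return CopyFile("config.json", dst)
132 | }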
--------------------------------------------------------------------------------
/fsutil/fswalker/fswalker.go:
--------------------------------------------------------------------------------
1 | package fswalker
2 |
3 | import (
4 | "fmt"
5 | "io/ioutil"
6 | "os"
7 | "path/filepath"
8 |
9 | "github.com/0xrawsec/golang-utils/log"
10 | )
11 |
12 | const (
13 | chanBuffSize = 4096
14 | )
15 |
16 | func check(err error) {
17 | if err != nil {
18 | panic(err)
19 | }
20 | }
21 |
22 | func perror(err error) {
23 | if err != nil {
24 | fmt.Println(err)
25 | }
26 | }
27 |
28 | // WalkItem returned by the Walk method
29 | type WalkItem struct {
30 | Dirpath string
31 | Dirs []os.FileInfo
32 | Files []os.FileInfo
33 | Err error
34 | }
35 |
36 | // NormalizePath normalizes a given path
37 | func NormalizePath(path string) string {
38 | pointer, err := filepath.EvalSymlinks(path)
39 | if err != nil {
40 | return path
41 | }
42 | abs, err := filepath.Abs(pointer)
43 | if err != nil {
44 | return pointer
45 | }
46 | return abs
47 | }
48 |
49 | // Walk : walks recursively through the FS
50 | func Walk(root string) <-chan WalkItem {
51 | 	// buffered channel: the walker only blocks when the channel is full
52 | iterChannel := make(chan WalkItem, chanBuffSize)
53 | dirsAlreadyProcessed := make(map[string]bool)
54 | dirsToProcess := []string{root}
55 | go func() {
56 | for len(dirsToProcess) > 0 {
57 | dirs, files := []os.FileInfo{}, []os.FileInfo{}
58 | dirpath := NormalizePath(dirsToProcess[len(dirsToProcess)-1])
59 | dirsToProcess = dirsToProcess[:len(dirsToProcess)-1]
60 | if _, ok := dirsAlreadyProcessed[dirpath]; !ok {
61 | dirsAlreadyProcessed[dirpath] = true
62 | filesInfo, err := ioutil.ReadDir(NormalizePath(dirpath))
63 | if err != nil {
64 | 					log.Errorf("Error reading directory (%s): %s\n", dirpath, err.Error())
65 | } else {
66 | for _, fileInfo := range filesInfo {
67 | switch {
68 | case fileInfo.Mode().IsDir():
69 | dirs = append(dirs, fileInfo)
70 | dirsToProcess = append(dirsToProcess, filepath.Join(dirpath, fileInfo.Name()))
71 | case fileInfo.Mode().IsRegular():
72 | files = append(files, fileInfo)
73 | case fileInfo.Mode()&os.ModeSymlink == os.ModeSymlink:
74 | var pointerFI os.FileInfo
75 | sympath := NormalizePath(filepath.Join(dirpath, fileInfo.Name()))
76 | pointerFI, err = os.Stat(sympath)
77 | if err != nil {
78 | 								log.Errorf("Error reading symlink (%s): %s\n", sympath, err.Error())
79 | } else {
80 | switch {
81 | case pointerFI.Mode().IsDir():
82 | dirs = append(dirs, fileInfo)
83 | dirsToProcess = append(dirsToProcess, sympath)
84 | case pointerFI.Mode().IsRegular():
85 | files = append(files, fileInfo)
86 | }
87 | }
88 | }
89 | }
90 | }
91 | iterChannel <- WalkItem{dirpath, dirs, files, err}
92 | }
93 | }
94 | close(iterChannel)
95 | }()
96 | return iterChannel
97 | }
98 |
--------------------------------------------------------------------------------
/fsutil/fswalker/fswalker_test.go:
--------------------------------------------------------------------------------
1 | package fswalker
2 |
3 | import (
4 | "path/filepath"
5 | "testing"
6 | )
7 |
8 | var (
9 | loopDir = "./test/test_dir/"
10 | )
11 |
12 | func TestWalk(t *testing.T) {
13 | for wi := range Walk(loopDir) {
14 | t.Log("Directories")
15 | for _, di := range wi.Dirs {
16 | t.Logf("\t%s", filepath.Join(wi.Dirpath, di.Name()))
17 | }
18 |
19 | t.Log("Files")
20 | for _, fi := range wi.Files {
21 | t.Logf("\t%s", filepath.Join(wi.Dirpath, fi.Name()))
22 | }
23 | }
24 | }
25 |
--------------------------------------------------------------------------------
/fsutil/fswalker/test/test_dir/loop:
--------------------------------------------------------------------------------
1 | /home/quentin/Workspace/Go/utils/src/toolbox/fsutil/fswalker/test/test_dir
--------------------------------------------------------------------------------
/fsutil/fswalker/test/test_dir/test.txt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/0xrawsec/golang-utils/88c616630ffce91055787a9c6b70445c71e2980e/fsutil/fswalker/test/test_dir/test.txt
--------------------------------------------------------------------------------
/fsutil/logfile/logfile.go:
--------------------------------------------------------------------------------
1 | package logfile
2 |
3 | import (
4 | "fmt"
5 | "io"
6 | "os"
7 | "path/filepath"
8 | "strconv"
9 | "strings"
10 | "sync"
11 | "time"
12 |
13 | "github.com/0xrawsec/golang-utils/fileutils"
14 | "github.com/0xrawsec/golang-utils/fsutil"
15 | "github.com/0xrawsec/golang-utils/fsutil/fswalker"
16 | "github.com/0xrawsec/golang-utils/log"
17 | )
18 |
19 | const (
20 | _ = iota // ignore first value by assigning to blank identifier
21 | //KB Kilobytes
22 | KB = 1 << (10 * iota)
23 | //MB Megabytes
24 | MB
25 | //GB Gigabytes
26 | GB
27 | //TB Terabytes
28 | TB
29 |
30 | // DefaultRotationRate defines the default value for rotation refresh rate
31 | DefaultRotationRate = time.Millisecond * 250
32 | )
33 |
34 | // LogFile interface
35 | type LogFile interface {
36 | // Path returns the path of the current LogFile
37 | Path() string
38 | // Rotate ensures file rotation
39 | Rotate() error
40 | // Rotation routine
41 | RotRoutine()
42 | // Write used to write to the LogFile
43 | Write([]byte) (int, error)
44 | // Close used to close the LogFile
45 | Close() error
46 | }
47 |
48 | // BaseLogFile structure definition
49 | type BaseLogFile struct {
50 | sync.Mutex
51 | base string
52 | dir string
53 | path string
54 | //prefix string
55 | perm os.FileMode
56 | file *os.File
57 | writer io.Writer
58 | done chan bool
59 | wg sync.WaitGroup
60 | }
61 |
62 | // Rotate implements LogFile interface
63 | func (b *BaseLogFile) Rotate() (err error) {
64 | b.Lock()
65 | defer b.Unlock()
66 | b.file.Close()
67 |
68 | // First rename all the gzip files
69 | // First find max file index
70 | maxIdx := uint64(0)
71 | for wi := range fswalker.Walk(filepath.Dir(b.path)) {
72 | for _, fi := range wi.Files {
73 | if strings.HasPrefix(fi.Name(), b.base) && strings.HasSuffix(fi.Name(), ".gz") {
74 | 				ext := strings.TrimPrefix(fi.Name(), fmt.Sprintf("%s.", b.base))
75 | sp := strings.SplitN(ext, ".", 2)
76 | if len(sp) == 2 {
77 | id, err := strconv.ParseUint(sp[0], 0, 64)
78 | if err != nil {
79 | log.Info(fi.Name())
80 | log.Errorf("Cannot parse logfile id: %s", err)
81 | }
82 | if id > maxIdx {
83 | maxIdx = id
84 | }
85 | }
86 | }
87 | }
88 | }
89 |
90 | // Actually renaming
91 | for i := maxIdx; i > 0; i-- {
92 | // renaming the zip file
93 | oldf := fmt.Sprintf("%s.%d.gz", b.path, i)
94 | newf := fmt.Sprintf("%s.%d.gz", b.path, i+1)
95 | // because there we do not guarantee that oldf exists due to previous loop
96 | if fsutil.IsFile(oldf) {
97 | if err := os.Rename(oldf, newf); err != nil {
98 | log.Errorf("Failed to rename old logfile: %s", err)
99 | }
100 | }
101 | }
102 |
103 | // Rename basename.1 to basename.2
104 | dot1 := fmt.Sprintf("%s.1", b.path)
105 | dot2 := fmt.Sprintf("%s.2", b.path)
106 | // path to part file to control that we are not already compressing
107 | dot2Part := fmt.Sprintf("%s.2.gz.part", b.path)
108 |
109 | // Should not happen but that's a precaution step not to overwrite dot2
110 | // without knowing it
111 | if fsutil.IsFile(dot2) && !fsutil.IsFile(dot2Part) {
112 | if err := fileutils.GzipFile(dot2); err != nil {
113 | log.Errorf("Failed to gzip LogFile: %s", err)
114 | }
115 | }
116 |
117 | if fsutil.IsFile(dot1) {
118 | if err := os.Rename(dot1, dot2); err != nil {
119 | log.Errorf("Failed to rename old file: %s", err)
120 | } else {
121 | // Start a routine to gzip dot2
122 | b.wg.Add(1)
123 | go func() {
124 | defer b.wg.Done()
125 | if fsutil.IsFile(dot2) && !fsutil.IsFile(dot2Part) {
126 | if err := fileutils.GzipFile(dot2); err != nil {
127 | log.Errorf("Failed to gzip LogFile: %s", err)
128 | }
129 | }
130 | }()
131 | }
132 | }
133 |
134 | // Move current to basename.1
135 | if err := os.Rename(b.path, dot1); err != nil {
136 | log.Errorf("Failed to rename old file: %s", err)
137 | }
138 |
139 | b.file, err = os.OpenFile(b.path, os.O_APPEND|os.O_CREATE|os.O_RDWR, b.perm)
140 | b.writer = b.file
141 | //l.timer.Reset(l.rotationDelay)
142 | return err
143 | }
144 |
145 | // Write implements LogFile interface
146 | func (b *BaseLogFile) Write(p []byte) (int, error) {
147 | b.Lock()
148 | defer b.Unlock()
149 | return b.writer.Write(p)
150 | }
151 |
152 | // WriteString implements LogFile interface
153 | func (b *BaseLogFile) WriteString(s string) (int, error) {
154 | return b.Write([]byte(s))
155 | }
156 |
157 | // Path implements LogFile interface
158 | func (b *BaseLogFile) Path() string {
159 | return b.path
160 | }
161 |
162 | // Close implements LogFile interface
163 | func (b *BaseLogFile) Close() error {
164 | // don't need to l.Lock here since l.done
165 | // will play the role of the semaphore
166 | // i.e. we go on next instruction only when bool is pulled
167 | // from l.done
168 | b.done <- true
169 | b.wg.Wait()
170 | return nil
171 | }
172 |
173 | // TimeRotateLogFile structure definition.
174 | // A TimeRotateLogFile rotates whenever its rotation delay expires.
175 | // The current file being written is kept in plain text. Whenever a rotation
176 | // happens, the previous file is GZIP-ed to save space on disk.
177 | // A usage sketch is provided at the end of this file.
178 | type TimeRotateLogFile struct {
179 | BaseLogFile
180 | rotationDelay time.Duration
181 | timer *time.Timer
182 | }
183 |
184 | // OpenTimeRotateLogFile opens a new TimeRotateLogFile; drot controls
185 | // the delay after which the current file is rotated and GZIP-ed
186 | func OpenTimeRotateLogFile(path string, perm os.FileMode, drot time.Duration) (l *TimeRotateLogFile, err error) {
187 |
188 | l = &TimeRotateLogFile{}
189 | // BaseLogfile fields
190 | l.base = filepath.Base(path)
191 | l.dir = filepath.Dir(path)
192 | l.path = path
193 | l.perm = perm
194 | l.wg = sync.WaitGroup{}
195 | l.done = make(chan bool)
196 |
197 | // TimeRotateLogFile fields
198 |
199 | // initializes l.timer so that it is aware of the
200 | // previous logfile writes
201 | if s, e := os.Stat(l.path); e == nil {
202 | firstRot := time.Now().Sub(s.ModTime())
203 | switch {
204 | case firstRot < 0:
205 | l.timer = time.NewTimer(drot)
206 | case firstRot > drot:
207 | // basically tell to rotate now
208 | l.timer = time.NewTimer(0)
209 | default:
210 | // first rotate at
211 | l.timer = time.NewTimer(firstRot)
212 | }
213 | } else {
214 | l.timer = time.NewTimer(drot)
215 | }
216 |
217 | l.rotationDelay = drot
218 |
219 | l.file, err = os.OpenFile(l.path, os.O_APPEND|os.O_CREATE|os.O_RDWR, l.perm)
220 |
221 | if err != nil {
222 | return
223 | }
224 |
225 | l.writer = l.file
226 |
227 | // Go routine responsible for log rotation
228 | l.wg.Add(1)
229 | go l.RotRoutine()
230 |
231 | return
232 | }
233 |
234 | // RotRoutine implements LogFile
235 | func (l *TimeRotateLogFile) RotRoutine() {
236 | defer l.wg.Done()
237 | for {
238 | select {
239 | case <-l.done:
240 | l.file.Close()
241 | return
242 | case <-l.timer.C:
243 | if err := l.Rotate(); err != nil {
244 | log.Errorf("Failed LogFile rotation: %s", err)
245 | }
246 | l.timer.Reset(l.rotationDelay)
247 | }
248 | time.Sleep(time.Millisecond * 500)
249 | }
250 | }
251 |
252 | // Close implements LogFile interface
253 | func (l *TimeRotateLogFile) Close() error {
254 | // don't need to l.Lock here since l.done
255 | // will play the role of the semaphore
256 | // i.e. we go on next instruction only when bool is pulled
257 | // from l.done
258 | l.done <- true
259 | // timer needs to be stopped not to try to Rotate while
260 | // some member have been uninitialized
261 | l.timer.Stop()
262 | l.wg.Wait()
263 |
264 | return nil
265 | }
266 |
267 | // SizeRotateLogFile structure definition.
268 | // A SizeRotateLogFile rotates automatically once it reaches a given size; rotated files are GZIP compressed
269 | type SizeRotateLogFile struct {
270 | BaseLogFile
271 | size int64
272 | }
273 |
274 | // OpenSizeRotateLogFile opens a new log file for logging rotating
275 | // according to its own size
276 | func OpenSizeRotateLogFile(path string, perm os.FileMode, size int64) (*SizeRotateLogFile, error) {
277 | l := SizeRotateLogFile{}
278 | l.base = filepath.Base(path)
279 | l.dir = filepath.Dir(path)
280 | l.path = path
281 | l.perm = perm
282 | l.wg = sync.WaitGroup{}
283 | l.done = make(chan bool)
284 | // fields specific to SizeRotateLogFile
285 | l.size = size
286 |
287 | // Open the file descriptor
288 | f, err := os.OpenFile(l.Path(), os.O_APPEND|os.O_CREATE|os.O_RDWR, l.perm)
289 | if err != nil {
290 | return nil, err
291 | }
292 |
293 | l.file = f
294 | l.writer = l.file
295 | // We start the rotate routine
296 |
297 | l.wg.Add(1)
298 | go l.RotRoutine()
299 | return &l, nil
300 | }
301 |
302 | // RotRoutine implements LogFile
303 | func (l *SizeRotateLogFile) RotRoutine() {
304 | defer l.wg.Done()
305 | for {
306 | select {
307 | case <-l.done:
308 | l.file.Close()
309 | return
310 | default:
311 | if stats, err := os.Stat(l.path); err == nil {
312 | if stats.Size() >= l.size {
313 | l.Rotate()
314 | }
315 | }
316 | }
317 | time.Sleep(time.Millisecond * 500)
318 | }
319 | }
320 |
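321 | // rotateSketch is an illustrative sketch added for documentation purposes and
322 | // is not part of the original API: it shows the intended lifecycle of a
323 | // TimeRotateLogFile (open, write, close). The path and rotation delay are
324 | // hypothetical.
325 | func rotateSketch() error {
326 | 	// rotate (and gzip the previous file) every hour
327 | 	lf, err := OpenTimeRotateLogFile("/var/log/app/app.log", 0640, time.Hour)
328 | 	if err != nil {
329 | 		return err
330 | 	}
331 | 	defer lf.Close()
332 | 	_, err = lf.WriteString("service started\n")
333 | 	return err
334 | }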
--------------------------------------------------------------------------------
/fsutil/logfile/logfile_test.go:
--------------------------------------------------------------------------------
1 | package logfile
2 |
3 | import (
4 | "crypto/rand"
5 | "fmt"
6 | "os"
7 | "path/filepath"
8 | "testing"
9 | "time"
10 | )
11 |
12 | var (
13 | dir = filepath.Join("test", "output")
14 | path = filepath.Join(dir, "logfile.log")
15 | )
16 |
17 | func init() {
18 | os.RemoveAll(dir)
19 | os.MkdirAll(dir, 0777)
20 | }
21 |
22 | func TestLogfile(t *testing.T) {
23 | var lf LogFile
24 | size := int64(MB * 10)
25 | lf, err := OpenSizeRotateLogFile(path, 0600, size)
26 |
27 | if err != nil {
28 | t.Fail()
29 | t.Logf("Cannot create logfile: %s", err)
30 | return
31 | }
32 | //lf.(*SizeRotateLogFile).SetRefreshRate(time.Nanosecond * 5)
33 | defer lf.Close()
34 | buff := make([]byte, 10)
35 | lwritten := 0
36 | for i := int64(0); i < size/5; i++ {
37 | rand.Read(buff)
38 | lf.(*SizeRotateLogFile).WriteString(fmt.Sprintf("%q\n", buff))
39 | lwritten++
40 | }
41 | t.Logf("Written %d lines", lwritten)
42 | }
43 |
44 | func TestTimeRotateLFBasic(t *testing.T) {
45 | var lf LogFile
46 | lf, err := OpenTimeRotateLogFile(path, 0600, 1*time.Second)
47 | if err != nil {
48 | t.Fatalf("Failed to create logfile")
49 | t.FailNow()
50 | }
51 | //defer lf.Close()
52 | buff := make([]byte, 10)
53 | lwritten := 0
54 | for i := int64(0); i < 1000; i++ {
55 | if i%500 == 0 {
56 | time.Sleep(1 * time.Second)
57 | }
58 | rand.Read(buff)
59 | if _, err := lf.(*TimeRotateLogFile).Write([]byte(fmt.Sprintf("%q\n", buff))); err != nil {
60 | 			t.Logf("Error writing: %s", err)
61 | }
62 | lwritten++
63 | }
64 | t.Logf("Written %d lines", lwritten)
65 | time.Sleep(3 * time.Second)
66 | lf.Close()
67 | }
68 |
69 | func TestTimeRotateLFRotateOld(t *testing.T) {
70 | for i := 0; i < 10; i++ {
71 | lf, err := OpenTimeRotateLogFile(path, 0600, 500*time.Millisecond)
72 | if err != nil {
73 | t.Fatalf("Failed to create logfile")
74 | t.FailNow()
75 | }
76 | lf.Close()
77 | time.Sleep(1 * time.Second)
78 | }
79 |
80 | }
81 |
--------------------------------------------------------------------------------
/fsutil/shred/shred.go:
--------------------------------------------------------------------------------
1 | package shred
2 |
3 | import (
4 | "crypto/rand"
5 | "os"
6 | )
7 |
8 | // Shred a file
9 | func Shred(fpath string) error {
10 | stat, err := os.Stat(fpath)
11 | if err != nil {
12 | return err
13 | }
14 |
15 | file, err := os.OpenFile(fpath, os.O_RDWR, 0)
16 | if err != nil {
17 | return err
18 | }
19 | defer file.Close()
20 |
21 | 	b := make([]byte, stat.Size())
22 | 	if _, err = rand.Read(b); err != nil {
23 | 		return err
24 | 	}
25 | 	// overwrite the file content with random data before removing it
26 | 	if _, err = file.Write(b); err != nil {
27 | 		return err
28 | 	}
29 | 	file.Sync()
30 | 	return os.Remove(fpath)
31 | }
32 |
--------------------------------------------------------------------------------
/go.mod:
--------------------------------------------------------------------------------
1 | module github.com/0xrawsec/golang-utils
2 |
3 | go 1.13
4 |
5 | require (
6 | github.com/kr/fs v0.1.0 // indirect
7 | github.com/pkg/errors v0.8.1 // indirect
8 | github.com/pkg/sftp v1.10.0
9 | github.com/stretchr/testify v1.7.1 // indirect
10 | golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2
11 | golang.org/x/tools v0.0.0-20190320215829-36c10c0a621f
12 | )
13 |
--------------------------------------------------------------------------------
/go.sum:
--------------------------------------------------------------------------------
1 | github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
2 | github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
3 | github.com/kr/fs v0.1.0 h1:Jskdu9ieNAYnjxsi0LbQp1ulIKZV1LAFgK1tWhpZgl8=
4 | github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
5 | github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
6 | github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
7 | github.com/pkg/sftp v1.10.0 h1:DGA1KlA9esU6WcicH+P8PxFZOl15O6GYtab1cIJdOlE=
8 | github.com/pkg/sftp v1.10.0/go.mod h1:NxmoDg/QLVWluQDUYG7XBZTLUpKeFa8e3aMf1BfjyHk=
9 | github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
10 | github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
11 | github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
12 | github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMTY=
13 | github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
14 | golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M=
15 | golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
16 | golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
17 | golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU=
18 | golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
19 | golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
20 | golang.org/x/tools v0.0.0-20190320215829-36c10c0a621f h1:1ZEOEQCgHwWeZkEp7AeN0DROZtO+h0NDRxtar5CdyYQ=
21 | golang.org/x/tools v0.0.0-20190320215829-36c10c0a621f/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
22 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
23 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
24 | gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
25 | gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
26 |
--------------------------------------------------------------------------------
/log/log.go:
--------------------------------------------------------------------------------
1 | package log
2 |
3 | import (
4 | "fmt"
5 | "log"
6 | "os"
7 | "runtime/debug"
8 | "strings"
9 | )
10 |
11 | const (
12 | // LDebug log level
13 | LDebug = 1
14 | // LInfo log level
15 | LInfo = 1 << 1
16 | // LError log level
17 | LError = 1 << 2
18 | // LCritical log level
19 | LCritical = 1 << 3
20 | defaultFileMode = 0640
21 | )
22 |
23 | var (
24 | gLogLevel = LInfo
25 | gLogLevelBackup = gLogLevel
26 |
27 | MockAbort = false
28 | )
29 |
30 | func init() {
31 | //gLogger.Set
32 | InitLogger(LInfo)
33 | }
34 |
35 | // InitLogger initializes the global logger
36 | func InitLogger(logLevel int) {
37 | SetLogLevel(logLevel)
38 | if logLevel <= LDebug {
39 | log.SetFlags(log.LstdFlags | log.Lshortfile)
40 | }
41 | }
42 |
43 | // SetLogfile sets the output file where log messages are written
44 | func SetLogfile(logfilePath string, opts ...os.FileMode) {
45 | var err error
46 | mode := os.FileMode(defaultFileMode)
47 | // We open the file in append mode
48 | if len(opts) > 0 {
49 | mode = opts[0]
50 | }
51 | gLogFile, err := os.OpenFile(logfilePath, os.O_RDWR|os.O_CREATE|os.O_APPEND, mode)
52 | if err != nil {
53 | panic(err)
54 | }
55 | if _, err := gLogFile.Seek(0, os.SEEK_END); err != nil {
56 | panic(err)
57 | }
58 | log.SetOutput(gLogFile)
59 | }
60 |
61 | // SetLogLevel backs up the current gLogLevel and sets it to logLevel
62 | func SetLogLevel(logLevel int) {
63 | gLogLevelBackup = gLogLevel
64 | switch logLevel {
65 | case LInfo:
66 | gLogLevel = logLevel
67 | case LDebug:
68 | gLogLevel = logLevel
69 | case LCritical:
70 | gLogLevel = logLevel
71 | case LError:
72 | gLogLevel = logLevel
73 | default:
74 | gLogLevel = LInfo
75 | }
76 | }
77 |
78 | // RestoreLogLevel restores gLogLevel to gLogLevelBackup
79 | func RestoreLogLevel() {
80 | gLogLevel = gLogLevelBackup
81 | }
82 |
83 | func logMessage(prefix string, i ...interface{}) {
84 | format := fmt.Sprintf("%s%s", prefix, strings.Repeat("%v ", len(i)))
85 | msg := fmt.Sprintf(format, i...)
86 | log.Output(3, msg)
87 | }
88 |
89 | // Info log message if gLogLevel <= LInfo
90 | func Info(i ...interface{}) {
91 | if gLogLevel <= LInfo {
92 | logMessage("INFO - ", i...)
93 | }
94 | }
95 |
96 | // Infof log message with format if gLogLevel <= LInfo
97 | func Infof(format string, i ...interface{}) {
98 | if gLogLevel <= LInfo {
99 | logMessage("INFO - ", fmt.Sprintf(format, i...))
100 | }
101 | }
102 |
103 | // Warn log message if gLogLevel <= LInfo
104 | func Warn(i ...interface{}) {
105 | if gLogLevel <= LInfo {
106 | logMessage("WARNING - ", i...)
107 | }
108 | }
109 |
110 | // Warnf log message with format if gLogLevel <= LInfo
111 | func Warnf(format string, i ...interface{}) {
112 | if gLogLevel <= LInfo {
113 | logMessage("WARNING - ", fmt.Sprintf(format, i...))
114 | }
115 | }
116 |
117 | // Debug log message if gLogLevel <= LDebug
118 | func Debug(i ...interface{}) {
119 | if gLogLevel <= LDebug {
120 | logMessage("DEBUG - ", i...)
121 | }
122 | }
123 |
124 | // Debugf log message with format if gLogLevel <= LDebug
125 | func Debugf(format string, i ...interface{}) {
126 | if gLogLevel <= LDebug {
127 | logMessage("DEBUG - ", fmt.Sprintf(format, i...))
128 | }
129 | }
130 |
131 | // Error log message if gLogLevel <= LError
132 | func Error(i ...interface{}) {
133 | if gLogLevel <= LError {
134 | logMessage("ERROR - ", i...)
135 | }
136 | }
137 |
138 | // Errorf log message with format if gLogLevel <= LError
139 | func Errorf(format string, i ...interface{}) {
140 | if gLogLevel <= LError {
141 | logMessage("ERROR - ", fmt.Sprintf(format, i...))
142 | }
143 | }
144 |
145 | // Abort logs an error and exit with return code
146 | func Abort(rc int, i ...interface{}) {
147 | if gLogLevel <= LError {
148 | logMessage("ABORT - ", i...)
149 | }
150 | if !MockAbort {
151 | os.Exit(rc)
152 | }
153 | }
154 |
155 | // Critical log message if gLogLevel <= LCritical
156 | func Critical(i ...interface{}) {
157 | if gLogLevel <= LCritical {
158 | logMessage("CRITICAL - ", i...)
159 | }
160 | }
161 |
162 | // Criticalf log message with format if gLogLevel <= LCritical
163 | func Criticalf(format string, i ...interface{}) {
164 | if gLogLevel <= LCritical {
165 | logMessage("CRITICAL - ", fmt.Sprintf(format, i...))
166 | }
167 | }
168 |
169 | // DontPanic only prints panic information but does not panic
170 | func DontPanic(i interface{}) {
171 | msg := fmt.Sprintf("%v\n %s", i, debug.Stack())
172 | logMessage("PANIC - ", msg)
173 | }
174 |
175 | // DebugDontPanic only prints panic information but does not panic
176 | func DebugDontPanic(i interface{}) {
177 | if gLogLevel <= LDebug {
178 | msg := fmt.Sprintf("%v\n %s", i, debug.Stack())
179 | logMessage("PANIC - ", msg)
180 | }
181 | }
182 |
183 | // DontPanicf only prints panic information but does not panic
184 | func DontPanicf(format string, i ...interface{}) {
185 | msg := fmt.Sprintf("%v\n %s", fmt.Sprintf(format, i...), debug.Stack())
186 | logMessage("PANIC - ", msg)
187 | }
188 |
189 | // DebugDontPanicf only prints panic information but does not panic
190 | func DebugDontPanicf(format string, i ...interface{}) {
191 | if gLogLevel <= LDebug {
192 | msg := fmt.Sprintf("%v\n %s", fmt.Sprintf(format, i...), debug.Stack())
193 | logMessage("PANIC - ", msg)
194 | }
195 | }
196 |
197 | // Panic prints panic information and call panic
198 | func Panic(i interface{}) {
199 | msg := fmt.Sprintf("%v\n %s", i, debug.Stack())
200 | logMessage("PANIC - ", msg)
201 | panic(i)
202 | }
203 |
--------------------------------------------------------------------------------
/log/log_test.go:
--------------------------------------------------------------------------------
1 | package log
2 |
3 | import (
4 | "errors"
5 | "fmt"
6 | "testing"
7 | )
8 |
9 | func TestLog(t *testing.T) {
10 | InitLogger(LDebug)
11 | Debug("We are debugging", "this part", "of", "code")
12 | Info("I log into console", "this", "and", "that")
13 | 	Infof("%s %s", "we print", "a formatted string")
14 | 	Warn("This", "is", "dangerous")
15 | 	Warnf("%s %s %s", "This", "is", "dangerous formatted string")
16 | Error(fmt.Errorf("error encountered in program"), "but also this strange number:", 42)
17 | Errorf("%s %d", "encountered error", 666)
18 | Critical("Dammit", "we are in a bad", errors.New("situation"))
19 | Criticalf("%s %s %s", "Dammit", "we are in a bad", errors.New("situation"))
20 | DontPanic(errors.New("no stress"))
21 | DebugDontPanic(errors.New("no stress"))
22 | DontPanicf("%s %s", "manual say", "we should not panic")
23 | DebugDontPanicf("%s %s", "manual say", "we should not panic")
24 | MockAbort = true
25 | Abort(0, "Aborting because of", fmt.Errorf("error raised by some function"))
26 | }
27 |
--------------------------------------------------------------------------------
/log/test/test.log:
--------------------------------------------------------------------------------
1 | 2017/02/19 20:09:43 INFO - I log into file
2 | 2019/03/20 23:35:26 INFO - I log into file
3 | 2019/03/20 23:37:00 INFO - I log into file
4 | 2021/09/24 15:39:24 INFO - I log into file
5 |
--------------------------------------------------------------------------------
/net/sftp/sftp.go:
--------------------------------------------------------------------------------
1 | package sftp
2 |
3 | import (
4 | "fmt"
5 | "io/ioutil"
6 | "os"
7 | "path/filepath"
8 | "regexp"
9 | "strings"
10 | "unsafe"
11 |
12 | "github.com/0xrawsec/golang-utils/fsutil/fswalker"
13 | "github.com/pkg/sftp"
14 | "golang.org/x/crypto/ssh"
15 | )
16 |
17 | const (
18 | FlagFile int = 1 << iota
19 | FlagDir
20 | )
21 |
22 | // Client is a wrapper around sftp.Client
23 | type Client struct {
24 | sftp.Client
25 | }
26 |
27 | func loadPrivateKey(privateKeyPath string) (sss ssh.Signer, err error) {
28 | pemBytes, err := ioutil.ReadFile(privateKeyPath)
29 | if err != nil {
30 | return
31 | }
32 | sss, err = ssh.ParsePrivateKey(pemBytes)
33 | return
34 | }
35 |
36 | // BuildSSHURL builds an SSH address (host:port) suitable for ssh.Dial
37 | // @host: hostname
38 | // @port: port
39 | func BuildSSHURL(host, port string) string {
40 | return fmt.Sprintf("%s:%s", host, port)
41 | }
42 |
43 | // PrivateKeyAuthMethod returns a ssh.AuthMethod initialized with a private key
44 | // @privateKeyPath: path to the private key to use
45 | func PrivateKeyAuthMethod(privateKeyPath string) ssh.AuthMethod {
46 | sss, err := loadPrivateKey(privateKeyPath)
47 | if err != nil {
48 | panic(err)
49 | }
50 | return ssh.PublicKeys(sss)
51 |
52 | }
53 |
54 | // New returns a new SFTP Client
55 | // @host: hostname to connect to
56 | // @port: port on the hostname
57 | // @username: username to login
58 | // @sams: list of ssh.AuthMethod to use
59 | func New(host, port, username string, sams ...ssh.AuthMethod) (*Client, error) {
60 | config := &ssh.ClientConfig{
61 | User: username,
62 | Auth: sams,
63 | }
64 | client, err := ssh.Dial("tcp", BuildSSHURL(host, port), config)
65 | if err != nil {
66 | return nil, err
67 | }
68 | tmpSftpClient, err := sftp.NewClient(client)
69 | if err != nil {
70 | return nil, err
71 | }
72 | return (*Client)(unsafe.Pointer(tmpSftpClient)), err
73 | }
74 |
75 | // ResolveSymlink resolves a path and returns the path of the file pointed to
76 | // if it is a symlink
77 | // @path: path to resolve
78 | func (sc *Client) ResolveSymlink(path string) string {
79 | pointer, err := sc.ReadLink(path)
80 | if err != nil {
81 | return path
82 | }
83 | return pointer
84 | }
85 |
86 | // Walk walks recursively through the SFTP
87 | // @root: root path to start walking through
88 | func (sc *Client) Walk(root string) <-chan fswalker.WalkItem {
89 | iterChannel := make(chan fswalker.WalkItem)
90 | dirsToProcess := []string{root}
91 | go func() {
92 | for len(dirsToProcess) > 0 {
93 | dirs, files := []os.FileInfo{}, []os.FileInfo{}
94 | dirpath := dirsToProcess[len(dirsToProcess)-1]
95 | dirsToProcess = dirsToProcess[:len(dirsToProcess)-1]
96 | filesInfo, err := sc.ReadDir(sc.ResolveSymlink(dirpath))
97 | if err != nil {
98 | 				fmt.Printf("Error reading directory (%s): %s\n", dirpath, err.Error())
99 | } else {
100 | for _, fileInfo := range filesInfo {
101 | switch {
102 | case fileInfo.Mode().IsDir():
103 | dirs = append(dirs, fileInfo)
104 | dirsToProcess = append(dirsToProcess, filepath.Join(dirpath, fileInfo.Name()))
105 | case fileInfo.Mode().IsRegular():
106 | files = append(files, fileInfo)
107 | case fileInfo.Mode()&os.ModeSymlink == os.ModeSymlink:
108 | sympath := filepath.Join(dirpath, fileInfo.Name())
109 | pointerFI, err := sc.Stat(sympath)
110 | if err != nil {
111 | 							fmt.Fprintf(os.Stderr, "Error reading symlink (%s): %s\n", sympath, err.Error())
112 | } else {
113 | switch {
114 | case pointerFI.Mode().IsDir():
115 | dirs = append(dirs, fileInfo)
116 | dirsToProcess = append(dirsToProcess, sc.Join(dirpath, fileInfo.Name()))
117 | case pointerFI.Mode().IsRegular():
118 | files = append(files, fileInfo)
119 | }
120 | }
121 | }
122 | }
123 | }
124 | iterChannel <- fswalker.WalkItem{Dirpath: dirpath,
125 | Dirs: dirs,
126 | Files: files,
127 | Err: err}
128 | }
129 | close(iterChannel)
130 | }()
131 | return iterChannel
132 | }
133 |
134 | func matchPatterns(str string, cPatterns []*regexp.Regexp) bool {
135 | for _, cPattern := range cPatterns {
136 | if cPattern.MatchString(str) {
137 | return true
138 | }
139 | }
140 | return false
141 | }
142 |
143 | func filter(osfi *[]os.FileInfo, cPatterns []*regexp.Regexp) {
144 | for i := 0; i < len(*osfi); {
145 | 		if !matchPatterns((*osfi)[i].Name(), cPatterns) {
146 | *osfi = append((*osfi)[:i], (*osfi)[i+1:]...)
147 | continue
148 | }
149 | i++
150 | }
151 | }
152 | // Find walks root and yields WalkItems whose entries match one of the given patterns; flag selects files (FlagFile), directories (FlagDir) or both (see the sketch at the end of this file)
153 | func (sc *Client) Find(root string, flag int, patterns ...string) (iterChannel chan fswalker.WalkItem) {
154 | var cPatterns []*regexp.Regexp
155 | iterChannel = make(chan fswalker.WalkItem)
156 | for _, pattern := range patterns {
157 | pattern = strings.TrimLeft(pattern, "^")
158 | pattern = strings.TrimRight(pattern, "$")
159 | cPattern, err := regexp.Compile(fmt.Sprintf("^%s$", pattern))
160 | if err == nil {
161 | cPatterns = append(cPatterns, cPattern)
162 | }
163 | }
164 | go func() {
165 | for wi := range sc.Walk(root) {
166 | if wi.Err != nil {
167 | iterChannel <- wi
168 | } else {
169 | switch flag {
170 | case FlagFile:
171 | wi.Dirs = []os.FileInfo{}
172 | filter(&wi.Files, cPatterns)
173 | case FlagDir:
174 | wi.Files = []os.FileInfo{}
175 | filter(&wi.Dirs, cPatterns)
176 | case FlagFile | FlagDir:
177 | filter(&wi.Dirs, cPatterns)
178 | filter(&wi.Files, cPatterns)
179 | }
180 | iterChannel <- wi
181 | }
182 | }
183 | close(iterChannel)
184 | }()
185 | return
186 | }
187 |
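188 | // findSketch is an illustrative sketch added for documentation purposes and is
189 | // not part of the original API: it shows how a Client is built from a private
190 | // key and how Find streams matching files. Host, port, user, key path and
191 | // pattern are hypothetical.
192 | func findSketch() error {
193 | 	client, err := New("example.com", "22", "user", PrivateKeyAuthMethod("/home/user/.ssh/id_rsa"))
194 | 	if err != nil {
195 | 		return err
196 | 	}
197 | 	defer client.Close()
198 | 	// print every *.log file found under /var/log on the remote host
199 | 	for wi := range client.Find("/var/log", FlagFile, `.*\.log`) {
200 | 		for _, fi := range wi.Files {
201 | 			fmt.Println(filepath.Join(wi.Dirpath, fi.Name()))
202 | 		}
203 | 	}
204 | 	return nil
205 | }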
--------------------------------------------------------------------------------
/net/sftp/sftp_test.go:
--------------------------------------------------------------------------------
1 | package sftp
2 |
3 | import "testing"
4 |
5 | func TestSftp(t *testing.T) {
6 | t.Log("We do nothing")
7 | }
8 |
--------------------------------------------------------------------------------
/ngram/ngram.go:
--------------------------------------------------------------------------------
1 | package ngram
2 |
3 | import (
4 | "errors"
5 | "hash/fnv"
6 | "io"
7 | "os"
8 | )
9 |
10 | type Ngram []byte
11 |
12 | type FastGenerator struct {
13 | Init bool
14 | Reader io.ReadSeeker
15 | Ngram Ngram
16 | }
17 |
18 | // ErrBadNgramSize is raised when the ngram size is not in the correct range
19 | var ErrBadNgramSize = errors.New("ErrBadNgramSize: ngram size must be in ]0;MAXINT]")
20 |
21 | // NewNgram creates a new Ngram from a buffer
22 | func NewNgram(buf []byte) Ngram {
23 | ngram := make(Ngram, len(buf))
24 | copy(ngram, buf)
25 | return ngram
26 | }
27 |
28 | // Hash computes the 64-bit FNV-1a hash of the Ngram
29 | func (ngram *Ngram) Hash() uint64 {
30 | h := fnv.New64a()
31 | h.Write((*ngram)[:])
32 | return h.Sum64()
33 | }
34 |
35 | // Generator generates Ngrams of size sNgram from a reader
36 | func Generator(reader io.Reader, sNgram int) chan Ngram {
37 | if sNgram <= 0 {
38 | panic(ErrBadNgramSize)
39 | }
40 | yield := make(chan Ngram)
41 |
42 | go func() {
43 | feed(yield, reader, sNgram)
44 | }()
45 |
46 | return yield
47 | }
48 |
49 | func feed(generator chan Ngram, reader io.Reader, sNgram int) {
50 | defer close(generator)
51 | buf := make([]byte, 4096)
52 | ngram := make(Ngram, sNgram)
53 | read, err := io.ReadFull(reader, ngram) // make sure the first ngram is fully read even on short reads
54 | if read < sNgram {
55 | return
56 | }
57 | switch err {
58 | case io.EOF, nil:
59 | generator <- NewNgram(ngram)
60 | default:
61 | panic(err)
62 | }
63 | for read, err := reader.Read(buf); err != io.EOF; read, err = reader.Read(buf) {
64 | for i := 0; i < read; i++ {
65 | copy(ngram, ngram[1:])
66 | ngram[len(ngram)-1] = buf[i]
67 | generator <- NewNgram(ngram)
68 | }
69 | }
70 | }
71 |
72 | func NewFastGenerator(reader io.ReadSeeker, sNgram int) (fg FastGenerator) {
73 | fg.Init = true
74 | fg.Reader = reader
75 | fg.Ngram = make(Ngram, sNgram)
76 | return fg
77 | }
78 |
79 | func (fg *FastGenerator) Next() (err error) {
80 | if !fg.Init {
81 | fg.Reader.Seek(int64(-(len(fg.Ngram) - 1)), os.SEEK_CUR)
82 | } else {
83 | fg.Init = false
84 | }
85 | read, err := fg.Reader.Read(fg.Ngram)
86 | if read < len(fg.Ngram) {
87 | return ErrBadNgramSize
88 | }
89 | if err != nil {
90 | return err
91 | }
92 | return nil
93 | }
94 |
--------------------------------------------------------------------------------
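
The benchmarks below expect a local test file (./test/1M.bin). For a quick feel of the API, here is a self-contained sketch over an in-memory reader; the input string and the ngram size of 3 are arbitrary.

package main

import (
	"fmt"
	"strings"

	"github.com/0xrawsec/golang-utils/ngram"
)

func main() {
	// stream every trigram of the input and print it with its FNV-1a hash
	r := strings.NewReader("hello world")
	for ng := range ngram.Generator(r, 3) {
		fmt.Printf("%q -> %x\n", ng, ng.Hash())
	}
}
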
/ngram/ngram_test.go:
--------------------------------------------------------------------------------
1 | package ngram
2 |
3 | import (
4 | "bytes"
5 | "io/ioutil"
6 | "os"
7 | "testing"
8 | )
9 |
10 | var (
11 | SizeNgram = 3
12 | TestFile = "./test/1M.bin"
13 | )
14 |
15 | func BenchmarkNgramOnFile(b *testing.B) {
16 | for i := 0; i < b.N; i++ {
17 | file, err := os.Open(TestFile)
18 | if err != nil {
19 | panic(err)
20 | }
21 | ng := Generator(file, SizeNgram)
22 | for range ng {
23 | //fmt.Println(string(ngram))
24 | }
25 | }
26 | }
27 |
28 | func BenchmarkFastOnFile(b *testing.B) {
29 | for i := 0; i < b.N; i++ {
30 | file, err := os.Open(TestFile)
31 | if err != nil {
32 | panic(err)
33 | }
34 | fng := NewFastGenerator(file, SizeNgram)
35 | for err := fng.Next(); err == nil; err = fng.Next() {
36 | //fmt.Println(string(ngram))
37 | //fmt.Println(fng.Ngram)
38 | }
39 | }
40 | }
41 |
42 | func BenchmarkNgramOnData(b *testing.B) {
43 | for i := 0; i < b.N; i++ {
44 | dat, err := ioutil.ReadFile(TestFile)
45 | if err != nil {
46 | panic(err)
47 | }
48 | ng := Generator(bytes.NewReader(dat), SizeNgram)
49 | for range ng {
50 | //fmt.Println(string(ngram))
51 | }
52 | }
53 | }
54 |
--------------------------------------------------------------------------------
/progress/progress_test.go:
--------------------------------------------------------------------------------
1 | package progress
2 |
3 | import (
4 | "fmt"
5 | "path/filepath"
6 | "testing"
7 |
8 | "github.com/0xrawsec/golang-utils/fsutil/fswalker"
9 | )
10 |
11 | func TestProgress(t *testing.T) {
12 | progress := New(80)
13 | progress.SetPre("This is a test")
14 | for wi := range fswalker.Walk("../../../") {
15 | if wi.Err != nil {
16 | //panic(wi.Err)
17 | }
18 | for _, fileInfo := range wi.Files {
19 | progress.Update(filepath.Join(wi.Dirpath, fileInfo.Name()))
20 | progress.Print()
21 | }
22 | }
23 | fmt.Print("\n")
24 | }
25 |
--------------------------------------------------------------------------------
/progress/sprogress.go:
--------------------------------------------------------------------------------
1 | package progress
2 |
3 | import (
4 | "bufio"
5 | "errors"
6 | "fmt"
7 | "math"
8 | "os"
9 | )
10 |
11 | const ellipse string = "[...]"
12 |
13 | // ErrBadSize is generated if the string size of the progress is not valid
14 | var ErrBadSize error = errors.New("ErrBadSize: progress message size not allowed")
15 |
16 | // Progress structure
17 | type Progress struct {
18 | pre string
19 | state string
20 | message string
21 | sOutput int
22 | }
23 |
24 | // New creates a new Progress; the optional argument sets the maximum output width
25 | func New(optionals ...int) Progress {
26 | if len(optionals) > 0 {
27 | if optionals[0] < 0 {
28 | panic(ErrBadSize)
29 | }
30 | return Progress{pre: "Progress", state: "|", sOutput: optionals[0]}
31 | }
32 | return Progress{pre: "Progress", state: "|"}
33 | }
34 |
35 | // SetPre sets the prefix part of the progress message
36 | func (p *Progress) SetPre(newPre string) {
37 | p.pre = newPre
38 | }
39 |
40 | // Update advances the spinner state and updates the message of the Progress
41 | func (p *Progress) Update(message string) {
42 | switch p.state {
43 | case "|":
44 | p.state = "/"
45 | case "/":
46 | p.state = "-"
47 | case "-":
48 | p.state = "\\"
49 | case "\\":
50 | p.state = "|"
51 | }
52 | if message != "" {
53 | p.message = message
54 | }
55 | }
56 |
57 | func (p *Progress) String() string {
58 | strProgressInvariant := fmt.Sprintf("%s: %s", p.pre, p.state)
59 | if len(strProgressInvariant)+len(p.message)+1 <= p.sOutput {
60 | return fmt.Sprintf("%s %s", strProgressInvariant, p.message)
61 | }
62 | sNewMessage := p.sOutput - (len(strProgressInvariant) + len(ellipse) + 1)
63 | if sNewMessage < 0 {
64 | return strProgressInvariant
65 | }
66 | limit := int(math.Floor(float64(sNewMessage) / 2))
67 | p.message = p.message[:limit] + ellipse + p.message[len(p.message)-limit:]
68 | if len(fmt.Sprintf("%s %s", strProgressInvariant, p.message)) > p.sOutput {
69 | panic(ErrBadSize)
70 | }
71 | return fmt.Sprintf("%s %s", strProgressInvariant, p.message)
72 | }
73 |
74 | // Print Progress structure on stderr or stdout if the first optional argument
75 | // is true
76 | func (p *Progress) Print(optionals ...bool) {
77 | stream := os.Stderr
78 | if len(optionals) > 0 {
79 | if optionals[0] {
80 | stream = os.Stdout
81 | }
82 | }
83 | f := bufio.NewWriter(stream)
84 | defer f.Flush()
85 | f.WriteString(fmt.Sprintf("%*s\r", p.sOutput, ""))
86 | f.WriteString(p.String() + "\r")
87 | }
88 |
--------------------------------------------------------------------------------
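
A minimal sketch of driving the spinner; the width of 80 characters, the prefix and the item labels are arbitrary.

package main

import (
	"fmt"
	"time"

	"github.com/0xrawsec/golang-utils/progress"
)

func main() {
	p := progress.New(80) // cap the output line at 80 characters
	p.SetPre("Processing")
	for i := 0; i < 100; i++ {
		p.Update(fmt.Sprintf("item %d", i))
		p.Print() // writes to stderr by default, pass true for stdout
		time.Sleep(10 * time.Millisecond)
	}
	fmt.Print("\n")
}
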
/readers/readers.go:
--------------------------------------------------------------------------------
1 | package readers
2 |
3 | import (
4 | "bufio"
5 | "bytes"
6 | "fmt"
7 | "io"
8 | "os"
9 | "unicode/utf16"
10 | "unicode/utf8"
11 | "unsafe"
12 | )
13 |
14 | // ReverseReader reads bytes from the end of reader
15 | type ReverseReader struct {
16 | rs io.ReadSeeker
17 | size int64
18 | offset int64
19 | }
20 |
21 | // NewReverseReader creates a new ReverseReader
22 | func NewReverseReader(rs io.ReadSeeker) *ReverseReader {
23 | rr := ReverseReader{}
24 | rr.rs = rs
25 | s, err := rs.Seek(0, os.SEEK_END)
26 | if err != nil {
27 | panic(err)
28 | }
29 | rr.size = s
30 | rr.offset = s
31 | return &rr
32 | }
33 |
34 | // Read implements io.Reader interface
35 | // likely poor performance due to the extra seeks
36 | func (r *ReverseReader) Read(p []byte) (n int, err error) {
37 | //fmt.Printf("Offset %d\n", r.offset)
38 | //fmt.Printf("Reading %d bytes\n", len(p))
39 | switch {
40 | case r.offset <= 0:
41 | return 0, io.EOF
42 | case r.offset-int64(len(p)) <= 0:
43 | r.rs.Seek(0, os.SEEK_SET)
44 | n, err = r.rs.Read(p[:r.offset])
45 | r.rs.Seek(0, os.SEEK_SET)
46 | r.offset = 0
47 | return n, nil
48 | default:
49 | r.offset -= int64(len(p))
50 | r.rs.Seek(r.offset, os.SEEK_SET)
51 | n, err = r.rs.Read(p)
52 | r.rs.Seek(r.offset, os.SEEK_SET)
53 | return
54 | }
55 | }
56 |
57 | // ReadRune reads a rune backward
58 | func (r *ReverseReader) ReadRune() (ru rune, size int, err error) {
59 | var rb [4]byte
60 | n, err := r.Read(rb[:])
61 | ru, size = utf8.DecodeLastRune(rb[:n])
62 | if err != nil {
63 | return
64 | }
65 | r.offset += int64(n - size)
66 | r.rs.Seek(r.offset, os.SEEK_SET)
67 | if ru == utf8.RuneError {
68 | return ru, size, fmt.Errorf("RuneError")
69 | }
70 | return
71 | }
72 |
73 | /***************************************************/
74 | /************** Readlines functions ****************/
75 | /***************************************************/
76 |
77 | func min(i, k int) int {
78 | switch {
79 | case i < k:
80 | return i
81 | case k < i:
82 | return k
83 | default:
84 | return k
85 | }
86 | }
87 |
88 | func reversedCopy(dst, src []byte) {
89 | m := min(len(src), len(dst))
90 | for i, k := 0, m-1; i < m; i++ {
91 | dst[i] = src[k]
92 | k--
93 | }
94 | }
95 |
96 | // ReversedReadlines returns the lines found in a reader in reversed order
97 | func ReversedReadlines(r io.ReadSeeker) (lines chan []byte) {
98 | lines = make(chan []byte)
99 | go func() {
100 | defer close(lines)
101 |
102 | var c [1]byte
103 | rr := NewReverseReader(r)
104 | line := make([]byte, 0, 4096)
105 | for n, err := rr.Read(c[:]); err != io.EOF && n != 0; n, err = rr.Read(c[:]) {
106 | if c[0] == '\n' {
107 | cpLine := make([]byte, len(line))
108 | reversedCopy(cpLine, line)
109 | lines <- cpLine
110 | line = make([]byte, 0, 4096)
111 | } else {
112 | // don't append newline
113 | line = append(line, c[0])
114 | }
115 | }
116 | // process the last line
117 | cpLine := make([]byte, len(line))
118 | reversedCopy(cpLine, line)
119 | lines <- cpLine
120 | }()
121 | return
122 | }
123 |
124 | // Readlines : returns a channel containing the lines of the reader
125 | func Readlines(reader io.Reader) (generator chan []byte) {
126 | generator = make(chan []byte)
127 | go func() {
128 | defer close(generator)
129 | lreader := bufio.NewReader(reader)
130 | for line, isPrefix, err := lreader.ReadLine(); err == nil; {
131 | fullLine := make([]byte, len(line))
132 | copy(fullLine, line)
133 | for isPrefix {
134 | line, isPrefix, err = lreader.ReadLine()
135 | fullLine = append(fullLine, line...)
136 | }
137 | generator <- fullLine
138 | line, isPrefix, err = lreader.ReadLine()
139 | }
140 | }()
141 | return generator
142 | }
143 |
144 | // ReadlinesUTF16 : returns a channel of trimmed []byte lines. To be used with a reader containing UTF-16 (little-endian) encoded text
145 | func ReadlinesUTF16(reader io.Reader) (generator chan []byte) {
146 | generator = make(chan []byte)
147 | go func() {
148 | var utf16Rune [2]byte
149 | var r rune
150 | var u [1]uint16
151 |
152 | // closing generator
153 | defer close(generator)
154 |
155 | line := make([]byte, 0, 4096)
156 | i := 0
157 | for read, err := reader.Read(utf16Rune[:]); read == 2 && err == nil; {
158 | u[0] = *(*uint16)(unsafe.Pointer(&utf16Rune[0]))
159 | // Filter out UTF16 BOM
160 | if u[0] == 0xfeff && i == 0 {
161 | goto tail
162 | }
163 |
164 | r = utf16.Decode(u[:])[0]
165 | if r == '\n' {
166 | fullLine := make([]byte, len(line))
167 | copy(fullLine, line)
168 | fullLine = bytes.TrimRight(fullLine, "\r\n")
169 | generator <- fullLine
170 | line = make([]byte, 0, 4096)
171 | } else {
172 | line = append(line, byte(r))
173 | }
174 |
175 | tail:
176 | read, err = reader.Read(utf16Rune[:])
177 | i++
178 |
179 | }
180 | }()
181 | return generator
182 | }
183 |
--------------------------------------------------------------------------------
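
The test file below exercises ReverseReader and ReversedReadlines; here is a short self-contained sketch of the two line iterators on an in-memory buffer. The sample text is arbitrary.

package main

import (
	"fmt"
	"strings"

	"github.com/0xrawsec/golang-utils/readers"
)

func main() {
	text := "first\nsecond\nthird"

	// forward iteration: first, second, third
	for line := range readers.Readlines(strings.NewReader(text)) {
		fmt.Println(string(line))
	}

	// backward iteration (needs an io.ReadSeeker): third, second, first
	for line := range readers.ReversedReadlines(strings.NewReader(text)) {
		fmt.Println(string(line))
	}
}
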
/readers/reverse_test.go:
--------------------------------------------------------------------------------
1 | package readers
2 |
3 | import (
4 | "io"
5 | "os"
6 | "strings"
7 | "testing"
8 |
9 | "text/scanner"
10 | )
11 |
12 | const (
13 | text = `This is a text containing
14 | lines that should be
15 | printed in reversed
16 | order.`
17 |
18 | testfile = "../LICENSE"
19 | testfileCharCnt = 35141
20 | testfileLineCnt = 674
21 | )
22 |
23 | func TestReverseReaderBasic(t *testing.T) {
24 | var c [4]byte
25 | r := strings.NewReader(text)
26 | rr := NewReverseReader(r)
27 | for _, err := rr.Read(c[:]); err != io.EOF; _, err = rr.Read(c[:]) {
28 | t.Logf("%q", c)
29 | }
30 | }
31 |
32 | func TestReverseReaderReadRune(t *testing.T) {
33 | r := strings.NewReader(text)
34 | rr := NewReverseReader(r)
35 | for ru, _, err := rr.ReadRune(); err != io.EOF; ru, _, err = rr.ReadRune() {
36 | t.Logf("%q", ru)
37 | }
38 | }
39 |
40 | func TestReversedReadline(t *testing.T) {
41 | r := strings.NewReader(text)
42 | for line := range ReversedReadlines(r) {
43 | t.Log(string(line))
44 | }
45 | }
46 |
47 | func TestReversedReadlineOnFile(t *testing.T) {
48 | var lCnt, cCnt int
49 | fd, err := os.Open(testfile)
50 | if err != nil {
51 | t.Logf("Failed to open test file: %s", testfile)
52 | t.FailNow()
53 | }
54 | defer fd.Close()
55 | for line := range ReversedReadlines(fd) {
56 | // +1 for \n removed
57 | cCnt += len(line) + 1
58 | lCnt++
59 | }
60 | t.Logf("#char: %d #lines: %d", cCnt, lCnt)
61 | // we add one to the original counters because last line of the file is counted
62 | // as a line while it is not really since there is no \n
63 | if lCnt != testfileLineCnt+1 || cCnt != testfileCharCnt+1 {
64 | t.Log("Bad number of lines or characters")
65 | t.FailNow()
66 | }
67 | }
68 |
69 | func TestReverseReaderAdvanced(t *testing.T) {
70 | r := strings.NewReader(text)
71 | rr := NewReverseReader(r)
72 | s := scanner.Scanner{}
73 | s.Init(rr)
74 | s.Whitespace = 0
75 | s.Whitespace ^= 0x1 << '\n'
76 | for ru := s.Scan(); ru != scanner.EOF; ru = s.Scan() {
77 | switch ru {
78 | case '\n':
79 | t.Logf("%q", s.TokenText())
80 | default:
81 | t.Logf("%q", s.TokenText())
82 | }
83 | }
84 | }
85 |
--------------------------------------------------------------------------------
/regexp/submatch/submatch.go:
--------------------------------------------------------------------------------
1 | package submatch
2 |
3 | import (
4 | "errors"
5 | "fmt"
6 | "reflect"
7 | "regexp"
8 | "strconv"
9 | "time"
10 | "unsafe"
11 | )
12 |
13 | // Helper structure definition
14 | type Helper struct {
15 | IndexMap map[string]int
16 | timeLayout string
17 | regex *regexp.Regexp
18 | submatch [][]byte
19 | }
20 |
21 | var (
22 | ErrNoSuchKey = errors.New("no such key")
23 | ErrIndexOutOfRange = errors.New("index out of range")
24 | ErrUnparsableDestinationType = errors.New("unknown destination type")
25 | ErrNotValidPtr = errors.New("not valid pointer")
26 | TimeType = reflect.ValueOf(time.Time{}).Type()
27 | )
28 |
29 | type FieldNotSetError struct {
30 | Field string
31 | }
32 |
33 | func (fnse FieldNotSetError) Error() string {
34 | return fmt.Sprintf("Cannot set field: %s", fnse.Field)
35 | }
36 |
37 | // NewHelper : creates a new submatch helper from a regexp struct
38 | func NewHelper(r *regexp.Regexp) (sm Helper) {
39 | sm.IndexMap = make(map[string]int)
40 | for i, name := range r.SubexpNames() {
41 | sm.IndexMap[name] = i
42 | }
43 | sm.timeLayout = time.RFC3339
44 | sm.regex = r
45 | return
46 | }
47 |
48 | // Prepare : this method must be called on any []byte/string you
49 | // want the helper to work on. It applies regexp.Regexp.FindSubmatch
50 | // to b and initializes the internal helper state for further processing.
51 | func (sh *Helper) Prepare(b []byte) {
52 | sh.submatch = sh.regex.FindSubmatch(b)
53 | }
54 |
55 | // SetTimeLayout : setter for timeLayout field of SubmatchHelper
56 | // to properly parse timestamps
57 | func (sh *Helper) SetTimeLayout(layout string) {
58 | sh.timeLayout = layout
59 | }
60 |
61 | func strParse(s *string, k reflect.Kind) (interface{}, error) {
62 | switch k {
63 | // String
64 | case reflect.String:
65 | // return a copy of the string
66 | return string(*s), nil
67 | // Uints
68 | case reflect.Uint8:
69 | conv, err := strconv.ParseUint(*s, 10, 8)
70 | return uint8(conv), err
71 | case reflect.Uint16:
72 | conv, err := strconv.ParseUint(*s, 10, 16)
73 | return uint16(conv), err
74 | case reflect.Uint32:
75 | conv, err := strconv.ParseUint(*s, 10, 32)
76 | return uint32(conv), err
77 | case reflect.Uint64:
78 | return strconv.ParseUint(*s, 10, 64)
79 | case reflect.Uint:
80 | conv, err := strconv.ParseUint(*s, 10, 8*int(unsafe.Sizeof(uint(0))))
81 | return uint(conv), err
82 | // Ints
83 | case reflect.Int8:
84 | conv, err := strconv.ParseInt(*s, 10, 8)
85 | return int8(conv), err
86 | case reflect.Int16:
87 | conv, err := strconv.ParseInt(*s, 10, 16)
88 | return int16(conv), err
89 | case reflect.Int32:
90 | conv, err := strconv.ParseInt(*s, 10, 32)
91 | return int32(conv), err
92 | case reflect.Int64:
93 | return strconv.ParseInt(*s, 10, 64)
94 | case reflect.Int:
95 | conv, err := strconv.ParseInt(*s, 10, 8*int(unsafe.Sizeof(int(0))))
96 | return int(conv), err
97 | // Floats
98 | case reflect.Float32:
99 | conv, err := strconv.ParseFloat(*s, 32)
100 | return float32(conv), err
101 | case reflect.Float64:
102 | return strconv.ParseFloat(*s, 64)
103 | // Bool
104 | case reflect.Bool:
105 | return strconv.ParseBool(*s)
106 | }
107 | return "", ErrUnparsableDestinationType
108 | }
109 |
110 | // Unmarshal : unmarshal the data found by the Helper's regexp into v.
111 | // Helper needs to be prepared first through the Prepare function.
112 | func (sh *Helper) Unmarshal(v interface{}) error {
113 | rv := reflect.ValueOf(v)
114 | if rv.Kind() != reflect.Ptr || rv.IsNil() {
115 | return ErrNotValidPtr
116 | }
117 | s := rv.Elem()
118 | t := s.Type()
119 | for i := 0; i < t.NumField(); i++ {
120 | field := t.Field(i)
121 |
122 | if !field.IsExported() {
123 | continue
124 | }
125 |
126 | // Unmarshal recursively if field of struct is a struct
127 | if s.Field(i).Kind() == reflect.Struct && s.Field(i).Type().Name() != TimeType.Name() {
128 | if err := sh.Unmarshal(s.Field(i).Addr().Interface()); err != nil {
129 | return err
130 | }
131 | } else {
132 | // We get the value in the tag named regexp
133 | key := field.Tag.Get("regexp")
134 | if key == "" {
135 | // If tag does not exist, we take field name
136 | key = field.Name
137 | }
138 | // Get the matched value and update the interface if necessary
139 | b, err := sh.GetBytes(key)
140 | switch err {
141 | case nil:
142 | var cast interface{}
143 | str := string(b)
144 | switch {
145 | case s.Field(i).Type().Name() == TimeType.Name() && s.Field(i).Kind() == reflect.Struct:
146 | cast, err = time.Parse(sh.timeLayout, str)
147 | default:
148 | cast, err = strParse(&str, s.Field(i).Kind())
149 | }
150 | if err != nil {
151 | return err
152 | }
153 | s.Field(i).Set(reflect.ValueOf(cast))
154 | case ErrNoSuchKey:
155 | // It means we cannot set the field
156 | //return FieldNotSetError{field.Name}
157 | default:
158 | return err
159 | }
160 | }
161 | }
162 | return nil
163 | }
164 |
165 | // GetBytes : Get the matching []byte (if any) extracted from
166 | // the data matched by the Helper's regexp. Helper needs to be
167 | // prepared using the Prepare function to work properly.
168 | func (sh *Helper) GetBytes(key string) ([]byte, error) {
169 | if i, ok := sh.IndexMap[key]; ok {
170 | if i < len(sh.submatch) {
171 | return sh.submatch[i], nil
172 | }
173 | return []byte{}, ErrIndexOutOfRange
174 | }
175 | return []byte{}, ErrNoSuchKey
176 | }
177 |
--------------------------------------------------------------------------------
/regexp/submatch/submatch_test.go:
--------------------------------------------------------------------------------
1 | package submatch
2 |
3 | import (
4 | "fmt"
5 | "regexp"
6 | "testing"
7 | "time"
8 | )
9 |
10 | type TestStructure struct {
11 | M1 string `regexp:"m1"`
12 | M2 int8 `regexp:"m2"`
13 | M3 int16 `regexp:"m3"`
14 | M4 time.Time `regexp:"m4"`
15 | }
16 |
17 | func TestGetByte(t *testing.T) {
18 | rex := regexp.MustCompile("(?P<test>.*)")
19 | line := "shouldmatcheverything"
20 | sh := NewHelper(rex)
21 | sh.Prepare([]byte(line))
22 | val, err := sh.GetBytes("test")
23 | t.Logf("Retrieved value: %s", val)
24 | if string(val) != line || err != nil {
25 | if err != nil {
26 | t.Errorf("Failed to retrieve field: %s", err)
27 | }
28 | t.Errorf("Retrieved field value not expected")
29 | }
30 | }
31 |
32 | func TestUnmarshal(t *testing.T) {
33 | rex := regexp.MustCompile("((?P<m1>.*?),(?P<m2>.*?),(?P<m3>.*?),(?P<m4>.*),)")
34 | line := fmt.Sprintf("thisisastring,4,42,%s,", time.Now().Format(time.RFC1123Z))
35 | sh := NewHelper(rex)
36 | ts := TestStructure{}
37 | sh.SetTimeLayout(time.RFC1123Z)
38 | sh.Prepare([]byte(line))
39 | err := sh.Unmarshal(&ts)
40 | if err != nil {
41 | t.Errorf("Failed to unmarshal: %s", err)
42 | }
43 | t.Logf("%v", ts)
44 | }
45 |
--------------------------------------------------------------------------------
/runtime/systeminfo/systeminfo.go:
--------------------------------------------------------------------------------
1 | package systeminfo
2 |
3 | import (
4 | "bytes"
5 | "fmt"
6 | "os/exec"
7 | "regexp"
8 | "runtime"
9 | "strings"
10 |
11 | "github.com/0xrawsec/golang-utils/log"
12 | "github.com/0xrawsec/golang-utils/readers"
13 | )
14 |
15 | type SystemInfo struct {
16 | SysLocale string
17 | OSName string
18 | OSVersion string
19 | }
20 |
21 | func (si SystemInfo) String() string {
22 | return fmt.Sprintf("locale: %s; osname: %s; osversion: %s", si.SysLocale, si.OSName, si.OSVersion)
23 | }
24 |
25 | type SystemInfoGetter interface {
26 | Get() (SystemInfo, error)
27 | }
28 |
29 | // Utility
30 |
31 | func trimString(str string) string {
32 | return strings.Trim(str, "\t\r\n ")
33 | }
34 |
35 | // EmptySystemInfoGetter
36 | type EmptySystemInfoGetter struct{}
37 |
38 | func (esig EmptySystemInfoGetter) Get() (si SystemInfo, err error) {
39 | si.SysLocale = "unk"
40 | si.OSName = "unk"
41 | si.OSVersion = "unk"
42 | return
43 | }
44 |
45 | // WindowsSystemInfoGetter
46 | var (
47 | winOsNameRegexp = regexp.MustCompile(`^OS Name:\s+(?P<osname>.*)`)
48 | winOsVersionRegexp = regexp.MustCompile(`^OS Version:\s+(?P<osversion>.*)`)
49 | winSysLocale = regexp.MustCompile(`^System Locale:\s+(?P<locale>.*)`)
50 | )
51 |
52 | type WindowsSystemInfoGetter struct{}
53 |
54 | func (wsig WindowsSystemInfoGetter) Get() (si SystemInfo, err error) {
55 | cmd := exec.Command("cmd", "/c", "systeminfo")
56 | output, err := cmd.CombinedOutput()
57 | if err != nil {
58 | return
59 | }
60 | for line := range readers.Readlines(bytes.NewReader(output)) {
61 | //log.Debug(string(line))
62 | switch {
63 | case winOsNameRegexp.Match(line):
64 | log.Debug(string(line))
65 | si.OSName = trimString(string(winOsNameRegexp.FindSubmatch(line)[1]))
66 | case winOsVersionRegexp.Match(line):
67 | si.OSVersion = trimString(string(winOsVersionRegexp.FindSubmatch(line)[1]))
68 | case winSysLocale.Match(line):
69 | si.SysLocale = trimString(string(winSysLocale.FindSubmatch(line)[1]))
70 | }
71 | }
72 | return
73 | }
74 |
75 | func New() SystemInfoGetter {
76 | switch runtime.GOOS {
77 | case "windows":
78 | return WindowsSystemInfoGetter{}
79 | }
80 | return EmptySystemInfoGetter{}
81 | }
82 |
--------------------------------------------------------------------------------
/runtime/systeminfo/systeminfo_test.go:
--------------------------------------------------------------------------------
1 | package systeminfo
2 |
3 | import (
4 | "testing"
5 |
6 | "github.com/0xrawsec/golang-utils/log"
7 | )
8 |
9 | func init() {
10 | log.InitLogger(log.LDebug)
11 | }
12 |
13 | func TestSystemInfo(t *testing.T) {
14 | sig := New()
15 | si, err := sig.Get()
16 | if err != nil {
17 | t.Error(err)
18 | }
19 | t.Log(si)
20 | }
21 |
--------------------------------------------------------------------------------
/scanner/scanner.go:
--------------------------------------------------------------------------------
1 | package scanner
2 |
3 | import (
4 | "bufio"
5 | "fmt"
6 | "io"
7 | "os"
8 |
9 | "github.com/0xrawsec/golang-utils/datastructs"
10 | )
11 |
12 | const (
13 | // EOF scanner
14 | EOF = -(iota + 1)
15 | // MaxTokenLen length of a token
16 | MaxTokenLen = 1 << 20 // 1 Mega Token
17 | )
18 |
19 | var (
20 | // Whitespace default whitespaces characters taken by scanner
21 | Whitespace = "\t\n\r "
22 | )
23 |
24 | // Scanner structure definition
25 | type Scanner struct {
26 | Offset int64
27 | LastOffset int64
28 | Whitespace *datastructs.BitSet
29 | Error func(error)
30 | r *bufio.Reader
31 | tokenIdx int
32 | token []rune
33 | }
34 |
35 | // New creates a new scanner from reader
36 | func New(r io.Reader) (s *Scanner) {
37 | s = &Scanner{}
38 | s.r = bufio.NewReader(r)
39 | s.Whitespace = datastructs.NewBitSet(256)
40 | s.Error = func(err error) {
41 | fmt.Fprintf(os.Stderr, "Scanner error: %s\n", err)
42 | }
43 | // initialize default with Whitespace variable
44 | s.InitWhitespace(Whitespace)
45 | s.token = make([]rune, MaxTokenLen)
46 | s.tokenIdx = 0
47 | return
48 | }
49 |
50 | // InitWhitespace initialises a new set of whitespace characters for the scanner
51 | func (s *Scanner) InitWhitespace(w string) {
52 | s.Whitespace = datastructs.NewBitSet(256)
53 | for _, c := range w {
54 | s.Whitespace.Set(int(c))
55 | }
56 | }
57 |
58 | // Scan scans until we reach a whitespace token
59 | func (s *Scanner) Scan() (r rune) {
60 | s.tokenIdx = 0
61 | r, _, err := s.r.ReadRune()
62 | prevRune := r
63 |
64 | if s.Whitespace.Get(int(r)) {
65 | s.token[s.tokenIdx] = r
66 | s.tokenIdx++
67 | return r
68 | }
69 |
70 | for ; !s.Whitespace.Get(int(r)); r, _, err = s.r.ReadRune() {
71 | switch err {
72 | case nil:
73 | break
74 | case io.EOF:
75 | return EOF
76 | default:
77 | s.Error(err)
78 | return EOF
79 | }
80 | s.token[s.tokenIdx] = r
81 | s.tokenIdx++
82 | prevRune = r
83 | }
84 |
85 | // We have to UnreadRune because we went too far of one rune
86 | err = s.r.UnreadRune()
87 | if err != nil {
88 | s.Error(err)
89 | }
90 |
91 | return prevRune
92 | }
93 |
94 | // TokenText returns the text of the token accumulated by the last call to Scan
95 | func (s *Scanner) TokenText() string {
96 | return string(s.token[:s.tokenIdx])
97 | }
98 |
99 | // Tokenize returns a chan of tokens found in scanner until exhaustion
100 | func (s *Scanner) Tokenize() (cs chan string) {
101 | cs = make(chan string)
102 | go func() {
103 | defer close(cs)
104 | for r := s.Scan(); r != EOF; r = s.Scan() {
105 | cs <- s.TokenText()
106 | }
107 | cs <- s.TokenText()
108 | }()
109 | return
110 | }
111 |
--------------------------------------------------------------------------------
/scanner/scanner_test.go:
--------------------------------------------------------------------------------
1 | package scanner
2 |
3 | import (
4 | "strings"
5 | "testing"
6 | )
7 |
8 | const (
9 | colontext = `This:is:a:text:separated:by:colon:not:parseable:by:text/scanner:go:package`
10 | nlcolontext = "This:is\na:text:separated\nby:colon\nor:newline\nnot:parseable\nby:text/scanner:go\npackage"
11 | )
12 |
13 | func TestScannerBasic(t *testing.T) {
14 | s := New(strings.NewReader(colontext))
15 | s.InitWhitespace(":")
16 | for r := s.Scan(); r != EOF; r = s.Scan() {
17 | switch r {
18 | case ':':
19 | break
20 | default:
21 | t.Logf("r=%q tokens=%s", r, s.TokenText())
22 | }
23 | }
24 | t.Logf("tokens=%s", s.TokenText())
25 | }
26 |
27 | func TestScannerBasic2(t *testing.T) {
28 | s := New(strings.NewReader(nlcolontext))
29 | s.InitWhitespace(":\n")
30 | for r := s.Scan(); r != EOF; r = s.Scan() {
31 | switch r {
32 | case ':', '\n':
33 | break
34 | default:
35 | t.Logf("r=%q tokens=%s", r, s.TokenText())
36 | }
37 | }
38 | t.Logf("tokens=%s", s.TokenText())
39 | }
40 |
41 | func TestScannerTokenize(t *testing.T) {
42 | s := New(strings.NewReader(nlcolontext))
43 | s.InitWhitespace(":\n")
44 | for tok := range s.Tokenize() {
45 | t.Logf("tok=%q", tok)
46 |
47 | }
48 | }
49 |
--------------------------------------------------------------------------------
/stats/stats.go:
--------------------------------------------------------------------------------
1 | package stats
2 |
3 | import "math"
4 |
5 | // Truncate truncates a float to p digits of precision
6 | func Truncate(f float64, p int) float64 {
7 | return float64(int(f*math.Pow10(p))) / math.Pow10(p)
8 | }
9 |
10 | // Average computes the average of a slice of floats
11 | func Average(floats []float64) float64 {
12 | var sum float64
13 | var cnt float64
14 | for _, f := range floats {
15 | sum += f
16 | cnt++
17 | }
18 | return sum / cnt
19 | }
20 |
21 | // StdDev returns the population standard deviation of a slice of floats
22 | func StdDev(floats []float64) float64 {
23 | var sum float64
24 | a := Average(floats)
25 | for _, f := range floats {
26 | sum += math.Pow(f-a, 2)
27 | }
28 | return math.Sqrt(sum / float64(len(floats)))
29 | }
30 |
--------------------------------------------------------------------------------
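
A usage note: StdDev divides by N, so it returns the population standard deviation. If the sample standard deviation is needed, Bessel's correction can be applied on top of it, as in this sketch (the series is the same one used in the test below).

package main

import (
	"fmt"
	"math"

	"github.com/0xrawsec/golang-utils/stats"
)

func main() {
	serie := []float64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}

	pop := stats.StdDev(serie) // sqrt(8.25) ≈ 2.872

	// Bessel's correction: multiply by sqrt(N/(N-1)) to get the sample standard deviation
	n := float64(len(serie))
	sample := pop * math.Sqrt(n/(n-1)) // ≈ 3.028

	fmt.Println(stats.Truncate(pop, 2), stats.Truncate(sample, 2)) // 2.87 3.02
}
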
/stats/stats_test.go:
--------------------------------------------------------------------------------
1 | package stats
2 |
3 | import (
4 | "testing"
5 | )
6 |
7 | var (
8 | serie = []float64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
9 | )
10 |
11 | func TestAverage(t *testing.T) {
12 | avg := Average(serie)
13 | t.Logf("Average of population: %f", avg)
14 | if avg != 5.5 {
15 | t.Fail()
16 | }
17 | }
18 |
19 | func TestStdDev(t *testing.T) {
20 | sd := StdDev(serie)
21 | t.Logf("Standard deviation of population: %f", sd)
22 | if Truncate(sd, 2) != 2.87 {
23 | t.Fail()
24 | }
25 | }
26 |
--------------------------------------------------------------------------------
/sync/semaphore/semaphore.go:
--------------------------------------------------------------------------------
1 | // Package semaphore implements a basic semaphore object widely inspired from
2 | // source: http://www.golangpatterns.info/concurrency/semaphores
3 | package semaphore
4 |
5 | type empty struct{}
6 | type Semaphore chan empty
7 |
8 | // New : new Semaphore object
9 | func New(capacity uint64) Semaphore {
10 | return make(Semaphore, capacity)
11 | }
12 |
13 | // Acquire : acquire one resource
14 | func (s Semaphore) Acquire() {
15 | s.P(1)
16 | }
17 |
18 | // Release : release one resource
19 | func (s Semaphore) Release() {
20 | s.V(1)
21 | }
22 |
23 | // P : acquire n resources
24 | func (s Semaphore) P(n int) {
25 | e := empty{}
26 | for i := 0; i < n; i++ {
27 | s <- e
28 | }
29 | }
30 |
31 | // V : release n resources
32 | func (s Semaphore) V(n int) {
33 | for i := 0; i < n; i++ {
34 | <-s
35 | }
36 | }
37 |
--------------------------------------------------------------------------------
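
The test below walks the repository with fswalker; here is a smaller self-contained sketch of the same bounded-concurrency pattern. The capacity of 4 and the number of jobs are arbitrary.

package main

import (
	"fmt"
	"sync"

	"github.com/0xrawsec/golang-utils/sync/semaphore"
)

func main() {
	var wg sync.WaitGroup
	sem := semaphore.New(4) // at most 4 jobs run at the same time

	for i := 0; i < 20; i++ {
		i := i // capture the loop variable for the goroutine
		wg.Add(1)
		go func() {
			defer wg.Done()
			sem.Acquire()
			defer sem.Release()
			fmt.Println("processing job", i)
		}()
	}
	wg.Wait()
}
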
/sync/semaphore/semaphore_test.go:
--------------------------------------------------------------------------------
1 | package semaphore
2 |
3 | import (
4 | "io/ioutil"
5 | "path/filepath"
6 | "sync"
7 | "testing"
8 |
9 | "github.com/0xrawsec/golang-utils/fsutil/fswalker"
10 | )
11 |
12 | func TestSemaphore(t *testing.T) {
13 | var wg sync.WaitGroup
14 | sem := New(1024)
15 | for wi := range fswalker.Walk("../../..") {
16 | if wi.Err != nil {
17 | panic(wi.Err)
18 | }
19 | for _, fileInfo := range wi.Files {
20 | wi, fileInfo := wi, fileInfo // capture loop variables used inside the goroutine
21 | wg.Add(1)
22 | go func() {
23 | sem.Acquire()
24 | defer sem.Release()
25 | t.Logf("Reading: %s", filepath.Join(wi.Dirpath, fileInfo.Name()))
26 | ioutil.ReadFile(filepath.Join(wi.Dirpath, fileInfo.Name()))
27 | defer wg.Done()
28 | }()
29 | }
30 | }
31 | wg.Wait()
32 | }
33 |
--------------------------------------------------------------------------------