├── .gitignore
├── LICENSE
├── README.md
├── compose
├── README.md
├── aliases.sh
├── backup
│ ├── Dockerfile
│ └── backup.sh
├── cln
│ ├── Dockerfile
│ └── entrypoint.sh
├── docker-compose.override-cln.yml
├── docker-compose.override-lnd.yml
├── docker-compose.yml
├── env-sample
│ ├── README.md
│ ├── clntn
│ │ ├── bitcoin.conf
│ │ ├── cln_config
│ │ ├── compose.env
│ │ ├── robosats.env
│ │ ├── tor
│ │ │ └── .gitkeep
│ │ └── torrc
│ ├── lndmn
│ │ ├── bitcoin.conf
│ │ └── lnd.conf
│ └── lndtn
│ │ ├── bitcoin.conf
│ │ ├── compose.env
│ │ ├── lnd.conf
│ │ ├── lnd_autounlock_pwd
│ │ ├── relay.strfry.conf
│ │ ├── robosats.env
│ │ ├── strfry.conf
│ │ ├── supervisord.conf
│ │ ├── thunderhub-accounts.yml
│ │ ├── thunderhub.env
│ │ ├── tor
│ │ └── .gitkeep
│ │ └── torrc
├── hidden-service.md
├── i2p
│ ├── docker-compose.yml
│ └── run.sh
├── lndg
│ └── Dockerfile
├── nginx
│ ├── mn.conf.d
│ │ └── local.conf
│ ├── mn.well-known
│ │ └── .gitkeep
│ ├── tn.conf.d
│ │ └── local.conf
│ └── tn.well-known
│ │ └── .gitkeep
├── relay
│ ├── Dockerfile
│ ├── README.md
│ ├── crontab
│ ├── entrypoint.sh
│ ├── mn.external_urls.txt
│ ├── mn.federation_urls.txt
│ ├── sync_external.sh
│ ├── sync_federation.sh
│ ├── tn.external_urls.txt
│ └── tn.federation_urls.txt
├── strfry
│ ├── Dockerfile
│ ├── README.md
│ ├── crontab
│ ├── entrypoint.sh
│ ├── mn.external_urls.txt
│ ├── mn.federation_urls.txt
│ ├── sync_external.sh
│ ├── sync_federation.sh
│ ├── tn.external_urls.txt
│ └── tn.federation_urls.txt
├── systemd.md
└── tor
│ ├── Dockerfile
│ └── entrypoint.sh
├── k8s
├── README.md
├── base
│ ├── bitcoind
│ │ ├── configmap.yml
│ │ ├── pvc.yml
│ │ ├── secret.yml
│ │ ├── service.yml
│ │ └── statefulset.yml
│ ├── celery
│ │ ├── beat-deployment.yml
│ │ └── worker-deployment.yml
│ ├── command
│ │ ├── clean-orders-deployment.yml
│ │ ├── follow-invoices-deployment.yml
│ │ └── telegram-watcher-deployment.yml
│ ├── coredns-configmap.yml
│ ├── daphne
│ │ ├── deployment.yml
│ │ └── service.yml
│ ├── gunicorn
│ │ ├── deployment.yml
│ │ └── service.yml
│ ├── i2p
│ │ ├── configmap.yml
│ │ ├── deployment.yml
│ │ └── service.yml
│ ├── kustomization.yml
│ ├── litd
│ │ ├── configmap.yml
│ │ ├── pvc.yml
│ │ ├── secret.yml
│ │ ├── service.yml
│ │ └── statefulset.yml
│ ├── lnd
│ │ ├── configmap.yml
│ │ ├── pvc.yml
│ │ ├── secret.yml
│ │ ├── service.yml
│ │ └── statefulset.yml
│ ├── nginx
│ │ ├── configmap.yml
│ │ ├── deployment.yml
│ │ ├── service.yml
│ │ └── static-pvc.yml
│ ├── openebs-local-storageclass.yml
│ ├── postgres
│ │ ├── pvc.yml
│ │ ├── secret.yml
│ │ ├── service.yml
│ │ └── statefulset.yml
│ ├── redis
│ │ ├── pvc.yml
│ │ ├── service.yml
│ │ └── statefulset.yml
│ ├── robosats-configmap.yml
│ ├── robosats-secret.yml
│ ├── thub
│ │ ├── configmap.yml
│ │ ├── deployment.yml
│ │ └── service.yml
│ └── tor
│ │ ├── configmap.yml
│ │ ├── deployment.yml
│ │ ├── onion-secret.yml
│ │ ├── pvc.yml
│ │ ├── service.yml
│ │ ├── tor-secret.yml
│ │ ├── ~install.yml
│ │ ├── ~onionservice.yml
│ │ └── ~readme.md
└── overlays
│ ├── development
│ ├── kustomization.yml
│ └── namespace.yml
│ └── testnet
│ ├── kustomization.yml
│ └── namespace.yml
└── web
├── .gitignore
├── custom_nginx.conf
├── docker-compose.yml
├── readme.md
├── serve_misc
└── readme.md
└── torrc
/.gitignore:
--------------------------------------------------------------------------------
1 | /compose/env
2 | /web/tor
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | GNU AFFERO GENERAL PUBLIC LICENSE
2 | Version 3, 19 November 2007
3 |
4 | Copyright (C) 2007 Free Software Foundation, Inc.
5 | Everyone is permitted to copy and distribute verbatim copies
6 | of this license document, but changing it is not allowed.
7 |
8 | Preamble
9 |
10 | The GNU Affero General Public License is a free, copyleft license for
11 | software and other kinds of works, specifically designed to ensure
12 | cooperation with the community in the case of network server software.
13 |
14 | The licenses for most software and other practical works are designed
15 | to take away your freedom to share and change the works. By contrast,
16 | our General Public Licenses are intended to guarantee your freedom to
17 | share and change all versions of a program--to make sure it remains free
18 | software for all its users.
19 |
20 | When we speak of free software, we are referring to freedom, not
21 | price. Our General Public Licenses are designed to make sure that you
22 | have the freedom to distribute copies of free software (and charge for
23 | them if you wish), that you receive source code or can get it if you
24 | want it, that you can change the software or use pieces of it in new
25 | free programs, and that you know you can do these things.
26 |
27 | Developers that use our General Public Licenses protect your rights
28 | with two steps: (1) assert copyright on the software, and (2) offer
29 | you this License which gives you legal permission to copy, distribute
30 | and/or modify the software.
31 |
32 | A secondary benefit of defending all users' freedom is that
33 | improvements made in alternate versions of the program, if they
34 | receive widespread use, become available for other developers to
35 | incorporate. Many developers of free software are heartened and
36 | encouraged by the resulting cooperation. However, in the case of
37 | software used on network servers, this result may fail to come about.
38 | The GNU General Public License permits making a modified version and
39 | letting the public access it on a server without ever releasing its
40 | source code to the public.
41 |
42 | The GNU Affero General Public License is designed specifically to
43 | ensure that, in such cases, the modified source code becomes available
44 | to the community. It requires the operator of a network server to
45 | provide the source code of the modified version running there to the
46 | users of that server. Therefore, public use of a modified version, on
47 | a publicly accessible server, gives the public access to the source
48 | code of the modified version.
49 |
50 | An older license, called the Affero General Public License and
51 | published by Affero, was designed to accomplish similar goals. This is
52 | a different license, not a version of the Affero GPL, but Affero has
53 | released a new version of the Affero GPL which permits relicensing under
54 | this license.
55 |
56 | The precise terms and conditions for copying, distribution and
57 | modification follow.
58 |
59 | TERMS AND CONDITIONS
60 |
61 | 0. Definitions.
62 |
63 | "This License" refers to version 3 of the GNU Affero General Public License.
64 |
65 | "Copyright" also means copyright-like laws that apply to other kinds of
66 | works, such as semiconductor masks.
67 |
68 | "The Program" refers to any copyrightable work licensed under this
69 | License. Each licensee is addressed as "you". "Licensees" and
70 | "recipients" may be individuals or organizations.
71 |
72 | To "modify" a work means to copy from or adapt all or part of the work
73 | in a fashion requiring copyright permission, other than the making of an
74 | exact copy. The resulting work is called a "modified version" of the
75 | earlier work or a work "based on" the earlier work.
76 |
77 | A "covered work" means either the unmodified Program or a work based
78 | on the Program.
79 |
80 | To "propagate" a work means to do anything with it that, without
81 | permission, would make you directly or secondarily liable for
82 | infringement under applicable copyright law, except executing it on a
83 | computer or modifying a private copy. Propagation includes copying,
84 | distribution (with or without modification), making available to the
85 | public, and in some countries other activities as well.
86 |
87 | To "convey" a work means any kind of propagation that enables other
88 | parties to make or receive copies. Mere interaction with a user through
89 | a computer network, with no transfer of a copy, is not conveying.
90 |
91 | An interactive user interface displays "Appropriate Legal Notices"
92 | to the extent that it includes a convenient and prominently visible
93 | feature that (1) displays an appropriate copyright notice, and (2)
94 | tells the user that there is no warranty for the work (except to the
95 | extent that warranties are provided), that licensees may convey the
96 | work under this License, and how to view a copy of this License. If
97 | the interface presents a list of user commands or options, such as a
98 | menu, a prominent item in the list meets this criterion.
99 |
100 | 1. Source Code.
101 |
102 | The "source code" for a work means the preferred form of the work
103 | for making modifications to it. "Object code" means any non-source
104 | form of a work.
105 |
106 | A "Standard Interface" means an interface that either is an official
107 | standard defined by a recognized standards body, or, in the case of
108 | interfaces specified for a particular programming language, one that
109 | is widely used among developers working in that language.
110 |
111 | The "System Libraries" of an executable work include anything, other
112 | than the work as a whole, that (a) is included in the normal form of
113 | packaging a Major Component, but which is not part of that Major
114 | Component, and (b) serves only to enable use of the work with that
115 | Major Component, or to implement a Standard Interface for which an
116 | implementation is available to the public in source code form. A
117 | "Major Component", in this context, means a major essential component
118 | (kernel, window system, and so on) of the specific operating system
119 | (if any) on which the executable work runs, or a compiler used to
120 | produce the work, or an object code interpreter used to run it.
121 |
122 | The "Corresponding Source" for a work in object code form means all
123 | the source code needed to generate, install, and (for an executable
124 | work) run the object code and to modify the work, including scripts to
125 | control those activities. However, it does not include the work's
126 | System Libraries, or general-purpose tools or generally available free
127 | programs which are used unmodified in performing those activities but
128 | which are not part of the work. For example, Corresponding Source
129 | includes interface definition files associated with source files for
130 | the work, and the source code for shared libraries and dynamically
131 | linked subprograms that the work is specifically designed to require,
132 | such as by intimate data communication or control flow between those
133 | subprograms and other parts of the work.
134 |
135 | The Corresponding Source need not include anything that users
136 | can regenerate automatically from other parts of the Corresponding
137 | Source.
138 |
139 | The Corresponding Source for a work in source code form is that
140 | same work.
141 |
142 | 2. Basic Permissions.
143 |
144 | All rights granted under this License are granted for the term of
145 | copyright on the Program, and are irrevocable provided the stated
146 | conditions are met. This License explicitly affirms your unlimited
147 | permission to run the unmodified Program. The output from running a
148 | covered work is covered by this License only if the output, given its
149 | content, constitutes a covered work. This License acknowledges your
150 | rights of fair use or other equivalent, as provided by copyright law.
151 |
152 | You may make, run and propagate covered works that you do not
153 | convey, without conditions so long as your license otherwise remains
154 | in force. You may convey covered works to others for the sole purpose
155 | of having them make modifications exclusively for you, or provide you
156 | with facilities for running those works, provided that you comply with
157 | the terms of this License in conveying all material for which you do
158 | not control copyright. Those thus making or running the covered works
159 | for you must do so exclusively on your behalf, under your direction
160 | and control, on terms that prohibit them from making any copies of
161 | your copyrighted material outside their relationship with you.
162 |
163 | Conveying under any other circumstances is permitted solely under
164 | the conditions stated below. Sublicensing is not allowed; section 10
165 | makes it unnecessary.
166 |
167 | 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
168 |
169 | No covered work shall be deemed part of an effective technological
170 | measure under any applicable law fulfilling obligations under article
171 | 11 of the WIPO copyright treaty adopted on 20 December 1996, or
172 | similar laws prohibiting or restricting circumvention of such
173 | measures.
174 |
175 | When you convey a covered work, you waive any legal power to forbid
176 | circumvention of technological measures to the extent such circumvention
177 | is effected by exercising rights under this License with respect to
178 | the covered work, and you disclaim any intention to limit operation or
179 | modification of the work as a means of enforcing, against the work's
180 | users, your or third parties' legal rights to forbid circumvention of
181 | technological measures.
182 |
183 | 4. Conveying Verbatim Copies.
184 |
185 | You may convey verbatim copies of the Program's source code as you
186 | receive it, in any medium, provided that you conspicuously and
187 | appropriately publish on each copy an appropriate copyright notice;
188 | keep intact all notices stating that this License and any
189 | non-permissive terms added in accord with section 7 apply to the code;
190 | keep intact all notices of the absence of any warranty; and give all
191 | recipients a copy of this License along with the Program.
192 |
193 | You may charge any price or no price for each copy that you convey,
194 | and you may offer support or warranty protection for a fee.
195 |
196 | 5. Conveying Modified Source Versions.
197 |
198 | You may convey a work based on the Program, or the modifications to
199 | produce it from the Program, in the form of source code under the
200 | terms of section 4, provided that you also meet all of these conditions:
201 |
202 | a) The work must carry prominent notices stating that you modified
203 | it, and giving a relevant date.
204 |
205 | b) The work must carry prominent notices stating that it is
206 | released under this License and any conditions added under section
207 | 7. This requirement modifies the requirement in section 4 to
208 | "keep intact all notices".
209 |
210 | c) You must license the entire work, as a whole, under this
211 | License to anyone who comes into possession of a copy. This
212 | License will therefore apply, along with any applicable section 7
213 | additional terms, to the whole of the work, and all its parts,
214 | regardless of how they are packaged. This License gives no
215 | permission to license the work in any other way, but it does not
216 | invalidate such permission if you have separately received it.
217 |
218 | d) If the work has interactive user interfaces, each must display
219 | Appropriate Legal Notices; however, if the Program has interactive
220 | interfaces that do not display Appropriate Legal Notices, your
221 | work need not make them do so.
222 |
223 | A compilation of a covered work with other separate and independent
224 | works, which are not by their nature extensions of the covered work,
225 | and which are not combined with it such as to form a larger program,
226 | in or on a volume of a storage or distribution medium, is called an
227 | "aggregate" if the compilation and its resulting copyright are not
228 | used to limit the access or legal rights of the compilation's users
229 | beyond what the individual works permit. Inclusion of a covered work
230 | in an aggregate does not cause this License to apply to the other
231 | parts of the aggregate.
232 |
233 | 6. Conveying Non-Source Forms.
234 |
235 | You may convey a covered work in object code form under the terms
236 | of sections 4 and 5, provided that you also convey the
237 | machine-readable Corresponding Source under the terms of this License,
238 | in one of these ways:
239 |
240 | a) Convey the object code in, or embodied in, a physical product
241 | (including a physical distribution medium), accompanied by the
242 | Corresponding Source fixed on a durable physical medium
243 | customarily used for software interchange.
244 |
245 | b) Convey the object code in, or embodied in, a physical product
246 | (including a physical distribution medium), accompanied by a
247 | written offer, valid for at least three years and valid for as
248 | long as you offer spare parts or customer support for that product
249 | model, to give anyone who possesses the object code either (1) a
250 | copy of the Corresponding Source for all the software in the
251 | product that is covered by this License, on a durable physical
252 | medium customarily used for software interchange, for a price no
253 | more than your reasonable cost of physically performing this
254 | conveying of source, or (2) access to copy the
255 | Corresponding Source from a network server at no charge.
256 |
257 | c) Convey individual copies of the object code with a copy of the
258 | written offer to provide the Corresponding Source. This
259 | alternative is allowed only occasionally and noncommercially, and
260 | only if you received the object code with such an offer, in accord
261 | with subsection 6b.
262 |
263 | d) Convey the object code by offering access from a designated
264 | place (gratis or for a charge), and offer equivalent access to the
265 | Corresponding Source in the same way through the same place at no
266 | further charge. You need not require recipients to copy the
267 | Corresponding Source along with the object code. If the place to
268 | copy the object code is a network server, the Corresponding Source
269 | may be on a different server (operated by you or a third party)
270 | that supports equivalent copying facilities, provided you maintain
271 | clear directions next to the object code saying where to find the
272 | Corresponding Source. Regardless of what server hosts the
273 | Corresponding Source, you remain obligated to ensure that it is
274 | available for as long as needed to satisfy these requirements.
275 |
276 | e) Convey the object code using peer-to-peer transmission, provided
277 | you inform other peers where the object code and Corresponding
278 | Source of the work are being offered to the general public at no
279 | charge under subsection 6d.
280 |
281 | A separable portion of the object code, whose source code is excluded
282 | from the Corresponding Source as a System Library, need not be
283 | included in conveying the object code work.
284 |
285 | A "User Product" is either (1) a "consumer product", which means any
286 | tangible personal property which is normally used for personal, family,
287 | or household purposes, or (2) anything designed or sold for incorporation
288 | into a dwelling. In determining whether a product is a consumer product,
289 | doubtful cases shall be resolved in favor of coverage. For a particular
290 | product received by a particular user, "normally used" refers to a
291 | typical or common use of that class of product, regardless of the status
292 | of the particular user or of the way in which the particular user
293 | actually uses, or expects or is expected to use, the product. A product
294 | is a consumer product regardless of whether the product has substantial
295 | commercial, industrial or non-consumer uses, unless such uses represent
296 | the only significant mode of use of the product.
297 |
298 | "Installation Information" for a User Product means any methods,
299 | procedures, authorization keys, or other information required to install
300 | and execute modified versions of a covered work in that User Product from
301 | a modified version of its Corresponding Source. The information must
302 | suffice to ensure that the continued functioning of the modified object
303 | code is in no case prevented or interfered with solely because
304 | modification has been made.
305 |
306 | If you convey an object code work under this section in, or with, or
307 | specifically for use in, a User Product, and the conveying occurs as
308 | part of a transaction in which the right of possession and use of the
309 | User Product is transferred to the recipient in perpetuity or for a
310 | fixed term (regardless of how the transaction is characterized), the
311 | Corresponding Source conveyed under this section must be accompanied
312 | by the Installation Information. But this requirement does not apply
313 | if neither you nor any third party retains the ability to install
314 | modified object code on the User Product (for example, the work has
315 | been installed in ROM).
316 |
317 | The requirement to provide Installation Information does not include a
318 | requirement to continue to provide support service, warranty, or updates
319 | for a work that has been modified or installed by the recipient, or for
320 | the User Product in which it has been modified or installed. Access to a
321 | network may be denied when the modification itself materially and
322 | adversely affects the operation of the network or violates the rules and
323 | protocols for communication across the network.
324 |
325 | Corresponding Source conveyed, and Installation Information provided,
326 | in accord with this section must be in a format that is publicly
327 | documented (and with an implementation available to the public in
328 | source code form), and must require no special password or key for
329 | unpacking, reading or copying.
330 |
331 | 7. Additional Terms.
332 |
333 | "Additional permissions" are terms that supplement the terms of this
334 | License by making exceptions from one or more of its conditions.
335 | Additional permissions that are applicable to the entire Program shall
336 | be treated as though they were included in this License, to the extent
337 | that they are valid under applicable law. If additional permissions
338 | apply only to part of the Program, that part may be used separately
339 | under those permissions, but the entire Program remains governed by
340 | this License without regard to the additional permissions.
341 |
342 | When you convey a copy of a covered work, you may at your option
343 | remove any additional permissions from that copy, or from any part of
344 | it. (Additional permissions may be written to require their own
345 | removal in certain cases when you modify the work.) You may place
346 | additional permissions on material, added by you to a covered work,
347 | for which you have or can give appropriate copyright permission.
348 |
349 | Notwithstanding any other provision of this License, for material you
350 | add to a covered work, you may (if authorized by the copyright holders of
351 | that material) supplement the terms of this License with terms:
352 |
353 | a) Disclaiming warranty or limiting liability differently from the
354 | terms of sections 15 and 16 of this License; or
355 |
356 | b) Requiring preservation of specified reasonable legal notices or
357 | author attributions in that material or in the Appropriate Legal
358 | Notices displayed by works containing it; or
359 |
360 | c) Prohibiting misrepresentation of the origin of that material, or
361 | requiring that modified versions of such material be marked in
362 | reasonable ways as different from the original version; or
363 |
364 | d) Limiting the use for publicity purposes of names of licensors or
365 | authors of the material; or
366 |
367 | e) Declining to grant rights under trademark law for use of some
368 | trade names, trademarks, or service marks; or
369 |
370 | f) Requiring indemnification of licensors and authors of that
371 | material by anyone who conveys the material (or modified versions of
372 | it) with contractual assumptions of liability to the recipient, for
373 | any liability that these contractual assumptions directly impose on
374 | those licensors and authors.
375 |
376 | All other non-permissive additional terms are considered "further
377 | restrictions" within the meaning of section 10. If the Program as you
378 | received it, or any part of it, contains a notice stating that it is
379 | governed by this License along with a term that is a further
380 | restriction, you may remove that term. If a license document contains
381 | a further restriction but permits relicensing or conveying under this
382 | License, you may add to a covered work material governed by the terms
383 | of that license document, provided that the further restriction does
384 | not survive such relicensing or conveying.
385 |
386 | If you add terms to a covered work in accord with this section, you
387 | must place, in the relevant source files, a statement of the
388 | additional terms that apply to those files, or a notice indicating
389 | where to find the applicable terms.
390 |
391 | Additional terms, permissive or non-permissive, may be stated in the
392 | form of a separately written license, or stated as exceptions;
393 | the above requirements apply either way.
394 |
395 | 8. Termination.
396 |
397 | You may not propagate or modify a covered work except as expressly
398 | provided under this License. Any attempt otherwise to propagate or
399 | modify it is void, and will automatically terminate your rights under
400 | this License (including any patent licenses granted under the third
401 | paragraph of section 11).
402 |
403 | However, if you cease all violation of this License, then your
404 | license from a particular copyright holder is reinstated (a)
405 | provisionally, unless and until the copyright holder explicitly and
406 | finally terminates your license, and (b) permanently, if the copyright
407 | holder fails to notify you of the violation by some reasonable means
408 | prior to 60 days after the cessation.
409 |
410 | Moreover, your license from a particular copyright holder is
411 | reinstated permanently if the copyright holder notifies you of the
412 | violation by some reasonable means, this is the first time you have
413 | received notice of violation of this License (for any work) from that
414 | copyright holder, and you cure the violation prior to 30 days after
415 | your receipt of the notice.
416 |
417 | Termination of your rights under this section does not terminate the
418 | licenses of parties who have received copies or rights from you under
419 | this License. If your rights have been terminated and not permanently
420 | reinstated, you do not qualify to receive new licenses for the same
421 | material under section 10.
422 |
423 | 9. Acceptance Not Required for Having Copies.
424 |
425 | You are not required to accept this License in order to receive or
426 | run a copy of the Program. Ancillary propagation of a covered work
427 | occurring solely as a consequence of using peer-to-peer transmission
428 | to receive a copy likewise does not require acceptance. However,
429 | nothing other than this License grants you permission to propagate or
430 | modify any covered work. These actions infringe copyright if you do
431 | not accept this License. Therefore, by modifying or propagating a
432 | covered work, you indicate your acceptance of this License to do so.
433 |
434 | 10. Automatic Licensing of Downstream Recipients.
435 |
436 | Each time you convey a covered work, the recipient automatically
437 | receives a license from the original licensors, to run, modify and
438 | propagate that work, subject to this License. You are not responsible
439 | for enforcing compliance by third parties with this License.
440 |
441 | An "entity transaction" is a transaction transferring control of an
442 | organization, or substantially all assets of one, or subdividing an
443 | organization, or merging organizations. If propagation of a covered
444 | work results from an entity transaction, each party to that
445 | transaction who receives a copy of the work also receives whatever
446 | licenses to the work the party's predecessor in interest had or could
447 | give under the previous paragraph, plus a right to possession of the
448 | Corresponding Source of the work from the predecessor in interest, if
449 | the predecessor has it or can get it with reasonable efforts.
450 |
451 | You may not impose any further restrictions on the exercise of the
452 | rights granted or affirmed under this License. For example, you may
453 | not impose a license fee, royalty, or other charge for exercise of
454 | rights granted under this License, and you may not initiate litigation
455 | (including a cross-claim or counterclaim in a lawsuit) alleging that
456 | any patent claim is infringed by making, using, selling, offering for
457 | sale, or importing the Program or any portion of it.
458 |
459 | 11. Patents.
460 |
461 | A "contributor" is a copyright holder who authorizes use under this
462 | License of the Program or a work on which the Program is based. The
463 | work thus licensed is called the contributor's "contributor version".
464 |
465 | A contributor's "essential patent claims" are all patent claims
466 | owned or controlled by the contributor, whether already acquired or
467 | hereafter acquired, that would be infringed by some manner, permitted
468 | by this License, of making, using, or selling its contributor version,
469 | but do not include claims that would be infringed only as a
470 | consequence of further modification of the contributor version. For
471 | purposes of this definition, "control" includes the right to grant
472 | patent sublicenses in a manner consistent with the requirements of
473 | this License.
474 |
475 | Each contributor grants you a non-exclusive, worldwide, royalty-free
476 | patent license under the contributor's essential patent claims, to
477 | make, use, sell, offer for sale, import and otherwise run, modify and
478 | propagate the contents of its contributor version.
479 |
480 | In the following three paragraphs, a "patent license" is any express
481 | agreement or commitment, however denominated, not to enforce a patent
482 | (such as an express permission to practice a patent or covenant not to
483 | sue for patent infringement). To "grant" such a patent license to a
484 | party means to make such an agreement or commitment not to enforce a
485 | patent against the party.
486 |
487 | If you convey a covered work, knowingly relying on a patent license,
488 | and the Corresponding Source of the work is not available for anyone
489 | to copy, free of charge and under the terms of this License, through a
490 | publicly available network server or other readily accessible means,
491 | then you must either (1) cause the Corresponding Source to be so
492 | available, or (2) arrange to deprive yourself of the benefit of the
493 | patent license for this particular work, or (3) arrange, in a manner
494 | consistent with the requirements of this License, to extend the patent
495 | license to downstream recipients. "Knowingly relying" means you have
496 | actual knowledge that, but for the patent license, your conveying the
497 | covered work in a country, or your recipient's use of the covered work
498 | in a country, would infringe one or more identifiable patents in that
499 | country that you have reason to believe are valid.
500 |
501 | If, pursuant to or in connection with a single transaction or
502 | arrangement, you convey, or propagate by procuring conveyance of, a
503 | covered work, and grant a patent license to some of the parties
504 | receiving the covered work authorizing them to use, propagate, modify
505 | or convey a specific copy of the covered work, then the patent license
506 | you grant is automatically extended to all recipients of the covered
507 | work and works based on it.
508 |
509 | A patent license is "discriminatory" if it does not include within
510 | the scope of its coverage, prohibits the exercise of, or is
511 | conditioned on the non-exercise of one or more of the rights that are
512 | specifically granted under this License. You may not convey a covered
513 | work if you are a party to an arrangement with a third party that is
514 | in the business of distributing software, under which you make payment
515 | to the third party based on the extent of your activity of conveying
516 | the work, and under which the third party grants, to any of the
517 | parties who would receive the covered work from you, a discriminatory
518 | patent license (a) in connection with copies of the covered work
519 | conveyed by you (or copies made from those copies), or (b) primarily
520 | for and in connection with specific products or compilations that
521 | contain the covered work, unless you entered into that arrangement,
522 | or that patent license was granted, prior to 28 March 2007.
523 |
524 | Nothing in this License shall be construed as excluding or limiting
525 | any implied license or other defenses to infringement that may
526 | otherwise be available to you under applicable patent law.
527 |
528 | 12. No Surrender of Others' Freedom.
529 |
530 | If conditions are imposed on you (whether by court order, agreement or
531 | otherwise) that contradict the conditions of this License, they do not
532 | excuse you from the conditions of this License. If you cannot convey a
533 | covered work so as to satisfy simultaneously your obligations under this
534 | License and any other pertinent obligations, then as a consequence you may
535 | not convey it at all. For example, if you agree to terms that obligate you
536 | to collect a royalty for further conveying from those to whom you convey
537 | the Program, the only way you could satisfy both those terms and this
538 | License would be to refrain entirely from conveying the Program.
539 |
540 | 13. Remote Network Interaction; Use with the GNU General Public License.
541 |
542 | Notwithstanding any other provision of this License, if you modify the
543 | Program, your modified version must prominently offer all users
544 | interacting with it remotely through a computer network (if your version
545 | supports such interaction) an opportunity to receive the Corresponding
546 | Source of your version by providing access to the Corresponding Source
547 | from a network server at no charge, through some standard or customary
548 | means of facilitating copying of software. This Corresponding Source
549 | shall include the Corresponding Source for any work covered by version 3
550 | of the GNU General Public License that is incorporated pursuant to the
551 | following paragraph.
552 |
553 | Notwithstanding any other provision of this License, you have
554 | permission to link or combine any covered work with a work licensed
555 | under version 3 of the GNU General Public License into a single
556 | combined work, and to convey the resulting work. The terms of this
557 | License will continue to apply to the part which is the covered work,
558 | but the work with which it is combined will remain governed by version
559 | 3 of the GNU General Public License.
560 |
561 | 14. Revised Versions of this License.
562 |
563 | The Free Software Foundation may publish revised and/or new versions of
564 | the GNU Affero General Public License from time to time. Such new versions
565 | will be similar in spirit to the present version, but may differ in detail to
566 | address new problems or concerns.
567 |
568 | Each version is given a distinguishing version number. If the
569 | Program specifies that a certain numbered version of the GNU Affero General
570 | Public License "or any later version" applies to it, you have the
571 | option of following the terms and conditions either of that numbered
572 | version or of any later version published by the Free Software
573 | Foundation. If the Program does not specify a version number of the
574 | GNU Affero General Public License, you may choose any version ever published
575 | by the Free Software Foundation.
576 |
577 | If the Program specifies that a proxy can decide which future
578 | versions of the GNU Affero General Public License can be used, that proxy's
579 | public statement of acceptance of a version permanently authorizes you
580 | to choose that version for the Program.
581 |
582 | Later license versions may give you additional or different
583 | permissions. However, no additional obligations are imposed on any
584 | author or copyright holder as a result of your choosing to follow a
585 | later version.
586 |
587 | 15. Disclaimer of Warranty.
588 |
589 | THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
590 | APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
591 | HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
592 | OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
593 | THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
594 | PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
595 | IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
596 | ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
597 |
598 | 16. Limitation of Liability.
599 |
600 | IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
601 | WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
602 | THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
603 | GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
604 | USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
605 | DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
606 | PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
607 | EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
608 | SUCH DAMAGES.
609 |
610 | 17. Interpretation of Sections 15 and 16.
611 |
612 | If the disclaimer of warranty and limitation of liability provided
613 | above cannot be given local legal effect according to their terms,
614 | reviewing courts shall apply local law that most closely approximates
615 | an absolute waiver of all civil liability in connection with the
616 | Program, unless a warranty or assumption of liability accompanies a
617 | copy of the Program in return for a fee.
618 |
619 | END OF TERMS AND CONDITIONS
620 |
621 | How to Apply These Terms to Your New Programs
622 |
623 | If you develop a new program, and you want it to be of the greatest
624 | possible use to the public, the best way to achieve this is to make it
625 | free software which everyone can redistribute and change under these terms.
626 |
627 | To do so, attach the following notices to the program. It is safest
628 | to attach them to the start of each source file to most effectively
629 | state the exclusion of warranty; and each file should have at least
630 | the "copyright" line and a pointer to where the full notice is found.
631 |
632 |
633 | Copyright (C) <year>  <name of author>
634 |
635 | This program is free software: you can redistribute it and/or modify
636 | it under the terms of the GNU Affero General Public License as published by
637 | the Free Software Foundation, either version 3 of the License, or
638 | (at your option) any later version.
639 |
640 | This program is distributed in the hope that it will be useful,
641 | but WITHOUT ANY WARRANTY; without even the implied warranty of
642 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
643 | GNU Affero General Public License for more details.
644 |
645 | You should have received a copy of the GNU Affero General Public License
646 | along with this program. If not, see <https://www.gnu.org/licenses/>.
647 |
648 | Also add information on how to contact you by electronic and paper mail.
649 |
650 | If your software can interact with users remotely through a computer
651 | network, you should also make sure that it provides a way for users to
652 | get its source. For example, if your program is a web application, its
653 | interface could display a "Source" link that leads users to an archive
654 | of the code. There are many ways you could offer source, and different
655 | solutions will be better for different programs; see section 13 for the
656 | specific requirements.
657 |
658 | You should also get your employer (if you work as a programmer) or school,
659 | if any, to sign a "copyright disclaimer" for the program, if necessary.
660 | For more information on this, and how to apply and follow the GNU AGPL, see
661 | <https://www.gnu.org/licenses/>.
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # RoboSats Coordinator Deployment Orchestrations
2 |
3 | This repository holds two full-blown RoboSats coordinator deployment orchestrations with decent defaults. There are two flavours: docker-compose (`/compose`) and kubernetes (`/k8s`). The docker-compose orchestration is better tested and better suited for new coordinators. The kubernetes orchestration is still work in progress and better suited for high availability and large scale coordinators: any help to finish the k8s orchestration is appreciated and will be rewarded with Sats.
4 |
5 | The tags/releases versioning for this repository matches `robosats/robosats` (semver) with a subpatch sequence. For example, for `v0.6.0-alpha` the first orchestration release will be `v0.6.0~1`, therefore allowing bug fixing and faster iteration of the orchestration codebase if needed.
--------------------------------------------------------------------------------
/compose/README.md:
--------------------------------------------------------------------------------
1 | # Docker Compose based orchestration for RoboSats Coordinator
2 | Dockerized RoboSats stack. Docker compose with services for nginx, redis, gunicorn, daphne, bitcoind, lnd/cln, back-up, celery, celery-beats and other tools.
3 |
4 | # Setup
5 |
6 | Let's assume you are using a newly installed OS. For this setup guide we are using `Ubuntu server 22.04 (LTS)`
7 |
8 | ## Install TOR
9 | Ubuntu users are advised to install Tor from the Tor Project's own repositories, rather than their OS repos.
10 |
11 | Follow this guide for information about adding the Tor Project Repositories to your sources:
12 |
13 | https://linuxconfig.org/install-tor-proxy-on-ubuntu-20-04-linux
14 |
15 | ```
16 | sudo apt install tor -y
17 | ```
18 |
19 | You can optionally torify the shell persistently
20 | ```
21 | echo ". torsocks on" >> ~/.bashrc
22 | ```
23 |
24 | In case you need to turn off the torification in the future
25 | ```
26 | source torsocks off
27 | ```
28 |
29 | ## Install Docker on Ubuntu
30 | Excerpt from https://docs.docker.com/engine/install/ubuntu/
31 |
32 | ```
33 | # Add Docker's official GPG key:
34 | sudo apt-get update
35 | sudo apt-get install ca-certificates curl gnupg
36 | sudo install -m 0755 -d /etc/apt/keyrings
37 | curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg
38 | sudo chmod a+r /etc/apt/keyrings/docker.gpg
39 |
40 | # Add the repository to Apt sources:
41 | echo \
42 | "deb [arch="$(dpkg --print-architecture)" signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu \
43 | "$(. /etc/os-release && echo "$VERSION_CODENAME")" stable" | \
44 | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null
45 | sudo apt-get update
46 |
47 | # Install
48 | sudo apt-get install docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin
49 |
50 | # Test
51 | sudo docker run hello-world
52 | ```
53 | You can optionally add a symlink to docker image and containers path to another path location
54 |
55 | ```
56 | sudo systemctl stop docker
57 |
58 | sudo rm -r /var/lib/docker
59 |
60 | ln -s /desired/path/docker /var/lib/docker
61 | ```
62 |
63 | And restart Docker service
64 |
65 | `
66 | service docker restart
67 | `
68 |
69 | ## Clone and configure RoboSats deploy
70 |
71 | Clone this repo
72 | ```
73 | git clone git@github.com:RoboSats/robosats-deploy.git
74 | cd robosats-deploy/compose
75 | ```
76 |
77 | Create or restore the environmental configuration files in new folder `/compose/env/` directory. You can use the `env-sample` files as a guide for your configuration, be exhaustive and make sure every setting is right. The file `compose.env` contains all the high level configuration for your orchestration.
78 |
79 | ```
80 | cp -r env-sample env
81 | ```
82 | Then edit and make sure the paths and configurations are right.
83 | ```
84 | nano env/{namespace}/compose...env
85 | nano env/{namespace}/robosats...env
86 | nano env/{namespace}/lnd.conf
87 | ...
88 | ```
89 | If you were already running `robosats-deploy/compose` in another machine and need to recover, simply bring your existing environmental files from your backup.
90 |
91 | In `/compose/env/compose...env` there is a variable named `SUFFIX`. This one is used to suffix all of your containers and configuration files. For example if you use `-tn` (for testnet), your bitcoind service will be called `btc-tn`; this is an effective way of creating namespaces. The example configuration in `/compose/env-sample/` uses the suffix `-lndtn`, for a LND testnet coordinator. This way, it is easy to run several coordinator orchestrations in the same machine. For example, you can use the `-lndmn` suffix for a LND mainnet coordinator configuration or `-clntn` for a CLN testnet configuration. You can also create alias shortcuts for each of your orchestrations.
92 |
93 | ## Use aliases
94 | Docker commands are lengthy. You can use aliases to make your task of operating a docker compose based robosats coordinator easier. Take a look at `/compose/aliases.sh` for some useful aliases and shortcuts.
95 |
96 | ## Example commands for a lnd testnet orchestration (-lndtn containers)
97 | If you install the aliases you can run the following shortcut commands:
98 |
99 | ```
100 | tn build # instead of docker compose -p lndtest --env-file /home/$(whoami)/robosats-deploy/compose/env/stack-lndtn.env -f /home/$(whoami)/robosats-deploy/compose/docker-compose.yml -f /home/$(whoami)/robosats-deploy/compose/docker-compose.override-lnd.yml build
101 | tn up -d
102 | # Now the full coordinator orchestration is running
103 | ```
104 |
105 | If this is a new coordinator installation, you need to create an admin RoboSats account. Make sure your superuser name matches the `ESCROW_USERNAME` in the `robosats...env` file, by default `"admin"` .
106 | ```
107 | tn-manage createsuperuser # `tn-manage` is the alias for `docker exec -it rs-lndtn python3 manage.py`
108 | # Enter a username (admin) and a password. Everything else can be skipped by pressing enter.
109 | # You can now visit the coordinator panel at "ip:port/coordinator" in your browser
110 | ```
111 |
112 | ```
113 | docker compose -p lndtest --env-file env/stack-lndtn.env build
114 | docker compose -p lndtest --env-file env/stack-lndtn.env up -d
115 | docker exec -it rs-lndtn cp -R frontend/static/frontend /usr/src/static
116 | docker exec -it rs-lndtn python3 manage.py createsuperuser
117 | docker compose -p lndtest --env-file env/stack-lndtn.env restart
118 | ```
119 | You could also just check all services logs
120 |
121 | `tn logs -f`
122 |
123 | Unlock or 'create' the lnd node
124 |
125 | `tn-lncli unlock`
126 |
127 | Create p2wkh addresses
128 |
129 | `tn-lncli newaddress p2wkh` (note without alias this command would be ``docker exec -it lnd-lndtn lncli --network=testnet newaddress p2wkh``)
130 |
131 | Wallet balance
132 |
133 | `tn-lncli walletbalance`
134 |
135 | Connect
136 |
137 | `tn-lncli connect node_id@ip:9735`
138 |
139 | Open channel
140 |
141 | `tn-lncli openchannel node_id --local_amt LOCAL_AMT --push_amt PUSH_AMT`
142 |
143 | ## If needed; this is how to clean restart the docker instance
144 | Stop the container(s) using the following command:
145 |
146 | `docker compose -p lndtest --env-file /home/$(whoami)/robosats-deploy/compose/env/stack-lndtn.env -f /home/$(whoami)/robosats-deploy/compose/docker-compose.yml -f /home/$(whoami)/robosats-deploy/compose/docker-compose.override-lnd.yml down`
147 | Delete all containers using the following command:
148 | `docker rm -f $(docker ps -a -q)`
149 | Delete all volumes using the following command:
150 | `docker volume rm $(docker volume ls -q)`
151 | Restart the containers using the following command:
152 | `docker compose -p robotest --env-file env/stack-lndtn.env up`
153 |
154 |
155 | Delete images
156 | `docker rmi $(docker images -f 'dangling=true' -q)`
157 |
158 | ## Add Onion services
159 |
160 | At the moment the RoboSats image does not use TorControl of the Tor container to automatically generate the Onion hidden service. It simply exposes the port (18000 in the `/compose/env-sample` testnet orchestration) and exposes a hidden service defined `/env/{namespace}/torrc`.
161 |
162 | You can edit `torrc` to add or remove services (e.g., expose Thunderhub as a hidden service)
163 | ```
164 | sudo nano /env/{namespace}/torrc
165 | ```
166 |
167 | ```
168 | # Robosats Testnet Onion Service
169 | HiddenServiceDir /var/lib/tor/robotest/
170 | HiddenServiceVersion 3
171 | HiddenServicePort 80 127.0.0.1:18000
172 | #... mainnet over robotest
173 | HiddenServicePort 8001 127.0.0.1:8000
174 | ```
175 |
176 | You can print the hidden service hostname.
177 | ```
178 | sudo cat /env/{namespace}/tor/robotest/hostname
179 | ```
180 | Note that if you try to now access your RoboSats instance by pasting this Onion address in your browser you will see a 400 Error. This is due to the hostname not being allowed by the backend. You have to edit your `/env/{namespace}/robosats.env` and add your `.....onion` as `HOST_NAME` or `HOST_NAME2`.
181 |
182 | And if you want so, you can replace the ed25519 keys to set your own custom hostname. You can mine a vanity onion with [mkp224o](https://github.com/cathugger/mkp224o)
183 |
184 | Additionally, you can also edit your machine's `/etc/tor/torrc` to create Onion endpoints to SSH remotely into your machine or to services to monitor your server (e.g Cockpit).
185 |
186 | ```
187 | # SSH Hidden Service
188 | HiddenServiceDir /var/lib/tor/sshd/
189 | HiddenServiceVersion 3
190 | HiddenServicePort 22 127.0.0.1:22
191 |
192 |
193 | # Management Services
194 | HiddenServiceDir /var/lib/tor/management/
195 | HiddenServiceVersion 3
196 | # Thunderhub
197 | HiddenServicePort 3000 127.0.0.1:3000
198 | # LIT
199 | HiddenServicePort 4000 127.0.0.1:8443
200 | # Cockpit
201 | HiddenServicePort 1000 127.0.0.1:9090
202 | ```
203 |
204 | Restart
205 |
206 | `sudo /etc/init.d/tor restart`
207 |
208 | # Install Cockpit
209 | Just a useful tool to monitor your machine that might come in handy. Especially useful if you use ZFS as file system (recommended).
210 |
211 | ```
212 | sudo apt-get install cockpit -y
213 | sudo systemctl enable --now cockpit.socket
214 | sudo apt-get install podman cockpit-podman -y
215 | sudo systemctl enable --now podman
216 | git clone https://github.com/45Drives/cockpit-zfs-manager.git
217 | sudo cp -r cockpit-zfs-manager/zfs /usr/share/cockpit
218 | sudo apt-get install samba -y
219 | ```
220 | Access cockpit on port 9090
221 |
222 | # Setup on Umbrel
223 |
224 | If you're using an Umbrel node, and you want to integrate RoboSats Coordinator with Umbrel LND node (mainnet) you can edit the configurations file as follows.
225 |
226 | ## Prerequisites
227 |
228 | Before proceeding, make sure you've set up your Umbrel node and it's fully synced with the Bitcoin network. This guide utilizes LND as backend.
229 |
230 | ## Edit compose.env, robosats.env, docker-compose.yml and docker-compose.override-lnd.yml
231 |
232 | Obviously, you should comment out all the containers whose services are already running on Umbrel. Typically you would want to comment out bitcoind, thunderhub and lightning-terminal.
233 |
234 | Secondly, you need to give network access to the LND instance from the Robosats Coordinator docker orchestration.
235 |
236 | To do so, follow the steps outlined below.
237 |
238 | ### Edit Environment Files
239 |
240 | 1. **Set LND Data Path**:
241 | Set the `LND_DATA` variable in compose.env to the path where your LND data is located as follows:
242 | ```env
243 | LND_DATA=/umbrel-path-location/app-data/lightning/data/lnd/
244 | ```
245 |
246 | 2. **Set LND gRPC Host**:
247 | Update the `LND_GRPC_HOST` variable to your specific gRPC host and port in robosats.env. Typically this is done as below:
248 | ```env
249 | LND_GRPC_HOST=10.21.21.9:10009
250 | ```
251 |
252 |
253 | ### Edit Docker Compose File
254 |
255 | 3. **Modify Networks Under TOR Container**:
256 | Navigate to the TOR container section in your `docker-compose.yml` file and add `umbrel_main_network` under the `networks` field.
257 | ```yaml
258 | networks:
259 | - umbrel_main_network
260 | ```
261 |
262 | Add Network Definition:
263 | At the end of your docker-compose.yml file, add the definition for umbrel_main_network.
264 |
265 | ```yaml
266 | networks:
267 | umbrel_main_network:
268 | external: true
269 | ```
270 |
--------------------------------------------------------------------------------
/compose/aliases.sh:
--------------------------------------------------------------------------------
1 | # ALIAS FILE
2 | # COPY INTO /etc/profile.d/robo_aliases.sh for every user to have these permanent aliases
3 |
4 | #######
5 | ## Example aliases for a Mainnet LND coordinator, shortened as `lndmn`. Assuming the suffix for your orchestration is also `lndmn`
6 | ## Edit the /home/user path directory and orchestration suffix (-lndmn) as needed
7 |
8 | ## ROBOMAIN Docker-Compose (lndmn)
9 | alias mn="docker compose -p lndmain --env-file /home/$(whoami)/robosats-deploy/compose/env/lndmn/compose.env -f /home/$(whoami)/robosats-deploy/compose/docker-compose.yml -f /home/$(whoami)/robosats-deploy/compose/docker-compose.override-lnd.yml"
10 |
11 | ## Example usage:
12 | ## Start orchestration
13 | ## > mn up -d
14 | ## Follow all logs
15 | ## > mn logs -f --tail 10
16 | ## Follow LND logs
17 | ## > mn logs -f --tail 300 lnd
18 | ## Build an image
19 | ## > mn build lnd
20 |
21 |
22 | ## Once your Mainnet LND instance is up, we can use these other aliases
23 | # ROBOMAIN LNCLI COMMANDS
24 |
25 | alias mn-lncli="docker exec -it lnd-lndmn lncli"
26 |
27 | # DJANGO MANAGE
28 | alias mn-manage="docker exec -it rs-lndmn python3 manage.py"
29 |
30 | # POSTGRES
31 | # Example postgresql dump and restore. Unsafe!
32 | alias mn-pg-backup='docker exec -i sql-lndmn /bin/bash -c "PGPASSWORD=robosats pg_dump --username postgres postgres" > /home/$(whoami)/backup/mainnet/database/backup.sql'
33 | alias mn-pg-restore='docker exec -i sql-lndmn /bin/bash -c "PGPASSWORD=robosats psql --username postgres postgres" < /home/$(whoami)/backup/mainnet/database/backup.sql'
34 |
35 | #################################################################################################################
36 | ## ROBOTEST Docker-Compose (same aliases as above, but for a testnet `lndtn` orchestration)
37 |
38 | alias tn="docker compose -p lndtest --env-file /home/$(whoami)/robosats-deploy/compose/env/lndtn/compose.env -f /home/$(whoami)/robosats-deploy/compose/docker-compose.yml -f /home/$(whoami)/robosats-deploy/compose/docker-compose.override-lnd.yml"
39 |
40 | ## Example uses:
41 | ## > tn up -d
42 | ## > tn logs -f
43 | ## > tn build lnd
44 |
45 |
46 | ## Once ROBOTEST is up!
47 | # ROBOTEST LNCLI COMMANDS
48 |
49 | alias tn-lncli="docker exec -it lnd-lndtn lncli --network=testnet"
50 |
51 | # DJANGO MANAGE
52 |
53 | alias tn-manage="docker exec -it rs-lndtn python3 manage.py"
54 |
55 | # POSTGRES
56 | # Example postgresql dump and restore. Unsafe!
57 | alias tn-pg-restore='docker exec -i sql-lndtn /bin/bash -c "PGPASSWORD=robotest psql --username postgres postgres" < /home/$(whoami)/backup/testnet/database/backup.sql'
58 | alias tn-pg-backup='docker exec -i sql-lndtn /bin/bash -c "PGPASSWORD=robotest pg_dump --username postgres postgres" > /home/$(whoami)/backup/testnet/database/backup.sql'
--------------------------------------------------------------------------------
/compose/backup/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM debian:bullseye-slim
2 |
3 | # Install rsync in a single layer: combining update+install avoids building
4 | # against a stale apt index cached from an earlier layer, and removing the
5 | # package lists keeps the image small.
6 | RUN apt-get update && \
7 |     apt-get install -y --no-install-recommends rsync && \
8 |     rm -rf /var/lib/apt/lists/*
9 |
10 | COPY backup.sh /usr/src
11 |
12 | WORKDIR /usr/src
13 |
14 | # Run the periodic backup loop as the container's main process.
15 | CMD ["bash","backup.sh"]
16 |
--------------------------------------------------------------------------------
/compose/backup/backup.sh:
--------------------------------------------------------------------------------
1 | #########
2 | ### Super rudementary service that will copy/paste (backup) into 3 attached storage locations defined
3 | ### in /env/stack-prefix.env
4 | ### Backs up lnd/lnd.conf, btc/bitcoind.conf, lnd.channel.scb, and robosats postgresql dumps
5 |
6 | mkdir -p /backup1/lnd
7 | mkdir -p /backup2/lnd
8 | mkdir -p /backup3/lnd
9 |
10 | mkdir -p /backup1/lnd/data/chain/bitcoin/${NETWORK:?}
11 | mkdir -p /backup2/lnd/data/chain/bitcoin/${NETWORK:?}
12 | mkdir -p /backup3/lnd/data/chain/bitcoin/${NETWORK:?}
13 |
14 | mkdir -p /backup1/bitcoin
15 | mkdir -p /backup2/bitcoin
16 | mkdir -p /backup3/bitcoin
17 |
18 | mkdir -p /backup1/database
19 | mkdir -p /backup2/database
20 | mkdir -p /backup3/database
21 |
22 | mkdir -p /backup1/lit
23 | mkdir -p /backup2/lit
24 | mkdir -p /backup3/lit
25 |
26 |
27 | # Would be hard to clean deleted user's avatars. Easier to re-generate, better not backup.
28 | #mkdir -p /backup1/static/assets/avatars
29 | #mkdir -p /backup2/static/assets/avatars
30 |
31 |
32 | for i in {1..1000};
33 | do
34 |
35 | rsync -auzhPq /running/lnd/data/chain/bitcoin/${NETWORK:?}/channel.backup /backup1/lnd/data/chain/bitcoin/${NETWORK:?}/channel.backup
36 | rsync -auzhPq /running/lnd/data/chain/bitcoin/${NETWORK:?}/channel.backup /backup2/lnd/data/chain/bitcoin/${NETWORK:?}/channel.backup
37 | rsync -auzhPq /running/lnd/data/chain/bitcoin/${NETWORK:?}/channel.backup /backup3/lnd/data/chain/bitcoin/${NETWORK:?}/channel.backup
38 |
39 | sleep 5
40 | done
41 |
42 | # Only back up database every 1000 loops (5000 seconds )
43 | rsync -auzhPq /running/lnd/lnd.conf /backup1/lnd/lnd.conf
44 | rsync -auzhPq /running/lnd/lnd.conf /backup2/lnd/lnd.conf
45 | rsync -auzhPq /running/lnd/lnd.conf /backup3/lnd/lnd.conf
46 |
47 | rsync -auzhPq /running/bitcoin/bitcoin.conf /backup1/bitcoin/bitcoin.conf
48 | rsync -auzhPq /running/bitcoin/bitcoin.conf /backup2/bitcoin/bitcoin.conf
49 | rsync -auzhPq /running/bitcoin/bitcoin.conf /backup3/bitcoin/bitcoin.conf
50 |
51 | rsync -auzhPq /running/lit/ /backup1/lit/
52 | rsync -auzhPq /running/lit/ /backup2/lit/
53 | rsync -auzhPq /running/lit/ /backup3/lit/
54 |
55 |
56 | echo "## backing up database ##"
57 | rsync -auzhP /running/database/ /backup1/database/
58 | rsync -auzhP /running/database/ /backup2/database/
59 | rsync -auzhP /running/database/ /backup3/database/
60 |
--------------------------------------------------------------------------------
/compose/cln/Dockerfile:
--------------------------------------------------------------------------------
1 | # Build stage: compiles the cln-grpc-hold plugin from daywalker90's fork of
2 | # the lightning repository (branch cln-grpc-hold) using the Rust toolchain.
3 | FROM debian:bullseye-slim as builder
4 | ARG DEBIAN_FRONTEND=noninteractive
5 |
6 | ARG LIGHTNINGD_VERSION=v23.08
7 | RUN apt-get update -qq && \
8 |     apt-get install -qq -y --no-install-recommends \
9 |     autoconf \
10 |     automake \
11 |     build-essential \
12 |     ca-certificates \
13 |     curl \
14 |     git \
15 |     protobuf-compiler
16 |
17 | # Install rustup and a stable toolchain (rustfmt is required by the build).
18 | ENV RUST_PROFILE=release
19 | ENV PATH=$PATH:/root/.cargo/bin/
20 | RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y
21 | RUN rustup toolchain install stable --component rustfmt --allow-downgrade
22 |
23 | WORKDIR /opt/lightningd
24 | # Clone the plugin branch and build it in release mode.
25 | RUN git clone --recursive --branch cln-grpc-hold https://github.com/daywalker90/lightning.git /tmp/cln-grpc-hold
26 | RUN cd /tmp/cln-grpc-hold \
27 |     && cargo build --release
28 |
29 | # Final stage: official lightningd image plus the compiled plugin binary.
30 | # The binary is staged in /tmp; entrypoint.sh copies it into
31 | # /root/.lightning/plugins at container start.
32 | FROM elementsproject/lightningd:v23.08 as final
33 |
34 | COPY --from=builder /tmp/cln-grpc-hold/target/release/cln-grpc-hold /tmp/cln-grpc-hold
35 | COPY entrypoint.sh entrypoint.sh
36 | RUN chmod +x entrypoint.sh
37 |
38 | # 9735: lightning p2p port; 9835: gRPC port
39 | EXPOSE 9735 9835
40 | ENTRYPOINT [ "/usr/bin/tini", "-g", "--", "./entrypoint.sh" ]
--------------------------------------------------------------------------------
/compose/cln/entrypoint.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | # Entrypoint for the Core Lightning (CLN) container.
3 | # If EXPOSE_TCP=true, lightningd is started in the background and its unix
4 | # RPC socket is proxied over TCP via socat; otherwise the cln-grpc-hold
5 | # plugin is installed and the container command is exec'd directly.
6 |
7 | # Default: do not expose the RPC socket over TCP.
8 | : "${EXPOSE_TCP:=false}"
9 |
10 | # Per-network data directory, e.g. ${LIGHTNINGD_DATA}/testnet
11 | networkdatadir="${LIGHTNINGD_DATA}/${LIGHTNINGD_NETWORK}"
12 |
13 | if [ "$EXPOSE_TCP" == "true" ]; then
14 |     # Enable job control so lightningd can be foregrounded again with `fg`.
15 |     set -m
16 |     lightningd "$@" &
17 |
18 |     echo "Core-Lightning starting"
19 |     # Block until lightningd creates its "lightning-rpc" unix socket,
20 |     # watching the data dir with inotifywait.
21 |     while read -r i; do if [ "$i" = "lightning-rpc" ]; then break; fi; done \
22 |     < <(inotifywait -e create,open --format '%f' --quiet "${networkdatadir}" --monitor)
23 |     echo "Core-Lightning started"
24 |     echo "Core-Lightning started, RPC available on port $LIGHTNINGD_RPC_PORT"
25 |
26 |     # Proxy the unix RPC socket over TCP, then return lightningd to the foreground.
27 |     socat "TCP4-listen:$LIGHTNINGD_RPC_PORT,fork,reuseaddr" "UNIX-CONNECT:${networkdatadir}/lightning-rpc" &
28 |     fg %-
29 | else
30 |     # Always copy the cln-grpc-hold plugin into the plugins directory on start up
31 |     mkdir -p /root/.lightning/plugins
32 |     cp /tmp/cln-grpc-hold /root/.lightning/plugins/cln-grpc-hold
33 |     exec "$@"
34 | fi
--------------------------------------------------------------------------------
/compose/docker-compose.override-cln.yml:
--------------------------------------------------------------------------------
1 | version: '3.9'
2 | services:
3 |   cln:
4 |     build: ./cln
5 |     restart: always
6 |     # Share the tor container's network namespace.
7 |     network_mode: service:tor
8 |     container_name: cln${SUFFIX}
9 |     depends_on:
10 |       - tor
11 |       - bitcoind
12 |     volumes:
13 |       - ${TOR_DATA:?}:${GUEST_TOR_DATA:?}:ro
14 |       - ${TOR_CONFIG:?}:${GUEST_TOR_CONFIG:?}:ro
15 |       - ${CLN_DATA:?}:/root/.lightning
16 |       - ${CLN_CONFIG:?}:/root/.lightning/config
17 |       # ':?' for consistency with the other required mounts: compose fails
18 |       # fast if the variable is unset instead of mounting an empty host path.
19 |       - ${BITCOIN_DATA:?}:/root/.bitcoin
20 |     command: lightningd
21 |
22 |   # Patch common services: mount the CLN data dir at /cln in the RoboSats
23 |   # service containers.
24 |   robosats:
25 |     volumes:
26 |       - ${CLN_DATA}:/cln
27 |   clean-orders:
28 |     volumes:
29 |       - ${CLN_DATA}:/cln
30 |   follow-invoices:
31 |     volumes:
32 |       - ${CLN_DATA}:/cln
33 |   celery:
34 |     volumes:
35 |       - ${CLN_DATA}:/cln
36 |   # backup:
37 |   #   volumes:
38 |   #     - ${CLN_DATA}:/running/cln:ro
--------------------------------------------------------------------------------
/compose/docker-compose.override-lnd.yml:
--------------------------------------------------------------------------------
1 | version: '3.9'
2 | services:
3 | lnd:
4 | image: lightninglabs/lnd:${LND_TAG:-v0.17.4-beta@sha256:668eba1fdb1ac520292db95a57059be91895fb8af076eafd6c8b5e757f0f196c}
5 | restart: always
6 | network_mode: service:tor
7 | container_name: lnd${SUFFIX}
8 | depends_on:
9 | - tor
10 | - bitcoind
11 | volumes:
12 | - ${TOR_DATA:?}:/var/lib/tor:ro
13 | - ${LND_DATA:?}:/root/.lnd
14 | - ${LND_CONF:?}:/root/.lnd/lnd.conf
15 | - ${LND_AUTOUNLOCK_PWD:?}:/tmp/pwd:ro
16 | entrypoint: lnd
17 | environment:
18 | SUFFIX: ${SUFFIX}
19 | LND_RPC_PORT: ${GUEST_LND_RPC_PORT:?}
20 | LND_REST_PORT: ${GUEST_LND_REST_PORT:?}
21 |
22 | thub:
23 | image: apotdevin/thunderhub:${THUNDERHUB_TAG:-v0.13.23@sha256:5ad834045dc3030ec199615827c415ca07729da32c4739afd3adfc662af4fe65}
24 | restart: always
25 | network_mode: service:tor
26 | container_name: thub${SUFFIX}
27 | env_file:
28 | ${THUB_ENVS_FILE}
29 | depends_on:
30 | - tor
31 | - lnd
32 | volumes:
33 | - ${LND_DATA:?}:/lnd/:ro
34 | - ${THUB_ACCOUNTS:?}:${THUB_ACCOUNTS_CONFIG_PATH:?}
35 |
36 | lit:
37 | image: lightninglabs/lightning-terminal:${LIT_TAG:-v0.11.0-alpha@sha256:dc14c495f699b14c2d75c43097ad3ab40e7287fc136a3203df9cffe17b867b71}
38 | restart: always
39 | network_mode: service:tor
40 | container_name: lit${SUFFIX}
41 | depends_on:
42 | - tor
43 | - lnd
44 | entrypoint: litd --uipassword ${LIT_PASSWORD} --network=${NETWORK} --lnd-mode=remote --remote.lnd.rpcserver=127.0.0.1:${GUEST_LND_RPC_PORT:?} --remote.lnd.macaroonpath=/lnd/data/chain/bitcoin/${NETWORK}/admin.macaroon --remote.lnd.tlscertpath=/lnd/tls.cert --httpslisten=0.0.0.0:8443 --loop.server.proxy=127.0.0.1:9050 --loop.loopoutmaxparts=25 --loop.maxlsatfee=1
45 | volumes:
46 | - ${LND_DATA:?}:/lnd/:ro
47 | - ${LIT_DATA:?}:/root/
48 |
49 | lndg:
50 | build: ./lndg
51 | restart: always
52 | container_name: lndg${SUFFIX}
53 | volumes:
54 | - ${LNDG_CONFIG}:/root/supervisord.conf:ro
55 | - ${LND_DATA}:/root/.lnd:ro
56 | - ${LNDG_DATA}:/lndg/data:rw
57 | depends_on:
58 | - tor
59 | - lnd
60 | command:
61 | - sh
62 | - -c
63 | - python initialize.py -net ${NETWORK} -d -dx -dir /root/.lnd -rpc 127.0.0.1:${GUEST_LND_RPC_PORT:?} -pw '${LNDG_PASSWORD:?}' && supervisord -c /root/supervisord.conf && python manage.py runserver 0.0.0.0:8889
64 | network_mode: service:tor
65 |
66 | # Patch common services
67 | # tor:
68 | # ports:
69 | # - ${THUB_LOCAL_PORT}:3000
70 | # - ${LIT_LOCAL_PORT}:8443
71 | robosats:
72 | volumes:
73 | - ${LND_DATA}:/lnd
74 | clean-orders:
75 | volumes:
76 | - ${LND_DATA}:/lnd
77 | follow-invoices:
78 | volumes:
79 | - ${LND_DATA}:/lnd
80 | celery:
81 | volumes:
82 | - ${LND_DATA}:/lnd
83 | backup:
84 | volumes:
85 | - ${LND_DATA}:/running/lnd:ro
86 | - ${LIT_DATA}:/running/lit:ro
87 |
88 | # Other LND services you might want to enable
89 |
90 | # bos:
91 | # image: alexbosworth/balanceofsatoshis:latest
92 | # restart: always
93 | # container_name: bos${SUFFIX}
94 | # depends_on:
95 | # - tor
96 | # - lnd
97 | # network_mode: service:tor
98 | # volumes:
99 | # - ${LND_DATA:?}:/home/node/.lnd:ro
100 |
101 | # LND healthcheck.
102 | # These are hit or miss. Not recommended as you might kill a healthy container or enter a restart loop.
103 |
104 | # healthcheck:
105 | # test: ['CMD', 'lncli', '--network=${NETWORK}', 'getinfo']
106 | # interval: 30s
107 | # retries: 3
108 | # start_period: 30m
109 | # timeout: 10s
110 | # labels:
111 | # - "deunhealth.restart.on.unhealthy=true"
112 |
113 | # docker-compose can't restore an unhealthy container (e.g., restart it). Hence, if you enable the LND
114 | # healthcheck you might want to uncomment the `deunhealth` container as well.
115 |
116 | # deunhealth:
117 | # image: qmcgaw/deunhealth:v0.3.0
118 | # container_name: deunhealth${SUFFIX}
119 | # restart: always
120 | # volumes:
121 | # - /var/run/docker.sock:/var/run/docker.sock
122 |
--------------------------------------------------------------------------------
/compose/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: '3.9'
2 | services:
3 | tor:
4 | build: ./tor
5 | container_name: tor${SUFFIX}
6 | restart: always
7 | environment:
8 | SUFFIX: ${SUFFIX}
9 | volumes:
10 | - ${TOR_DATA:?}:/var/lib/tor
11 | - ${TOR_CONFIG:?}:/etc/tor/torrc
12 | # ports:
13 | # - ${WEB_LOCAL_PORT}:80
14 | # - ${THUB_LOCAL_PORT}:3000
15 | # - ${LIT_LOCAL_PORT}:8443
16 |
17 | bitcoind:
18 | image: ruimarinho/bitcoin-core:${BITCOIND_TAG:-24.0.1-alpine@sha256:624b6fb1c282a3db6438ff35fbb9eed6ae260c66ffbb427ae72edd7da6a0ecdc}
19 | container_name: btc${SUFFIX}
20 | restart: always
21 | depends_on:
22 | - tor
23 | network_mode: service:tor
24 | volumes:
25 | - ${TOR_DATA:?}:/var/lib/tor:ro
26 | - ${BITCOIN_DATA:?}:${GUEST_BITCOIN:?}
27 | - ${BITCOIN_CONF:?}:${GUEST_BITCOIN:?}/bitcoin.conf
28 |
29 | daphne:
30 | image: recksato/robosats:${ROBOSATS_TAG:-v0.6.0-alpha@sha256:8cc975ff5942a3fb2a09827b8eaafebea1c851eb3ecf9be1aaac1f238cfa9fc1}
31 | container_name: daphne${SUFFIX}
32 | restart: always
33 | network_mode: service:tor
34 | env_file:
35 | - ${ROBOSATS_ENVS_FILE}
36 | environment:
37 | SKIP_COLLECT_STATIC: "true"
38 | depends_on:
39 | - robosats
40 | command: daphne -b 0.0.0.0 -p 9000 robosats.asgi:application
41 |
42 | redis:
43 | image: redis:${REDIS_TAG:-7.2.1-alpine@sha256:7f5a0dfbf379db69dc78434091dce3220e251022e71dcdf36207928cbf9010de}
44 | container_name: redis${SUFFIX}
45 | restart: always
46 | volumes:
47 | - redisdata:/data
48 | network_mode: service:tor
49 |
50 | robosats:
51 | image: recksato/robosats:${ROBOSATS_TAG:-v0.6.0-alpha@sha256:8cc975ff5942a3fb2a09827b8eaafebea1c851eb3ecf9be1aaac1f238cfa9fc1}
52 | container_name: rs${SUFFIX}
53 | restart: always
54 | environment:
55 | LOG_TO_CONSOLE: 1
56 | env_file:
57 | - ${ROBOSATS_ENVS_FILE}
58 | depends_on:
59 | - redis
60 | volumes:
61 | - ${STATIC}:/usr/src/static
62 | network_mode: service:tor
63 | command: gunicorn --bind :8000 --max-requests 1000 --max-requests-jitter 200 -w ${GUNICORN_WORKERS} robosats.wsgi:application
64 |
65 | postgres:
66 | image: postgres:${POSTGRES_TAG:-14.2-alpine@sha256:2fb5718f2a23dbac9bd1258e886eee90250a5903785e3136d62dd65e19f34982}
67 | container_name: sql${SUFFIX}
68 | restart: always
69 | environment:
70 | POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}
71 | POSTGRES_USER: ${POSTGRES_USER}
72 | POSTGRES_DB: ${POSTGRES_NAME}
73 | network_mode: service:tor
74 | volumes:
75 | - ${DATABASE}:/var/lib/postgresql/data
76 |
77 | nginx:
78 | image: nginx:${NGINX_TAG:-1.25.2-alpine-slim@sha256:1b0cb433e90260a96528c987ee78b797e842d510473935304a0931536d10f50d}
79 | container_name: nginx${SUFFIX}
80 | restart: always
81 | volumes:
82 | - ${STATIC}:/usr/src/static:ro
83 | - ${NGINX_CONFD}:/etc/nginx/conf.d:ro
84 | - ${WELLKNOWN}:/usr/src/.well-known:ro
85 | network_mode: service:tor
86 |
87 | clean-orders:
88 | image: recksato/robosats:${ROBOSATS_TAG:-v0.6.0-alpha@sha256:8cc975ff5942a3fb2a09827b8eaafebea1c851eb3ecf9be1aaac1f238cfa9fc1}
89 | restart: always
90 | container_name: clord${SUFFIX}
91 | command: python3 manage.py clean_orders
92 | environment:
93 | SKIP_COLLECT_STATIC: "true"
94 | env_file:
95 | - ${ROBOSATS_ENVS_FILE}
96 | network_mode: service:tor
97 |
98 | follow-invoices:
99 | image: recksato/robosats:${ROBOSATS_TAG:-v0.6.0-alpha@sha256:8cc975ff5942a3fb2a09827b8eaafebea1c851eb3ecf9be1aaac1f238cfa9fc1}
100 | container_name: invo${SUFFIX}
101 | restart: always
102 | env_file:
103 | - ${ROBOSATS_ENVS_FILE}
104 | environment:
105 | SKIP_COLLECT_STATIC: "true"
106 | command: python3 manage.py follow_invoices
107 | network_mode: service:tor
108 |
109 | telegram-watcher:
110 | image: recksato/robosats:${ROBOSATS_TAG:-v0.6.0-alpha@sha256:8cc975ff5942a3fb2a09827b8eaafebea1c851eb3ecf9be1aaac1f238cfa9fc1}
111 | container_name: tg${SUFFIX}
112 | restart: always
113 | environment:
114 | SKIP_COLLECT_STATIC: "true"
115 | env_file:
116 | - ${ROBOSATS_ENVS_FILE}
117 | command: python3 manage.py telegram_watcher
118 | network_mode: service:tor
119 |
120 | celery:
121 | image: recksato/robosats:${ROBOSATS_TAG:-v0.6.0-alpha@sha256:8cc975ff5942a3fb2a09827b8eaafebea1c851eb3ecf9be1aaac1f238cfa9fc1}
122 | container_name: cele${SUFFIX}
123 | restart: always
124 | env_file:
125 | - ${ROBOSATS_ENVS_FILE}
126 | environment:
127 | SKIP_COLLECT_STATIC: "true"
128 | command: celery -A robosats worker --loglevel=WARNING
129 | depends_on:
130 | - redis
131 | network_mode: service:tor
132 |
133 | celery-beat:
134 | image: recksato/robosats:${ROBOSATS_TAG:-v0.6.0-alpha@sha256:8cc975ff5942a3fb2a09827b8eaafebea1c851eb3ecf9be1aaac1f238cfa9fc1}
135 | container_name: beat${SUFFIX}
136 | restart: always
137 | env_file:
138 | - ${ROBOSATS_ENVS_FILE}
139 | environment:
140 | SKIP_COLLECT_STATIC: "true"
141 | command: celery -A robosats beat -l info --scheduler django_celery_beat.schedulers:DatabaseScheduler
142 | depends_on:
143 | - redis
144 | network_mode: service:tor
145 |
146 | strfry:
147 | build:
148 | context: ./strfry
149 | dockerfile: Dockerfile
150 | container_name: strfry${SUFFIX}
151 | restart: always
152 | volumes:
153 | - ${STRFRY_URLS_EXTERNAL}:/app/external_urls.txt:ro
154 | - ${STRFRY_URLS_FEDERATION}:/app/federation_urls.txt:ro
155 | - ${STRFRY_CONF}:/etc/strfry.conf:ro
156 | - ${STRFRY_DATA}/db:/app/strfry-db:rw
157 | network_mode: service:tor
158 |
159 | relay:
160 | build:
161 | context: ./relay
162 | dockerfile: Dockerfile
163 | container_name: relay${SUFFIX}
164 | restart: always
165 | volumes:
166 | - ${RELAY_URLS_EXTERNAL}:/app/external_urls.txt:ro
167 | - ${RELAY_URLS_FEDERATION}:/app/federation_urls.txt:ro
168 | - ${RELAY_CONF}:/etc/strfry.conf:ro
169 | - ${RELAY_DATA}/db:/app/strfry-db:rw
170 | network_mode: service:tor
171 |
172 | # Example simple backup service (copy/paste to attached storage locations)
173 | backup:
174 | build: ./backup
175 | container_name: bu${SUFFIX}
176 | restart: always
177 | environment:
178 | NETWORK: ${NETWORK}
179 | volumes:
180 | - ${DATABASE}:/running/database:ro
181 | - ${BITCOIN_CONF:?}:/running/bitcoin/bitcoin.conf:ro
182 | - ${LND_DATA}:/running/lnd:ro
183 | - ${LND_CONF}:/running/lnd/lnd.conf:ro
184 | - ${LIT_DATA}:/running/lit:ro
185 | - ${STATIC}:/running/static:ro
186 | - ${BU_DIR1}:/backup1:rw
187 | - ${BU_DIR2}:/backup2:rw
188 | - ${BU_DIR3}:/backup3:rw
189 |
190 | volumes:
191 | redisdata:
192 |
--------------------------------------------------------------------------------
/compose/env-sample/README.md:
--------------------------------------------------------------------------------
1 | ## Example environment and configuration files
2 | You can think of these as "Namespaces" for your orchestration.
3 | You can find an LND Testnet example (lndtn) in `/compose/env-sample/lndtn` that will create containers suffixed with `-lndtn` and a CLN Testnet example in `/compose/env-sample/clntn` using the suffix `-clntn`.
--------------------------------------------------------------------------------
/compose/env-sample/clntn/bitcoin.conf:
--------------------------------------------------------------------------------
1 | # Reference: https://en.bitcoin.it/wiki/Running_Bitcoin
2 | # https://github.com/bitcoin/bitcoin/blob/master/share/examples/bitcoin.conf
3 |
4 | server=1
5 | txindex=1
6 | onion=127.0.0.1:9050
7 | torcontrol=127.0.0.1:9051
8 | rpcuser=robosats_testnet_bitcoind
9 | rpcpassword=robosats_testnet_bitcoind
10 | zmqpubrawblock=tcp://127.0.0.1:18501
11 | zmqpubrawtx=tcp://127.0.0.1:18502
12 |
13 | # limit upload bandwidth (in MB per day)
14 | maxuploadtarget=10
15 | # Allow RPC connections from outside of container localhost
16 | rpcbind=0.0.0.0
17 | # Only connect to typical docker IP addresses (Usually from docker host computer)
18 | rpcallowip=172.0.0.0/255.0.0.0
19 | # Allow access from any IP address (Usually from another computer on LAN)
20 | #rpcallowip=0.0.0.0/0
21 |
22 | # Run on the test network instead of the real bitcoin network.
23 | testnet=1
24 |
25 | mempoolfullrbf=1
26 |
27 | [main]
28 | # Only run on Tor
29 | onlynet=onion
30 |
31 | # Add Tor seed nodes
32 | addnode=i4x66albngo3sg3w.onion:8333
33 |
34 | # Some testnet settings needed for 0.19, if using testnet
35 | [test]
36 | # Allow RPC connections from outside of container localhost
37 | onlynet=onion
38 | rpcbind=0.0.0.0
39 |
--------------------------------------------------------------------------------
/compose/env-sample/clntn/cln_config:
--------------------------------------------------------------------------------
1 | network=testnet
2 | proxy=127.0.0.1:9050
3 | bind-addr=127.0.0.1:9736
4 | addr=statictor:127.0.0.1:9051
5 | grpc-port=9999
6 | grpc-hold-port=9998
7 | always-use-proxy=true
8 | important-plugin=/root/.lightning/plugins/cln-grpc-hold
9 | # wallet=postgres://user:pass@localhost:5433/cln
10 | # bookkeeper-db=postgres://user:pass@localhost:5433/cln
--------------------------------------------------------------------------------
/compose/env-sample/clntn/compose.env:
--------------------------------------------------------------------------------
1 | # Uncomment the dockerhub tags to override the image versions locally without needing to fork the docker-compose.yml
2 |
3 | # ROBOSATS_TAG='v0.6.0-alpha@sha256:8cc975ff5942a3fb2a09827b8eaafebea1c851eb3ecf9be1aaac1f238cfa9fc1'
4 | # BITCOIND_TAG='24.0.1-alpine@sha256:624b6fb1c282a3db6438ff35fbb9eed6ae260c66ffbb427ae72edd7da6a0ecdc'
5 | # REDIS_TAG='7.2.1-alpine@sha256:7f5a0dfbf379db69dc78434091dce3220e251022e71dcdf36207928cbf9010de'
6 | # NGINX_TAG='1.25.2-alpine-slim@sha256:1b0cb433e90260a96528c987ee78b797e842d510473935304a0931536d10f50d'
7 | # POSTGRES_TAG='14.2-alpine@sha256:2fb5718f2a23dbac9bd1258e886eee90250a5903785e3136d62dd65e19f34982'
8 |
9 | STATIC='/custom_path/testnet/static'
10 | DATABASE='/custom_path/testnet/database'
11 |
12 | # You can create a cronjob to create PG backups
13 | PG_BACKUP='/custom_path/testnet/pg_backup'
14 | ROBOSATS_ENVS_FILE='env/clntn/robosats.env'
15 | THUB_ENVS_FILE='env/clntn/thunderhub.env'
16 | SUFFIX='-clntn'
17 | NETWORK='testnet'
18 |
19 | # Postgresql Database (Should match with those in robosats-tn.env)
20 | POSTGRES_NAME='postgres'
21 | POSTGRES_USER='postgres'
22 | POSTGRES_PASSWORD='example'
23 |
24 | BITCOIND_VERSION='24.0.1-alpine'
25 |
26 | NGINX_CONFD='./nginx/tn.conf.d'
27 | WELLKNOWN='./nginx/tn.well-known'
28 |
29 | # Port and number of HTTP server workers for the robosats backend
30 | WEB_LOCAL_PORT=8001
31 | GUNICORN_WORKERS=2
32 |
33 | BU_DIR1='/backup'
34 | BU_DIR2='/custom_path/backup'
35 | BU_DIR3='/custom_path/backup2'
36 |
37 | BITCOIN_DATA='/custom_path/testnet/bitcoin'
38 | BITCOIN_CONF='./env/clntn/bitcoin.conf'
39 | CLN_DATA='/custom_path/testnet/cln'
40 | CLN_CONFIG='./env/clntn/cln_config'
41 | LIT_DATA='/custom_path/testnet/lit'
42 |
43 | GUEST_BITCOIN=/home/bitcoin/.bitcoin
44 |
45 | TOR_DATA='./env/clntn/tor'
46 | TOR_CONFIG='./env/clntn/torrc'
47 |
48 | GUEST_LND_RPC_PORT=10009
49 | GUEST_LND_REST_PORT=8080
50 |
51 | HOST_LND_RPC_PORT=10009
52 | HOST_LND_REST_PORT=8080
53 |
54 | THUB_LOCAL_PORT=3001
55 | THUB_ACCOUNTS='./env/clntn/thunderhub-accounts.yml'
56 | # THUB_ACCOUNTS_CONFIG_PATH must match thub...env
57 | THUB_ACCOUNTS_CONFIG_PATH='/config/accounts.yml'
58 |
59 |
60 |
61 |
62 | LIT_LOCAL_PORT=4001
63 | LIT_PASSWORD=999999999
64 |
65 | LND_BACKUP_PATH='/custom_path/testnet/lnd-backup'
66 | NODE_ALIAS='🤖RoboSats⚡(Testnet)'
67 | NODE_COLOR='#4126a7'
68 |
69 | LND_AUTOUNLOCK_PWD='./env/clntn/lnd_autounlock_pwd'
70 |
--------------------------------------------------------------------------------
/compose/env-sample/clntn/robosats.env:
--------------------------------------------------------------------------------
1 | # Coordinator Alias (Same as longAlias)
2 | COORDINATOR_ALIAS="coordinator_NAME_CLN"
3 | # Lightning node vendor: CLN | LND
4 | LNVENDOR='CLN'
5 |
6 | # LND configuration (only needed if LNVENDOR='LND')
7 | # LND directory to read TLS cert and macaroon
8 | #LND_DIR='/lnd/'
9 | #MACAROON_PATH='data/chain/bitcoin/testnet/admin.macaroon'
10 |
11 | # If LND directory is not specified, cert and macaroon can be provided as base64 strings
12 | # base64 ~/.lnd/tls.cert | tr -d '\n'
13 | #LND_CERT_BASE64='LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUNLVENDQWRDZ0F3SUJBZ0lRQ0VoeGpPZXY1bGQyVFNPTXhKalFvekFLQmdncWhrak9QUVFEQWpBNE1SOHcKSFFZRFZRUUtFeFpzYm1RZ1lYVjBiMmRsYm1WeVlYUmxaQ0JqWlhKME1SVXdFd1lEVlFRREV3d3dNakJtTVRnMQpZelkwTnpVd0hoY05Nakl3TWpBNE1UWXhOalV3V2hjTk1qTXdOREExTVRZeE5qVXdXakE0TVI4d0hRWURWUVFLCkV4WnNibVFnWVhWMGIyZGxibVZ5WVhSbFpDQmpaWEowTVJVd0V3WURWUVFERXd3d01qQm1NVGcxWXpZME56VXcKV1RBVEJnY3Foa2pPUFFJQkJnZ3Foa2pPUFFNQkJ3TkNBQVNJVWdkcVMrWFZKL3EzY0JZeWd6ZDc2endaanlmdQpLK3BzcWNYVkFyeGZjU2NXQ25jbXliNGRaMy9Lc3lLWlRaamlySDE3aEY0OGtIMlp5clRZSW9hZG80RzdNSUc0Ck1BNEdBMVVkRHdFQi93UUVBd0lDcERBVEJnTlZIU1VFRERBS0JnZ3JCZ0VGQlFjREFUQVBCZ05WSFJNQkFmOEUKQlRBREFRSC9NQjBHQTFVZERnUVdCQlEwWUJjZXdsd1BqYTJPRXFyTGxzZnJscEswUFRCaEJnTlZIUkVFV2pCWQpnZ3d3TWpCbU1UZzFZelkwTnpXQ0NXeHZZMkZzYUc5emRJSUVkVzVwZUlJS2RXNXBlSEJoWTJ0bGRJSUhZblZtClkyOXVib2NFZndBQUFZY1FBQUFBQUFBQUFBQUFBQUFBQUFBQUFZY0V3S2dRQW9jRUFBQUFBREFLQmdncWhrak8KUFFRREFnTkhBREJFQWlBd0dMY05qNXVZSkVwanhYR05OUnNFSzAwWmlSUUh2Qm50NHp6M0htWHBiZ0lnSWtvUQo3cHFvNGdWNGhiczdrSmt1bnk2bkxlNVg0ZzgxYjJQOW52ZnZ2bkk9Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K'
14 | # base64 ~/.lnd/data/chain/bitcoin/testnet/admin.macaroon | tr -d '\n'
15 | #LND_MACAROON_BASE64='AgEDbG5kAvgBAwoQsyI+PK+fyb7F2UyTeZ4seRIBMBoWCgdhZGRyZXNzEgRyZWFkEgV3cml0ZRoTCgRpbmZvEgRyZWFkEgV3cml0ZRoXCghpbnZvaWNlcxIEcmVhZBIFd3JpdGUaIQoIbWFjYXJvb24SCGdlbmVyYXRlEgRyZWFkEgV3cml0ZRoWCgdtZXNzYWdlEgRyZWFkEgV3cml0ZRoXCghvZmZjaGFpbhIEcmVhZBIFd3JpdGUaFgoHb25jaGFpbhIEcmVhZBIFd3JpdGUaFAoFcGVlcnMSBHJlYWQSBXdyaXRlGhgKBnNpZ25lchIIZ2VuZXJhdGUSBHJlYWQAAAYgMt90uD6v4truTadWCjlppoeJ4hZrL1SBb09Y+4WOiI0='
16 |
17 | # CLN directory (only needed if LNVENDOR='CLN')
18 | CLN_DIR='/cln/testnet/'
19 | CLN_GRPC_HOST='localhost:9999'
20 | CLN_GRPC_HOLD_HOST='localhost:9998'
21 |
22 | # Bitcoin Core Daemon RPC, used to validate addresses
23 | BITCOIND_RPCURL = 'http://127.0.0.1:18332'
24 | BITCOIND_RPCUSER = 'robosats_testnet_bitcoind'
25 | BITCOIND_RPCPASSWORD = 'robosats_testnet_bitcoind'
26 |
27 | # Auto unlock LND password. Only used in development docker-compose environment.
28 | # It will fail starting up the node without it.
29 | # To disable auto unlock, comment out 'wallet-unlock-password-file=/tmp/pwd' from 'docker/lnd/lnd.conf'
30 | LND_GRPC_HOST='localhost:10009'
31 |
32 | REDIS_URL='redis://localhost:6379/1'
33 |
34 | # Postgresql Database (These are fed from STACK-XX.env)
35 | # Deprecated
36 | POSTGRES_NAME='postgres'
37 | # New
38 | POSTGRES_DB='postgres'
39 | POSTGRES_USER='postgres'
40 | POSTGRES_PASSWORD='example'
41 | POSTGRES_HOST='127.0.0.1'
42 | POSTGRES_PORT='5432'
43 |
44 | # Tor proxy for remote calls (e.g. fetching prices or sending Telegram messages)
45 | USE_TOR='True'
46 | TOR_PROXY='127.0.0.1:9050'
47 |
48 | # List of market price public APIs. If the currency is available in more than 1 API, will use median price.
49 | MARKET_PRICE_APIS = 'https://blockchain.info/ticker, https://api.yadio.io/exrates/BTC'
50 |
51 | # Host e.g. 'robotesta15gf7845r4rgksb4nmmr43fh77bzn2ia2eucduyeafnyd.onion'
52 | HOST_NAME = 'robotestagw3dcxmd66r4rgksb4nmmr43fh77bzn2ia2eucduyeafnyd.onion'
53 | HOST_NAME2 = '*' # Do not change
54 | I2P_ALIAS = ''
55 | I2P_LONG = ''
56 | LOCAL_ALIAS = ''
57 | ONION_LOCATION = 'robotestagw3dcxmd66r4rgksb4nmmr43fh77bzn2ia2eucduyeafnyd.onion'
58 |
59 | # Geoblocked countries (will reject F2F trades).
60 | # List of A3 country codes (see https://en.wikipedia.org/wiki/ISO_3166-1_alpha-3)
61 | # Leave empty '' to allow all countries.
62 | # Example 'NOR,USA,CZE'.
63 | GEOBLOCKED_COUNTRIES = 'ABW,AFG,AGO'
64 |
65 | # Link to robosats alternative site (shown in frontend in statsfornerds so users can switch mainnet/testnet)
66 | ALTERNATIVE_SITE = 'RoboSats6tkf3eva7x2voqso3a5wcorsnw34jveyxfqi2fu7oyheasid.onion'
67 | ALTERNATIVE_NAME = 'RoboSats Mainnet'
68 |
69 | SECRET_KEY= 'n098eafh82390fu9283uyt9yf023uf'
70 |
71 | # Link to robosats mainnet/testnet (shown on frontend in statsfornerds so users can switch mainnet/testnet)
72 | THE_OTHER_ROBOSATS = 'robosats6tkf3eva7x2voqso3a5wcorsnw34jveyxfqi2fu7oyheasid.onion'
73 |
74 | # Telegram bot token
75 | TELEGRAM_TOKEN = ''
76 | TELEGRAM_BOT_NAME = ''
77 | # Telegram chat id to receive admin notifications
78 | TELEGRAM_COORDINATOR_CHAT_ID = 'AdminNotificationChatId'
79 | # Notify new messages in-chat app (fiat exchange step) if at least X minutes has passed since the last chat message.
80 | CHAT_NOTIFICATION_TIMEGAP = 5
81 |
82 | # Maintenance notice and other coordinator messages on client start
83 | # Style of the notice on the client app, use None for no notice: 'None' | 'Warning' | 'Success' | 'Error' | 'Info'
84 | NOTICE_SEVERITY = 'info'
85 | NOTICE_MESSAGE = 'This is a coordinator message with LINK'
86 |
87 | # Lightning node open info, url to amboss and 1ML
88 | NETWORK = 'testnet'
89 | NODE_ALIAS = '🤖RoboSats⚡(Coordinator_NAME)'
90 | NODE_ID = '028.....'
91 |
92 | # Total trade fee as fraction
93 | FEE = 0.002
94 | # Maker/taker fee split. As seen in https://bisq.wiki/Trading_fees . It is implicit that TAKER_FEE_SPLIT = (1 - MAKER_FEE_SPLIT)
95 | # Shall incentivize order making
96 | MAKER_FEE_SPLIT=0.125
97 |
98 | # Time out penalty for canceling takers in SECONDS
99 | PENALTY_TIMEOUT = 60
100 | # Time between routing attempts of buyer invoice in MINUTES
101 | RETRY_TIME = 1
102 |
103 | # Store Order Logs in DB. Verbose logging for each order as property of the order object in DB. Useful for debugging and for learning
104 | # the order flow for new robosats coordinators (prints a pretty timestamped table on the coordinator panel on each order). But a bit heavy
105 | # on write operations and can potentially affect performance for every request.
106 | DISABLE_ORDER_LOGS = False
107 |
108 | # Robosats Development Fund donation as fraction. 0.2 = 20% of successful orders proceeds are donated via keysend.
109 | # Donations to the devfund are important for the sustainability of the project, however, these are totally optional (you
110 | # can run a coordinator without donating devfund!). Coordinators with higher devfund donations % will be more prominently
111 | # displayed (and have special badges), while coordinators that do not donate might eventually lose frontend/client support.
112 | # Leaving the default value (20%) will grant the DevFund contributor badge.
113 | DEVFUND = 0.2
114 |
115 | # Coordinator activity limits
116 | MAX_PUBLIC_ORDERS = 100
117 |
118 | # Trade limits in satoshis
119 | MIN_TRADE = 20000
120 | MAX_TRADE = 5000000
121 |
122 | # For CLTV_expiry calculation
123 | # Assume 8 min/block
124 | BLOCK_TIME = 8
125 | # Safety multiplier in case of mining speed up (CLTV expiry will be times X larger than real time needs for locked bonds/escrow)
126 | MAX_MINING_NETWORK_SPEEDUP_EXPECTED = 1.7
127 |
128 | # Expiration time for locking collateral in SECONDS
129 | EXP_MAKER_BOND_INVOICE = 300
130 | EXP_TAKER_BOND_INVOICE = 200
131 |
132 | # ROUTING
133 | # Proportional routing fee limit (fraction of total payout: % / 100)
134 | PROPORTIONAL_ROUTING_FEE_LIMIT = 0.001
135 | # Base flat limit fee for routing in Sats (used only when proportional is lower than this)
136 | MIN_FLAT_ROUTING_FEE_LIMIT = 10
137 | MIN_FLAT_ROUTING_FEE_LIMIT_REWARD = 2
138 | # Routing timeouts
139 | REWARDS_TIMEOUT_SECONDS = 30
140 | PAYOUT_TIMEOUT_SECONDS = 90
141 | DEBUG_PERMISSIONED_PAYOUTS = False
142 |
143 | # Allow self keysend on keysend function (set true to debug keysend functionality)
144 | ALLOW_SELF_KEYSEND = False
145 |
146 | # REVERSE SUBMARINE SWAP PAYOUTS
147 | # Disable on-the-fly swaps feature
148 | DISABLE_ONCHAIN = False
149 | # Shape of fee to available liquidity curve. Either "linear" or "exponential"
150 | SWAP_FEE_SHAPE = 'exponential'
151 | # EXPONENTIAL. fee (%) = MIN_SWAP_FEE + (MAX_SWAP_FEE - MIN_SWAP_FEE) * e ^ (-LAMBDA * onchain_liquidity_fraction)
152 | SWAP_LAMBDA = 8.8
153 | # LINEAR. 4 parameters needed: min/max fees and min/max balance points. E.g. If 25% or more of liquidity
154 | # is onchain the fee for swap is 2% (minimum), if it is 12% fee is 6%, and for 0% fee is 10%.
155 | # Minimum swap fee as fraction (1%)
156 | MIN_SWAP_FEE = 0.008
157 | # Liquidity split point (LN/onchain) at which we use MIN_SWAP_FEE
158 | MIN_SWAP_POINT = 0.35
159 | # Maximum swap fee as fraction (~10%)
160 | MAX_SWAP_FEE = 0.1
161 | # Liquidity split point (LN/onchain) at which we use MAX_SWAP_FEE
162 | MAX_SWAP_POINT = 0
163 | # Min and Max amount allowed for Swap
164 | MIN_SWAP_AMOUNT = 10000
165 | MAX_SWAP_AMOUNT = 1000000
166 | # Spend Unconfirmed UTXOS (more capital efficient, more risky)
167 | SPEND_UNCONFIRMED = False
168 | # Mining fee confirmation target
169 | SUGGESTED_TARGET_CONF = 4
170 | MINIMUM_TARGET_CONF = 24
171 |
172 | # Fraction rewarded to user from the slashed bond of a counterpart.
173 | # It should not be close to 1, or could be exploited by an attacker trading with himself to DDOS the LN node.
174 | SLASHED_BOND_REWARD_SPLIT = 0.5
175 |
176 | # Username for HTLCs escrows
177 | ESCROW_USERNAME = 'admin'
178 |
179 | #Social
180 | NOSTR_NSEC = 'nsec1vxhs2zc4kqe0dhz4z2gfrdyjsrwf8pg3neeqx6w4nl8djfzdp0dqwd6rxh'
181 |
--------------------------------------------------------------------------------
/compose/env-sample/clntn/tor/.gikeep:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/RoboSats/robosats-deploy/e446170082d28db68f26ffa65ea9916b19a46661/compose/env-sample/clntn/tor/.gikeep
--------------------------------------------------------------------------------
/compose/env-sample/clntn/torrc:
--------------------------------------------------------------------------------
1 | Log notice file /var/log/tor/notices.log
2 |
3 | ## The directory for keeping all the keys/etc. By default, we store
4 | ## things in $HOME/.tor on Unix, and in Application Data\tor on Windows.
5 | DataDirectory /var/lib/tor
6 | DataDirectoryGroupReadable 1
7 |
8 | ## Enable ControlPort
9 | ControlPort 9051
10 | CookieAuthentication 1
11 | CookieAuthFileGroupReadable 1
12 | CookieAuthFile /var/lib/tor/control_auth_cookie
13 |
--------------------------------------------------------------------------------
/compose/env-sample/lndmn/bitcoin.conf:
--------------------------------------------------------------------------------
1 | # Reference: https://en.bitcoin.it/wiki/Running_Bitcoin
2 | # https://github.com/bitcoin/bitcoin/blob/master/share/examples/bitcoin.conf
3 |
4 | server=1
5 | txindex=1
6 | onion=127.0.0.1:9050
7 | torcontrol=127.0.0.1:9051
8 | rpcuser=robosats_mainnet_bitcoind
9 | rpcpassword=robosats_mainnet_bitcoind
10 | zmqpubrawblock=tcp://127.0.0.1:18501
11 | zmqpubrawtx=tcp://127.0.0.1:18502
12 |
13 | # limit upload bandwidth (in MB per day)
14 | maxuploadtarget=500
15 | # In case mempool fills we want to be able to hold our own broadcasted 1 Sat/vbyte txs
16 | maxmempool=800
17 | mempoolexpiry=700
18 | # Allow RPC connections from outside of container localhost
19 | rpcbind=0.0.0.0
20 | # Only connect to typical docker IP addresses (Usually from docker host computer)
21 | rpcallowip=172.0.0.0/255.0.0.0
22 | # Allow access from any IP address (Usually from another computer on LAN)
23 | #rpcallowip=0.0.0.0/0
24 |
25 |
26 | proxy=127.0.0.1:9050
27 | # Run on the main bitcoin network (not the test network).
28 | testnet=0
29 | mainnet=1
30 |
31 | mempoolfullrbf=1
32 |
33 | main.addnode=gliovxxzyy2rkwaoz25khf6oa64c3csqzjn3t6dodsjuf34w6a6ktsyd.onion
34 | main.addnode=ldvhlpsrvspquqnl3gutz7grfu5lb3m2dgnezpl3tlkxgpoiw2g5mzid.onion
35 | main.addnode=s7m4mnd6bokujhywsocxibispktruormushdroeaeqeb3imvztfs3vid.onion
36 | main.addnode=ccjrb6va3j6re4lg2lerlt6wyvlb4tod7qbe7rwiouuapb7etvterxyd.onion
37 | main.addnode=xlpi353v7ia5b73msynr7tmddgxoco7n2r2bljt5txpv6bpzzphkreyd.onion
38 | main.addnode=ira7kqcbff52wofoong2dieh2xlvmw4e7ya3znsqn7wivn6armetvrqd.onion
39 |
40 | [main]
41 | # Only run on Tor
42 | onlynet=onion
43 | rpcbind=0.0.0.0
44 |
45 | # Some testnet settings needed for 0.19, if using testnet
46 | [test]
47 | # Allow RPC connections from outside of container localhost
48 | rpcbind=0.0.0.0
49 |
--------------------------------------------------------------------------------
/compose/env-sample/lndmn/lnd.conf:
--------------------------------------------------------------------------------
1 | # Reference: https://github.com/lightningnetwork/lnd/blob/master/sample-lnd.conf
2 |
3 | debuglevel=info
4 | alias=🤖RoboSats⚡ COORDINATOR_NAME | LND
5 | color=#4126a7
6 | maxpendingchannels=6
7 | bitcoin.active=1
8 | bitcoin.mainnet=1
9 | bitcoin.node=bitcoind
10 | bitcoind.rpcuser=robosats_mainnet_bitcoind
11 | bitcoind.rpcpass=robosats_mainnet_bitcoind
12 | bitcoind.zmqpubrawblock=tcp://127.0.0.1:18501
13 | bitcoind.zmqpubrawtx=tcp://127.0.0.1:18502
14 | bitcoin.feerate=2000
15 | bitcoin.basefee=100000
16 | minchansize=1000000
17 | bitcoin.minhtlc=75000
18 | routerrpc.minrtprob=0.05
19 |
20 | # /robosats-deploy/compose/env/{namespace}/lnd_autounlock_pwd is mounted to /tmp/pwd
21 | wallet-unlock-password-file=/tmp/pwd
22 |
23 | minchansize=1000000
24 | accept-amp=true
25 | accept-keysend=true
26 | protocol.wumbo-channels=true
27 |
28 | routerrpc.estimator=bimodal
29 | routerrpc.bimodal.scale=300000000
30 | routerrpc.bimodal.nodeweight=0.2
31 | routerrpc.bimodal.decaytime=100h
32 |
33 | # Compaction
34 | db.prune-revocation=true
35 | #db.bolt.auto-compact=true
36 |
37 | # Neutrino
38 | neutrino.connect=faucet.lightning.community
39 |
40 | # needed for lit > 0.9.0
41 | rpcmiddleware.enable=true
42 |
43 | # Configuring Tor docs:
44 | # https://github.com/lightningnetwork/lnd/blob/master/docs/configuring_tor.md
45 | tor.active=1
46 | tor.v3=1
47 |
48 | # Listening port will need to be changed if multiple LND instances are running
49 | listen=localhost:9735
50 |
51 | # Allow connection to gRPC from host
52 | rpclisten=0.0.0.0:10009
53 | restlisten=0.0.0.0:8080
54 | tlsextraip=0.0.0.0
55 |
--------------------------------------------------------------------------------
/compose/env-sample/lndtn/bitcoin.conf:
--------------------------------------------------------------------------------
1 | # Reference: https://en.bitcoin.it/wiki/Running_Bitcoin
2 | # https://github.com/bitcoin/bitcoin/blob/master/share/examples/bitcoin.conf
3 |
4 | server=1
5 | txindex=1
6 | onion=127.0.0.1:9050
7 | torcontrol=127.0.0.1:9051
8 | rpcuser=robosats_testnet_bitcoind
9 | rpcpassword=robosats_testnet_bitcoind
10 | zmqpubrawblock=tcp://127.0.0.1:18501
11 | zmqpubrawtx=tcp://127.0.0.1:18502
12 |
13 | # limit upload bandwidth (in MB per day)
14 | maxuploadtarget=10
15 | # Allow RPC connections from outside of container localhost
16 | rpcbind=0.0.0.0
17 | # Only connect to typical docker IP addresses (Usually from docker host computer)
18 | rpcallowip=172.0.0.0/255.0.0.0
19 | # Allow access from any IP address (Usually from another computer on LAN)
20 | #rpcallowip=0.0.0.0/0
21 |
22 | # Run on the test network instead of the real bitcoin network.
23 | testnet=1
24 |
25 | mempoolfullrbf=1
26 |
27 | [main]
28 | # Only run on Tor
29 | onlynet=onion
30 |
31 | # Add Tor seed nodes
32 | addnode=i4x66albngo3sg3w.onion:8333
33 |
34 | # Some testnet settings needed for 0.19, if using testnet
35 | [test]
36 | # Allow RPC connections from outside of container localhost
37 | onlynet=onion
38 | rpcbind=0.0.0.0
39 |
--------------------------------------------------------------------------------
/compose/env-sample/lndtn/compose.env:
--------------------------------------------------------------------------------
1 | # Uncomment the dockerhub tags to override the image versions locally without needing to fork the docker-compose.yml
2 |
3 | # ROBOSATS_TAG='v0.6.0-alpha@sha256:8cc975ff5942a3fb2a09827b8eaafebea1c851eb3ecf9be1aaac1f238cfa9fc1'
4 | # BITCOIND_TAG='24.0.1-alpine@sha256:624b6fb1c282a3db6438ff35fbb9eed6ae260c66ffbb427ae72edd7da6a0ecdc'
5 | # REDIS_TAG='7.2.1-alpine@sha256:7f5a0dfbf379db69dc78434091dce3220e251022e71dcdf36207928cbf9010de'
6 | # NGINX_TAG='1.25.2-alpine-slim@sha256:1b0cb433e90260a96528c987ee78b797e842d510473935304a0931536d10f50d'
7 | # POSTGRES_TAG='14.2-alpine@sha256:2fb5718f2a23dbac9bd1258e886eee90250a5903785e3136d62dd65e19f34982'
8 |
9 | # LND_TAG='v0.17.4-beta@sha256:668eba1fdb1ac520292db95a57059be91895fb8af076eafd6c8b5e757f0f196c'
10 | # THUNDERHUB_TAG='v0.13.23@sha256:5ad834045dc3030ec199615827c415ca07729da32c4739afd3adfc662af4fe65'
11 | # LIT_TAG='v0.11.0-alpha@sha256:dc14c495f699b14c2d75c43097ad3ab40e7287fc136a3203df9cffe17b867b71'
12 |
13 | STATIC='/custom_path/testnet/static'
14 | DATABASE='/custom_path/testnet/database'
15 |
16 | # You can create a cronjob to create PG backups
17 | PG_BACKUP='/custom_path/testnet/pg_backup'
18 | ROBOSATS_ENVS_FILE='env/lndtn/robosats.env'
19 | THUB_ENVS_FILE='env/lndtn/thunderhub.env'
20 | SUFFIX='-lndtn'
21 | NETWORK='testnet'
22 |
23 | # Postgresql Database (Should match with those in robosats-tn.env)
24 | POSTGRES_NAME='postgres'
25 | POSTGRES_USER='postgres'
26 | POSTGRES_PASSWORD='example'
27 |
28 | NGINX_CONFD='./nginx/tn.conf.d'
29 | WELLKNOWN='./nginx/tn.well-known'
30 |
31 | STRFRY_CONF='./env-sample/lndtn/strfry.conf'
32 | STRFRY_URLS_EXTERNAL='./strfry/tn.external_urls.txt'
33 | STRFRY_URLS_FEDERATION='./strfry/tn.federation_urls.txt'
34 | STRFRY_DATA='/custom_path/testnet/strfry'
35 |
36 | RELAY_CONF='./env-sample/lndtn/relay.strfry.conf'
37 | RELAY_URLS_EXTERNAL='./relay/tn.external_urls.txt'
38 | RELAY_URLS_FEDERATION='./relay/tn.federation_urls.txt'
39 | RELAY_DATA='/custom_path/testnet/relay'
40 |
41 | # Port and number of HTTP server workers for the robosats backend
42 | WEB_LOCAL_PORT=8001
43 | GUNICORN_WORKERS=2
44 |
45 | BU_DIR1='/backup'
46 | BU_DIR2='/custom_path/backup'
47 | BU_DIR3='/custom_path/backup2'
48 |
49 | BITCOIN_DATA='/custom_path/testnet/bitcoin'
50 | BITCOIN_CONF='./env-sample/lndtn/bitcoin.conf'
51 | LND_DATA='/custom_path/testnet/lnd'
52 | LND_CONF='./env-sample/lndtn/lnd.conf'
53 | LIT_DATA='/custom_path/testnet/lit'
54 |
55 | GUEST_BITCOIN=/home/bitcoin/.bitcoin
56 |
57 | TOR_DATA='./env-sample/lndtn/tor'
58 | TOR_CONFIG='./env-sample/lndtn/torrc'
59 |
60 | GUEST_LND_RPC_PORT=10009
61 | GUEST_LND_REST_PORT=8080
62 |
63 | HOST_LND_RPC_PORT=10009
64 | HOST_LND_REST_PORT=8080
65 |
66 | THUB_LOCAL_PORT=3001
67 | THUB_ACCOUNTS='./env-sample/lndtn/thunderhub-accounts.yml'
68 | # THUB_ACCOUNTS_CONFIG_PATH must match thub...env
69 | THUB_ACCOUNTS_CONFIG_PATH='/config/accounts.yml'
70 |
71 | LNDG_CONFIG='./env-sample/lndtn/supervisord.conf'
72 | LNDG_DATA='/custom_path/testnet/lndg/data'
73 | LNDG_PASSWORD=999999999
74 |
75 | LIT_LOCAL_PORT=4001
76 | LIT_PASSWORD=999999999
77 |
78 | LND_BACKUP_PATH='/custom_path/testnet/lnd-backup'
79 | NODE_ALIAS='🤖RoboSats⚡(Testnet)'
80 | NODE_COLOR='#4126a7'
81 |
82 | LND_AUTOUNLOCK_PWD='./env-sample/lndtn/lnd_autounlock_pwd'
83 |
--------------------------------------------------------------------------------
/compose/env-sample/lndtn/lnd.conf:
--------------------------------------------------------------------------------
1 | # Reference: https://github.com/lightningnetwork/lnd/blob/master/sample-lnd.conf
2 |
3 | debuglevel=info
4 | alias=🤖RoboSats⚡ COORDINATOR_NAME
5 | color=#4126a7
6 | maxpendingchannels=6
7 | bitcoin.active=1
8 | bitcoin.testnet=1
9 | bitcoin.node=bitcoind
10 | bitcoind.rpcuser=robosats_testnet_bitcoind
11 | bitcoind.rpcpass=robosats_testnet_bitcoind
12 | bitcoind.zmqpubrawblock=tcp://127.0.0.1:18501
13 | bitcoind.zmqpubrawtx=tcp://127.0.0.1:18502
14 |
15 | # /robosats-deploy/compose/env/{namespace}/lnd_autounlock_pwd is mounted to /tmp/pwd
16 | wallet-unlock-password-file=/tmp/pwd
17 |
18 |
19 | routerrpc.estimator=bimodal
20 | routerrpc.bimodal.scale=300000000
21 | routerrpc.bimodal.nodeweight=0.2
22 | routerrpc.bimodal.decaytime=100h
23 |
24 | accept-amp=true
25 | accept-keysend=true
26 | protocol.wumbo-channels=true
27 |
28 | # Neutrino
29 | neutrino.connect=faucet.lightning.community
30 |
31 | # needed for lit > 0.9.0
32 | rpcmiddleware.enable=true
33 |
34 |
35 | # Configuring Tor docs:
36 | # https://github.com/lightningnetwork/lnd/blob/master/docs/configuring_tor.md
37 | tor.active=1
38 | tor.v3=1
39 |
40 | # Listening port will need to be changed if multiple LND instances are running
41 | listen=localhost:9735
42 |
43 | # Allow connection to gRPC from host
44 | rpclisten=0.0.0.0:10009
45 | restlisten=0.0.0.0:8080
46 | tlsextraip=0.0.0.0
47 |
--------------------------------------------------------------------------------
/compose/env-sample/lndtn/lnd_autounlock_pwd:
--------------------------------------------------------------------------------
1 | 12345678
--------------------------------------------------------------------------------
/compose/env-sample/lndtn/relay.strfry.conf:
--------------------------------------------------------------------------------
1 | ##
2 | ## Default strfry config
3 | ##
4 |
5 | # Directory that contains the strfry LMDB database (restart required)
6 | db = "/app/strfry-db/"
7 |
8 | dbParams {
9 | # Maximum number of threads/processes that can simultaneously have LMDB transactions open (restart required)
10 | maxreaders = 256
11 |
12 | # Size of mmap() to use when loading LMDB (default is 10TB, does *not* correspond to disk-space used) (restart required)
13 | mapsize = 10995116277760
14 |
15 | # Disables read-ahead when accessing the LMDB mapping. Reduces IO activity when DB size is larger than RAM. (restart required)
16 | noReadAhead = false
17 | }
18 |
19 | events {
20 | # Maximum size of normalised JSON, in bytes
21 | maxEventSize = 65536
22 |
23 | # Events newer than this will be rejected
24 | rejectEventsNewerThanSeconds = 900
25 |
26 | # Events older than this will be rejected
27 | rejectEventsOlderThanSeconds = 94608000
28 |
29 | # Ephemeral events older than this will be rejected
30 | rejectEphemeralEventsOlderThanSeconds = 60
31 |
32 | # Ephemeral events will be deleted from the DB when older than this
33 | ephemeralEventsLifetimeSeconds = 300
34 |
35 | # Maximum number of tags allowed
36 | maxNumTags = 2000
37 |
38 | # Maximum size for tag values, in bytes
39 | maxTagValSize = 1024
40 | }
41 |
42 | relay {
43 | # Interface to listen on. Use 0.0.0.0 to listen on all interfaces (restart required)
44 | bind = "0.0.0.0"
45 |
46 | # Port to open for the nostr websocket protocol (restart required)
47 | port = 7778
48 |
49 | # Set OS-limit on maximum number of open files/sockets (if 0, don't attempt to set) (restart required)
50 | nofiles = 524288
51 |
52 | # HTTP header that contains the client's real IP, before reverse proxying (ie x-real-ip) (MUST be all lower-case)
53 | realIpHeader = ""
54 |
55 | info {
56 | # NIP-11: Name of this server. Short/descriptive (< 30 characters)
57 | name = "Robosats"
58 |
59 | # NIP-11: Detailed information about relay, free-form
60 | description = "Federation cache system."
61 |
62 | # NIP-11: Administrative nostr pubkey, for contact purposes
63 | pubkey = ""
64 |
65 | # NIP-11: Alternative administrative contact (email, website, etc)
66 | contact = ""
67 | }
68 |
69 | # Maximum accepted incoming websocket frame size (should be larger than max event) (restart required)
70 | maxWebsocketPayloadSize = 131072
71 |
72 | # Websocket-level PING message frequency (should be less than any reverse proxy idle timeouts) (restart required)
73 | autoPingSeconds = 55
74 |
75 | # If TCP keep-alive should be enabled (detect dropped connections to upstream reverse proxy)
76 | enableTcpKeepalive = false
77 |
78 | # How much uninterrupted CPU time a REQ query should get during its DB scan
79 | queryTimesliceBudgetMicroseconds = 10000
80 |
81 | # Maximum records that can be returned per filter
82 | maxFilterLimit = 500
83 |
84 | # Maximum number of subscriptions (concurrent REQs) a connection can have open at any time
85 | maxSubsPerConnection = 3
86 |
87 | writePolicy {
88 | # If non-empty, path to an executable script that implements the writePolicy plugin logic
89 | plugin = ""
90 | }
91 |
92 | compression {
93 | # Use permessage-deflate compression if supported by client. Reduces bandwidth, but slight increase in CPU (restart required)
94 | enabled = true
95 |
96 | # Maintain a sliding window buffer for each connection. Improves compression, but uses more memory (restart required)
97 | slidingWindow = false
98 | }
99 |
100 | logging {
101 | # Dump all incoming messages
102 | dumpInAll = false
103 |
104 | # Dump all incoming EVENT messages
105 | dumpInEvents = false
106 |
107 | # Dump all incoming REQ/CLOSE messages
108 | dumpInReqs = false
109 |
110 | # Log performance metrics for initial REQ database scans
111 | dbScanPerf = false
112 |
113 | # Log reason for invalid event rejection? Can be disabled to silence excessive logging
114 | invalidEvents = true
115 | }
116 |
117 | numThreads {
118 | # Ingester threads: route incoming requests, validate events/sigs (restart required)
119 | ingester = 3
120 |
121 | # reqWorker threads: Handle initial DB scan for events (restart required)
122 | reqWorker = 3
123 |
124 | # reqMonitor threads: Handle filtering of new events (restart required)
125 | reqMonitor = 3
126 |
127 | # negentropy threads: Handle negentropy protocol messages (restart required)
128 | negentropy = 2
129 | }
130 |
131 | negentropy {
132 | # Support negentropy protocol messages
133 | enabled = true
134 |
135 | # Maximum records that sync will process before returning an error
136 | maxSyncEvents = 1000000
137 | }
138 | }
139 |
--------------------------------------------------------------------------------
/compose/env-sample/lndtn/robosats.env:
--------------------------------------------------------------------------------
1 | # Coordinator Alias (Same as longAlias)
2 | COORDINATOR_ALIAS="coordinator_NAME"
3 | # Lightning node vendor: CLN | LND
4 | LNVENDOR='LND'
5 |
6 | # LND directory to read TLS cert and macaroon
7 | LND_DIR='/lnd/'
8 | MACAROON_PATH='data/chain/bitcoin/testnet/admin.macaroon'
9 |
10 | # If LND directory is not specified, cert and macaroon can be provided as base64 strings
11 | # base64 ~/.lnd/tls.cert | tr -d '\n'
12 | LND_CERT_BASE64='LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUNLVENDQWRDZ0F3SUJBZ0lRQ0VoeGpPZXY1bGQyVFNPTXhKalFvekFLQmdncWhrak9QUVFEQWpBNE1SOHcKSFFZRFZRUUtFeFpzYm1RZ1lYVjBiMmRsYm1WeVlYUmxaQ0JqWlhKME1SVXdFd1lEVlFRREV3d3dNakJtTVRnMQpZelkwTnpVd0hoY05Nakl3TWpBNE1UWXhOalV3V2hjTk1qTXdOREExTVRZeE5qVXdXakE0TVI4d0hRWURWUVFLCkV4WnNibVFnWVhWMGIyZGxibVZ5WVhSbFpDQmpaWEowTVJVd0V3WURWUVFERXd3d01qQm1NVGcxWXpZME56VXcKV1RBVEJnY3Foa2pPUFFJQkJnZ3Foa2pPUFFNQkJ3TkNBQVNJVWdkcVMrWFZKL3EzY0JZeWd6ZDc2endaanlmdQpLK3BzcWNYVkFyeGZjU2NXQ25jbXliNGRaMy9Lc3lLWlRaamlySDE3aEY0OGtIMlp5clRZSW9hZG80RzdNSUc0Ck1BNEdBMVVkRHdFQi93UUVBd0lDcERBVEJnTlZIU1VFRERBS0JnZ3JCZ0VGQlFjREFUQVBCZ05WSFJNQkFmOEUKQlRBREFRSC9NQjBHQTFVZERnUVdCQlEwWUJjZXdsd1BqYTJPRXFyTGxzZnJscEswUFRCaEJnTlZIUkVFV2pCWQpnZ3d3TWpCbU1UZzFZelkwTnpXQ0NXeHZZMkZzYUc5emRJSUVkVzVwZUlJS2RXNXBlSEJoWTJ0bGRJSUhZblZtClkyOXVib2NFZndBQUFZY1FBQUFBQUFBQUFBQUFBQUFBQUFBQUFZY0V3S2dRQW9jRUFBQUFBREFLQmdncWhrak8KUFFRREFnTkhBREJFQWlBd0dMY05qNXVZSkVwanhYR05OUnNFSzAwWmlSUUh2Qm50NHp6M0htWHBiZ0lnSWtvUQo3cHFvNGdWNGhiczdrSmt1bnk2bkxlNVg0ZzgxYjJQOW52ZnZ2bkk9Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K'
13 | # base64 ~/.lnd/data/chain/bitcoin/testnet/admin.macaroon | tr -d '\n'
14 | LND_MACAROON_BASE64='AgEDbG5kAvgBAwoQsyI+PK+fyb7F2UyTeZ4seRIBMBoWCgdhZGRyZXNzEgRyZWFkEgV3cml0ZRoTCgRpbmZvEgRyZWFkEgV3cml0ZRoXCghpbnZvaWNlcxIEcmVhZBIFd3JpdGUaIQoIbWFjYXJvb24SCGdlbmVyYXRlEgRyZWFkEgV3cml0ZRoWCgdtZXNzYWdlEgRyZWFkEgV3cml0ZRoXCghvZmZjaGFpbhIEcmVhZBIFd3JpdGUaFgoHb25jaGFpbhIEcmVhZBIFd3JpdGUaFAoFcGVlcnMSBHJlYWQSBXdyaXRlGhgKBnNpZ25lchIIZ2VuZXJhdGUSBHJlYWQAAAYgMt90uD6v4truTadWCjlppoeJ4hZrL1SBb09Y+4WOiI0='
15 |
16 | # CLN directory (only needed if LNVENDOR='CLN')
17 | CLN_DIR='/cln/testnet/'
18 | CLN_GRPC_HOST='localhost:9999'
19 | CLN_GRPC_HOLD_HOST='localhost:9998'
20 |
21 | # Bitcoin Core Daemon RPC, used to validate addresses
22 | # For mainnet the URL port is 8332
23 | BITCOIND_RPCURL = 'http://127.0.0.1:18332'
24 | BITCOIND_RPCUSER = 'robosats_testnet_bitcoind'
25 | BITCOIND_RPCPASSWORD = 'robosats_testnet_bitcoind'
26 |
27 | # Auto unlock LND password. Only used in development docker-compose environment.
28 | # It will fail starting up the node without it.
29 | # To disable auto unlock, comment out 'wallet-unlock-password-file=/tmp/pwd' from 'docker/lnd/lnd.conf'
30 | LND_GRPC_HOST='localhost:10009'
31 |
32 | REDIS_URL='redis://localhost:6379/1'
33 |
34 | # Postgresql Database (These are fed from STACK-XX.env)
35 | # Deprecated
36 | POSTGRES_NAME='postgres'
37 | # New
38 | POSTGRES_DB='postgres'
39 | POSTGRES_USER='postgres'
40 | POSTGRES_PASSWORD='example'
41 | POSTGRES_HOST='127.0.0.1'
42 | POSTGRES_PORT='5432'
43 |
44 | # Tor proxy for remote calls (e.g. fetching prices or sending Telegram messages)
45 | USE_TOR='True'
46 | TOR_PROXY='127.0.0.1:9050'
47 |
48 | # List of market price public APIs. If the currency is available in more than 1 API, will use median price.
49 | MARKET_PRICE_APIS = 'https://blockchain.info/ticker, https://api.yadio.io/exrates/BTC'
50 |
51 | # Host e.g. 'robotesta15gf7845r4rgksb4nmmr43fh77bzn2ia2eucduyeafnyd.onion'
52 | HOST_NAME = 'robotestagw3dcxmd66r4rgksb4nmmr43fh77bzn2ia2eucduyeafnyd.onion'
53 | HOST_NAME2 = '*' # Do not change
54 | I2P_ALIAS = ''
55 | I2P_LONG = ''
56 | LOCAL_ALIAS = ''
57 | ONION_LOCATION = 'robotestagw3dcxmd66r4rgksb4nmmr43fh77bzn2ia2eucduyeafnyd.onion'
58 |
59 | # Geoblocked countries (will reject F2F trades).
60 | # List of A3 country codes (see https://en.wikipedia.org/wiki/ISO_3166-1_alpha-3)
61 | # Leave empty '' to allow all countries.
62 | # Example 'NOR,USA,CZE'.
63 | GEOBLOCKED_COUNTRIES = 'ABW,AFG,AGO'
64 |
65 | # Link to robosats alternative site (shown in frontend in statsfornerds so users can switch mainnet/testnet)
66 | ALTERNATIVE_SITE = 'RoboSats6tkf3eva7x2voqso3a5wcorsnw34jveyxfqi2fu7oyheasid.onion'
67 | ALTERNATIVE_NAME = 'RoboSats Mainnet'
68 |
69 | SECRET_KEY= 'n098eafh82390fu9283uyt9yf023uf'
70 |
71 | # Link to robosats mainnet/testnet (shown on frontend in statsfornerds so users can switch mainnet/testnet)
72 | THE_OTHER_ROBOSATS = 'robosats6tkf3eva7x2voqso3a5wcorsnw34jveyxfqi2fu7oyheasid.onion'
73 |
74 | # Telegram bot token
75 | TELEGRAM_TOKEN = ''
76 | TELEGRAM_BOT_NAME = ''
77 | # Telegram chat id to receive admin notifications
78 | TELEGRAM_COORDINATOR_CHAT_ID = 'AdminNotificationChatId'
79 |
80 | # Notify new messages in-chat app (fiat exchange step) if at least X minutes has passed since the last chat message.
81 | CHAT_NOTIFICATION_TIMEGAP = 5
82 |
83 | # Maintenance notice or other coordinator messages shown on client start
84 | # Style of the notice on the client app, use None for no notice: 'None' | 'Warning' | 'Success' | 'Error' | 'Info'
85 | NOTICE_SEVERITY = 'info'
86 | NOTICE_MESSAGE = 'This is a coordinator message with LINK'
87 |
88 | # Lightning node open info, url to amboss and 1ML
89 | NETWORK = 'testnet'
90 | NODE_ALIAS = '🤖RoboSats⚡(Coordinator_NAME)'
91 | NODE_ID = '028.....'
92 |
93 | # Total trade fee as fraction
94 | FEE = 0.002
95 | # Maker/taker fee split. As seen in https://bisq.wiki/Trading_fees . It is implicit that TAKER_FEE_SPLIT = (1 - MAKER_FEE_SPLIT)
96 | # Shall incentivize order making
97 | MAKER_FEE_SPLIT=0.125
98 |
99 | # Time out penalty for canceling takers in SECONDS
100 | PENALTY_TIMEOUT = 60
101 | # Time between routing attempts of buyer invoice in MINUTES
102 | RETRY_TIME = 1
103 |
104 | # Store Order Logs in DB. Verbose logging for each order as property of the order object in DB. Useful for debugging and for learning
105 | # the order flow for new robosats coordinators (prints a pretty timestamped table on the coordinator panel on each order). But a bit heavy
106 | # on write operations and can potentially affect performance for every request.
107 | DISABLE_ORDER_LOGS = False
108 |
109 | # Robosats Development Fund donation as fraction. 0.2 = 20% of successful orders proceeds are donated via keysend.
110 | # Donations to the devfund are important for the sustainability of the project, however, these are totally optional (you
111 | # can run a coordinator without donating devfund!). Coordinators with higher devfund donations % will be more prominently
112 | # displayed (and have special badges), while coordinators that do not donate might eventually lose frontend/client support.
113 | # Leaving the default value (20%) will grant the DevFund contributor badge.
114 | DEVFUND = 0.2
115 |
116 | # Coordinator activity limits
117 | MAX_PUBLIC_ORDERS = 100
118 |
119 | # Coordinator Order size limits in Satoshi
120 | # Minimum order size (must be bigger than the DB constraint in /robosats/settings.py MIN_TRADE, currently 20_000 Sats)
121 | MIN_ORDER_SIZE = 20000
122 | # Maximum order size (must be smaller than the DB constraint in /robosats/settings.py MAX_TRADE, currently 5_000_000 Sats)
123 | MAX_ORDER_SIZE = 500000
124 |
125 | # For CLTV_expiry calculation
126 | # Assume 8 min/block
127 | BLOCK_TIME = 8
128 | # Safety multiplier in case of mining speed up (CLTV expiry will be times X larger than real time needs for locked bonds/escrow)
129 | MAX_MINING_NETWORK_SPEEDUP_EXPECTED = 1.7
130 |
131 | # Expiration time for locking collateral in SECONDS
132 | EXP_MAKER_BOND_INVOICE = 300
133 | EXP_TAKER_BOND_INVOICE = 200
134 |
135 | # ROUTING
136 | # Proportional routing fee limit (fraction of total payout: % / 100)
137 | PROPORTIONAL_ROUTING_FEE_LIMIT = 0.001
138 | # Base flat limit fee for routing in Sats (used only when proportional is lower than this)
139 | MIN_FLAT_ROUTING_FEE_LIMIT = 10
140 | MIN_FLAT_ROUTING_FEE_LIMIT_REWARD = 2
141 | # Routing timeouts
142 | REWARDS_TIMEOUT_SECONDS = 30
143 | PAYOUT_TIMEOUT_SECONDS = 90
144 | DEBUG_PERMISSIONED_PAYOUTS = False
145 |
146 | # Allow self keysend on keysend function (set true to debug keysend functionality)
147 | ALLOW_SELF_KEYSEND = False
148 |
149 | # REVERSE SUBMARINE SWAP PAYOUTS
150 | # Disable on-the-fly swaps feature
151 | DISABLE_ONCHAIN = False
152 | # Shape of fee to available liquidity curve. Either "linear" or "exponential"
153 | SWAP_FEE_SHAPE = 'exponential'
154 | # EXPONENTIAL. fee (%) = MIN_SWAP_FEE + (MAX_SWAP_FEE - MIN_SWAP_FEE) * e ^ (-LAMBDA * onchain_liquidity_fraction)
155 | SWAP_LAMBDA = 8.8
156 | # LINEAR. 4 parameters needed: min/max fees and min/max balance points. E.g. If 25% or more of liquidity
157 | # is onchain the fee for swap is 2% (minimum), if it is 12% fee is 6%, and for 0% fee is 10%.
158 | # Minimum swap fee as fraction (1%)
159 | MIN_SWAP_FEE = 0.008
160 | # Liquidity split point (LN/onchain) at which we use MIN_SWAP_FEE
161 | MIN_SWAP_POINT = 0.35
162 | # Maximum swap fee as fraction (~10%)
163 | MAX_SWAP_FEE = 0.1
164 | # Liquidity split point (LN/onchain) at which we use MAX_SWAP_FEE
165 | MAX_SWAP_POINT = 0
166 | # Min and Max amount allowed for Swap
167 | MIN_SWAP_AMOUNT = 10000
168 | MAX_SWAP_AMOUNT = 1000000
169 | # Spend Unconfirmed UTXOS (more capital efficient, more risky)
170 | SPEND_UNCONFIRMED = False
171 | # Mining fee confirmation target
172 | SUGGESTED_TARGET_CONF = 4
173 | MINIMUM_TARGET_CONF = 24
174 |
175 | # Fraction rewarded to user from the slashed bond of a counterpart.
176 | # It should not be close to 1, or could be exploited by an attacker trading with himself to DDOS the LN node.
177 | SLASHED_BOND_REWARD_SPLIT = 0.5
178 |
179 | # Username for HTLCs escrows
180 | ESCROW_USERNAME = 'admin'
181 |
182 | #Social
183 | NOSTR_NSEC = 'nsec1vxhs2zc4kqe0dhz4z2gfrdyjsrwf8pg3neeqx6w4nl8djfzdp0dqwd6rxh'
184 |
--------------------------------------------------------------------------------
/compose/env-sample/lndtn/strfry.conf:
--------------------------------------------------------------------------------
1 | ##
2 | ## Default strfry config
3 | ##
4 |
5 | # Directory that contains the strfry LMDB database (restart required)
6 | db = "/app/strfry-db/"
7 |
8 | dbParams {
9 | # Maximum number of threads/processes that can simultaneously have LMDB transactions open (restart required)
10 | maxreaders = 256
11 |
12 | # Size of mmap() to use when loading LMDB (default is 10TB, does *not* correspond to disk-space used) (restart required)
13 | mapsize = 10995116277760
14 |
15 | # Disables read-ahead when accessing the LMDB mapping. Reduces IO activity when DB size is larger than RAM. (restart required)
16 | noReadAhead = false
17 | }
18 |
19 | events {
20 | # Maximum size of normalised JSON, in bytes
21 | maxEventSize = 65536
22 |
23 | # Events newer than this will be rejected
24 | rejectEventsNewerThanSeconds = 900
25 |
26 | # Events older than this will be rejected
27 | rejectEventsOlderThanSeconds = 94608000
28 |
29 | # Ephemeral events older than this will be rejected
30 | rejectEphemeralEventsOlderThanSeconds = 60
31 |
32 | # Ephemeral events will be deleted from the DB when older than this
33 | ephemeralEventsLifetimeSeconds = 300
34 |
35 | # Maximum number of tags allowed
36 | maxNumTags = 2000
37 |
38 | # Maximum size for tag values, in bytes
39 | maxTagValSize = 1024
40 | }
41 |
42 | relay {
43 | # Interface to listen on. Use 0.0.0.0 to listen on all interfaces (restart required)
44 | bind = "0.0.0.0"
45 |
46 | # Port to open for the nostr websocket protocol (restart required)
47 | port = 7777
48 |
49 | # Set OS-limit on maximum number of open files/sockets (if 0, don't attempt to set) (restart required)
50 | nofiles = 524288
51 |
52 | # HTTP header that contains the client's real IP, before reverse proxying (ie x-real-ip) (MUST be all lower-case)
53 | realIpHeader = ""
54 |
55 | info {
56 | # NIP-11: Name of this server. Short/descriptive (< 30 characters)
57 | name = "Robosats"
58 |
59 | # NIP-11: Detailed information about relay, free-form
60 | description = "Federation cache system."
61 |
62 | # NIP-11: Administrative nostr pubkey, for contact purposes
63 | pubkey = ""
64 |
65 | # NIP-11: Alternative administrative contact (email, website, etc)
66 | contact = ""
67 | }
68 |
69 | # Maximum accepted incoming websocket frame size (should be larger than max event) (restart required)
70 | maxWebsocketPayloadSize = 131072
71 |
72 | # Websocket-level PING message frequency (should be less than any reverse proxy idle timeouts) (restart required)
73 | autoPingSeconds = 55
74 |
75 | # If TCP keep-alive should be enabled (detect dropped connections to upstream reverse proxy)
76 | enableTcpKeepalive = false
77 |
78 | # How much uninterrupted CPU time a REQ query should get during its DB scan
79 | queryTimesliceBudgetMicroseconds = 10000
80 |
81 | # Maximum records that can be returned per filter
82 | maxFilterLimit = 500
83 |
84 | # Maximum number of subscriptions (concurrent REQs) a connection can have open at any time
85 | maxSubsPerConnection = 3
86 |
87 | writePolicy {
88 | # If non-empty, path to an executable script that implements the writePolicy plugin logic
89 | plugin = ""
90 | }
91 |
92 | compression {
93 | # Use permessage-deflate compression if supported by client. Reduces bandwidth, but slight increase in CPU (restart required)
94 | enabled = true
95 |
96 | # Maintain a sliding window buffer for each connection. Improves compression, but uses more memory (restart required)
97 | slidingWindow = false
98 | }
99 |
100 | logging {
101 | # Dump all incoming messages
102 | dumpInAll = false
103 |
104 | # Dump all incoming EVENT messages
105 | dumpInEvents = false
106 |
107 | # Dump all incoming REQ/CLOSE messages
108 | dumpInReqs = false
109 |
110 | # Log performance metrics for initial REQ database scans
111 | dbScanPerf = false
112 |
113 | # Log reason for invalid event rejection? Can be disabled to silence excessive logging
114 | invalidEvents = true
115 | }
116 |
117 | numThreads {
118 | # Ingester threads: route incoming requests, validate events/sigs (restart required)
119 | ingester = 3
120 |
121 | # reqWorker threads: Handle initial DB scan for events (restart required)
122 | reqWorker = 3
123 |
124 | # reqMonitor threads: Handle filtering of new events (restart required)
125 | reqMonitor = 3
126 |
127 | # negentropy threads: Handle negentropy protocol messages (restart required)
128 | negentropy = 2
129 | }
130 |
131 | negentropy {
132 | # Support negentropy protocol messages
133 | enabled = true
134 |
135 | # Maximum records that sync will process before returning an error
136 | maxSyncEvents = 1000000
137 | }
138 | }
139 |
--------------------------------------------------------------------------------
/compose/env-sample/lndtn/supervisord.conf:
--------------------------------------------------------------------------------
1 | [supervisord]
2 | user=root
3 | childlogdir = /var/log
4 | logfile = /var/log/supervisord.log
5 | logfile_maxbytes = 50MB
6 | logfile_backups = 30
7 | loglevel = info
8 | pidfile = /var/supervisord.pid
9 | umask = 022
10 | nodaemon = false
11 | nocleanup = false
12 |
13 | [inet_http_server]
14 | port = 9001
15 | username = lndg-supervisord
16 | # Change this password. Keep comments on their own line: supervisord's INI
16 | # parser does not strip inline "#" comments, so appending one would make the
16 | # comment text part of the password value.
16 | password = 8888888888
17 |
18 | [supervisorctl]
19 | serverurl = http://localhost:9001
20 | username = lndg-supervisord
21 | # Change this password (must match the [inet_http_server] password above).
21 | # Keep the comment on its own line — inline "#" comments are not stripped
21 | # by supervisord's INI parser and would become part of the value.
21 | password = 8888888888
22 |
23 | [rpcinterface:supervisor]
24 | supervisor.rpcinterface_factory=supervisor.rpcinterface:make_main_rpcinterface
25 |
26 | [program:controller]
27 | command = sh -c "python controller.py && sleep 15"
28 | process_name = lndg-controller
29 | directory = /app
30 | autorestart = true
31 | redirect_stderr = true
32 | stdout_logfile = /var/log/lndg-controller.log
33 | stdout_logfile_maxbytes = 150MB
34 | stdout_logfile_backups = 15
35 |
--------------------------------------------------------------------------------
/compose/env-sample/lndtn/thunderhub-accounts.yml:
--------------------------------------------------------------------------------
1 | defaultNetwork: "testnet"
2 | accounts:
3 | - name: "RoboSats (Testnet)"
4 | serverUrl: "127.0.0.1:10009"
5 | lndDir: "/lnd/"
6 | password: "sillywhoreadsthis"
7 |
--------------------------------------------------------------------------------
/compose/env-sample/lndtn/thunderhub.env:
--------------------------------------------------------------------------------
1 | # -----------
2 | # Server Configs
3 | # -----------
4 | LOG_LEVEL='warn'
5 | # LOG_JSON=true
6 | TOR_PROXY_SERVER=socks://127.0.0.1:9050
7 | # DISABLE_TWOFA=true
8 |
9 | # -----------
10 | # URLs
11 | # -----------
12 | # MEMPOOL_URL='https://mempool.space'
13 |
14 | # -----------
15 | # Interface Configs
16 | # -----------
17 | THEME='dark'
18 | CURRENCY='sat'
19 |
20 | # -----------
21 | # Subscription Configs
22 | # -----------
23 | # DISABLE_ALL_SUBS=true
24 | # DISABLE_INVOICE_SUB=true
25 | # DISABLE_PAYMENT_SUB=true
26 | # DISABLE_FORWARD_SUB=true
27 | # DISABLE_CHANNEL_SUB=true
28 | # DISABLE_BACKUP_SUB=true
29 |
30 | # -----------
31 | # Privacy Configs
32 | # -----------
33 | FETCH_PRICES=false
34 | FETCH_FEES=false
35 | DISABLE_LINKS=true
36 | DISABLE_LNMARKETS=true
37 | NO_VERSION_CHECK=true
38 |
39 | # -----------
40 | # Account Configs
41 | # -----------
42 | ACCOUNT_CONFIG_PATH='/config/accounts.yml'
43 | # MASTER_PASSWORD_OVERRIDE='secretPasswordForAllAccounts'
44 | # YML_ENV_1=''
45 | # YML_ENV_2=''
46 | # YML_ENV_3=''
47 | # YML_ENV_4=''
48 |
49 | # -----------
50 | # SSO Account Configs
51 | # -----------
52 | # COOKIE_PATH='/lnd/.cookie'
53 | # SSO_SERVER_URL='127.0.0.1:10009'
54 | # SSO_CERT_PATH='/lnd/tls.cert'
55 | # SSO_MACAROON_PATH='/path/to/folder/containing/macaroons'
56 | # DANGEROUS_NO_SSO_AUTH=false
57 | # LOGOUT_URL='http://thunderhub.io'
58 |
59 | # -----------
60 | # SSL Config
61 | # -----------
62 | # PUBLIC_URL='app.example.com'
63 | # SSL_PORT=8080
64 | # SSL_SAVE=true
65 |
--------------------------------------------------------------------------------
/compose/env-sample/lndtn/tor/.gikeep:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/RoboSats/robosats-deploy/e446170082d28db68f26ffa65ea9916b19a46661/compose/env-sample/lndtn/tor/.gikeep
--------------------------------------------------------------------------------
/compose/env-sample/lndtn/torrc:
--------------------------------------------------------------------------------
1 | Log notice file /var/log/tor/notices.log
2 |
3 | ## The directory for keeping all the keys/etc. By default, we store
4 | ## things in $HOME/.tor on Unix, and in Application Data\tor on Windows.
5 | DataDirectory /var/lib/tor
6 | DataDirectoryGroupReadable 1
7 |
8 | ## Enable ControlPort
9 | ControlPort 9051
10 | CookieAuthentication 1
11 | CookieAuthFileGroupReadable 1
12 | CookieAuthFile /var/lib/tor/control_auth_cookie
13 |
14 | # Robosats LND Testnet Onion Service
15 | HiddenServiceDir /var/lib/tor/robotest/
16 | HiddenServiceVersion 3
17 | HiddenServicePort 80 127.0.0.1:80
18 |
19 | # Robosats Admin Testnet Onion Service
20 | HiddenServiceDir /var/lib/tor/robotest-admin/
21 | HiddenServiceVersion 3
22 | HiddenServicePort 80 127.0.0.1:80
23 |
24 | HiddenServiceDir /var/lib/tor/robotest-thunderhub/
25 | HiddenServiceVersion 3
26 | HiddenServicePort 80 127.0.0.1:3000
27 |
28 | HiddenServiceDir /var/lib/tor/robotest-lit/
29 | HiddenServiceVersion 3
30 | HiddenServicePort 8443 127.0.0.1:8443
31 |
32 | HiddenServiceDir /var/lib/tor/robotest-lndg/
33 | HiddenServiceVersion 3
34 | HiddenServicePort 80 127.0.0.1:8889
35 |
--------------------------------------------------------------------------------
/compose/hidden-service.md:
--------------------------------------------------------------------------------
1 | Excerpt from https://serverok.in/tor-hidden-service-in-ubuntu-debian
2 |
3 | To enable hidden service, edit /etc/tor/torrc
4 | ```
5 | nano /etc/tor/torrc
6 | ```
7 | Add lines
8 | ```
9 | HiddenServiceDir /var/lib/tor/hidden_service/
10 | HiddenServicePort 80 127.0.0.1:80
11 | ```
12 |
13 | Create folder for your hidden service
14 | ```
15 | mkdir /var/lib/tor/hidden_service/
16 | chmod 700 /var/lib/tor/hidden_service/
17 | chown -R debian-tor:debian-tor /var/lib/tor/hidden_service/
18 | ```
19 | Set the permissions correctly!
20 |
21 | ```
22 | systemctl start tor@default
23 | ```
24 |
--------------------------------------------------------------------------------
/compose/i2p/docker-compose.yml:
--------------------------------------------------------------------------------
1 | # Example i2p service
2 |
3 | # i2p:
4 | # image: geti2p/i2p
5 | # container_name: i2p
6 | # ports:
7 | # - "7657:7657"
8 | # - "54321:12345"
9 | # - "54321:12345/udp"
10 | # volumes:
11 | # - ./i2pconfig:/i2p/.i2p
12 |
--------------------------------------------------------------------------------
/compose/i2p/run.sh:
--------------------------------------------------------------------------------
1 | # Stand alone I2P docker service. Allows you to run a single I2P router for several coordinators (Mainnet / testnet)
2 | # Edit `/home/user/` for your correct path. It will store the services and other config under /robosats-deploy/i2p/i2pconfig
3 | docker run \
4 | -e JVM_XMX=256m \
5 | -v /home/USER/robosats-deploy/compose/i2p/i2pconfig:/i2p/.i2p \
6 | -p 7657:7657 \
7 | --name i2p \
8 | -d geti2p/i2p:latest
--------------------------------------------------------------------------------
/compose/lndg/Dockerfile:
--------------------------------------------------------------------------------
1 | # Build LNDG (https://github.com/cryptosharks131/lndg) pinned to a release tag,
1 | # plus supervisor (process control) and whitenoise (static file serving).
1 | FROM python:3-alpine
2 | ENV TZ=Europe/London
3 | # key=value form: the space-separated `ENV key value` form is deprecated
3 | ENV PYTHONUNBUFFERED=1
4 | 
5 | # --no-cache: do not keep the apk package index in the image layer
5 | RUN apk add --no-cache git g++ linux-headers && git clone https://github.com/cryptosharks131/lndg /app
6 | WORKDIR /app
7 | 
8 | # Pin a released LNDG version
8 | RUN git checkout "v1.10.0"
9 | # --no-cache-dir: keep pip's download cache out of the image layers
9 | RUN pip install --no-cache-dir -r requirements.txt
10 | RUN pip install --no-cache-dir supervisor whitenoise
11 |
--------------------------------------------------------------------------------
/compose/nginx/mn.conf.d/local.conf:
--------------------------------------------------------------------------------
1 | limit_req_zone $binary_remote_addr zone=tenpersec:10m rate=100r/s;
2 |
3 | # first we declare our upstream server, which is our Gunicorn application
4 | upstream robosats_gunicorn_rest {
5 | # the robosats gunicorn backend listens on localhost:8000 — this target is a
6 | # localhost port, not a docker service name, so no docker DNS is involved
7 | server localhost:8000;
8 |
9 | }
10 |
11 | upstream robosats_daphne_websocket {
12 | # the robosats daphne websocket server listens on localhost:9000 — this target
13 | # is a localhost port, not a docker service name, so no docker DNS is involved
14 | server localhost:9000;
15 | }
16 |
17 | map $host $allowed_onion {
18 | default 0;
19 | "~*your-robotest-admin-onion-address\.onion" 1; # Allows access for your coordinator onion address
20 | }
21 |
22 | # now we declare our main server
23 | server {
24 |
25 | listen 80;
26 | server_name robosats.com;
27 | large_client_header_buffers 4 64k;
28 |
29 | location /static {
30 | alias /usr/src/static;
31 | }
32 |
33 | # Tor to web providers (identification files)
34 | location /.well-known {
35 | alias /usr/src/.well-known;
36 | }
37 |
38 | location / {
39 | # requests are passed to Gunicorn
40 | proxy_pass http://robosats_gunicorn_rest;
41 | proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
42 | proxy_set_header Host $host;
43 | proxy_redirect off;
44 | # Replace with the onion hidden service of your coordinator
45 | add_header Onion-Location http://satstraoq35jffvkgpfoqld32nzw2siuvowanruindbfojowpwsjdgad.onion$request_uri;
46 | limit_req zone=tenpersec burst=10;
47 | }
48 |
49 | location /coordinator {
50 | # Denies any access by default
51 | set $allow_access 0;
52 |
53 | if ($allowed_onion = 1) {
54 | set $allow_access 1; # Allows access for your coordinator onion address
55 | }
56 |
57 | if ($allow_access = 0){
58 | return 403; # Access is forbidden if none of the above conditions are met.
59 | }
60 |
61 | proxy_pass http://robosats_gunicorn_rest;
62 | proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
63 | proxy_set_header Host $host;
64 | proxy_redirect off;
65 | # Replace with the onion hidden service of your coordinator
66 | add_header Onion-Location https://satstraoq35jffvkgpfoqld32nzw2siuvowanruindbfojowpwsjdgad.onion$request_uri;
67 | }
68 |
69 | location /ws/ {
70 | # websockets are passed to Daphne
71 | proxy_pass http://robosats_daphne_websocket;
72 | proxy_http_version 1.1;
73 | proxy_set_header Upgrade $http_upgrade;
74 | proxy_set_header Connection "Upgrade";
75 | proxy_set_header Host $host;
76 | limit_req zone=tenpersec burst=10;
77 | }
78 |
79 | location /nostr {
80 | proxy_pass http://127.0.0.1:7777;
81 | proxy_http_version 1.1;
82 | proxy_set_header Upgrade $http_upgrade;
83 | proxy_set_header Connection "Upgrade";
84 | proxy_set_header Host $host;
85 | }
86 |
87 | location /relay {
88 | proxy_pass http://127.0.0.1:7778;
89 | proxy_http_version 1.1;
90 | proxy_set_header Upgrade $http_upgrade;
91 | proxy_set_header Connection "Upgrade";
92 | proxy_set_header Host $host;
93 | }
94 |
95 | location = /favicon.ico { access_log off; log_not_found off; }
96 |
97 | }
98 |
--------------------------------------------------------------------------------
/compose/nginx/mn.well-known/.gitkeep:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/RoboSats/robosats-deploy/e446170082d28db68f26ffa65ea9916b19a46661/compose/nginx/mn.well-known/.gitkeep
--------------------------------------------------------------------------------
/compose/nginx/tn.conf.d/local.conf:
--------------------------------------------------------------------------------
# Rate-limit bucket keyed on client IP: 10 MB of shared state, 5 requests/second.
limit_req_zone $binary_remote_addr zone=fivepersec:10m rate=5r/s;

# first we declare our upstream server, which is our Gunicorn application
upstream robosats_gunicorn_rest {
    # docker will automatically resolve this to the correct address
    # because we use the same name as the service: "robosats"
    server localhost:8000;

}

upstream robosats_daphne_websocket {
    # docker will automatically resolve this to the correct address
    # because we use the same name as the service: "robosats"
    server localhost:9000;
}

# Maps to 1 only for the coordinator's own onion hostname; used to gate /coordinator.
map $host $allowed_onion {
    default 0;
    "~*testraliar7xkhos2gipv2k65obykofb4jqzl5l4danfryacifi4t7qd\.onion" 1; # Allows access for your coordinator onion address
}

# now we declare our main server
server {

    listen 80;
    server_name satstralia.com;
    large_client_header_buffers 4 64k;

    location /static {
        alias /usr/src/static;
    }

    # Tor to web providers (identification files)
    location /.well-known {
        alias /usr/src/.well-known;
    }

    location / {
        # requests are passed to Gunicorn
        proxy_pass http://robosats_gunicorn_rest;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header Host $host;
        proxy_redirect off;
        # Replace with the onion hidden service of your coordinator
        add_header Onion-Location https://testraliar7xkhos2gipv2k65obykofb4jqzl5l4danfryacifi4t7qd.onion$request_uri;
        limit_req zone=fivepersec burst=10;
    }

    location /coordinator {
        # Denies any access by default
        set $allow_access 0;

        if ($allowed_onion = 1) {
            set $allow_access 1; # Allows access for your coordinator onion address
        }

        if ($allow_access = 0){
            return 403; # Access is forbidden if none of the above conditions are met.
        }

        proxy_pass http://robosats_gunicorn_rest;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header Host $host;
        proxy_redirect off;
        # Replace with the onion hidden service of your coordinator
        add_header Onion-Location https://testraliar7xkhos2gipv2k65obykofb4jqzl5l4danfryacifi4t7qd.onion$request_uri;

    }

    location /ws/ {
        # websockets are passed to Daphne
        # NOTE(review): unlike the mainnet config, no limit_req here — confirm intentional.
        proxy_pass http://robosats_daphne_websocket;
        proxy_http_version 1.1;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "Upgrade";
        proxy_set_header Host $host;
    }

    location /nostr {
        # Websocket proxy to the relay listening locally on 7777.
        proxy_pass http://127.0.0.1:7777;
        proxy_http_version 1.1;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "Upgrade";
        proxy_set_header Host $host;
    }

    location /relay {
        # Websocket proxy to the relay listening locally on 7778.
        proxy_pass http://127.0.0.1:7778;
        proxy_http_version 1.1;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "Upgrade";
        proxy_set_header Host $host;
    }

    location = /favicon.ico { access_log off; log_not_found off; }

}
97 |
--------------------------------------------------------------------------------
/compose/nginx/tn.well-known/.gitkeep:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/RoboSats/robosats-deploy/e446170082d28db68f26ffa65ea9916b19a46661/compose/nginx/tn.well-known/.gitkeep
--------------------------------------------------------------------------------
/compose/relay/Dockerfile:
--------------------------------------------------------------------------------
# strfry nostr relay (federation "relay" instance), built from source.
FROM ubuntu:jammy
ENV TZ=Europe/London

# Build dependencies. Clean the apt lists in the same layer so the
# package index does not bloat the image.
RUN apt update && apt install -y --no-install-recommends \
    git g++ make pkg-config libtool ca-certificates \
    libssl-dev zlib1g-dev liblmdb-dev libflatbuffers-dev \
    libsecp256k1-dev libzstd-dev \
    && rm -rf /var/lib/apt/lists/*

# setup app: pinned to a release tag for reproducible builds
RUN git clone --branch 1.0.4 --single-branch https://github.com/hoytech/strfry /app

WORKDIR /app

RUN git submodule update --init \
    && make setup-golpe \
    && make clean \
    && make -j4

# Runtime dependencies: shared libraries, torsocks (sync through Tor) and cron.
# (Previously "cron\" had no space before the line continuation.)
RUN apt update && apt install -y --no-install-recommends \
    liblmdb0 libflatbuffers1 libsecp256k1-0 libb2-1 libzstd1 torsocks cron \
    && rm -rf /var/lib/apt/lists/*

# Point torsocks at the tor container's SOCKS proxy.
RUN echo "TorAddress 127.0.0.1" >> /etc/tor/torsocks.conf \
    && echo "TorPort 9050" >> /etc/tor/torsocks.conf

# Setting up crontab (periodic federation / external relay sync jobs)
COPY crontab /etc/cron.d/crontab
RUN chmod 0644 /etc/cron.d/crontab && crontab /etc/cron.d/crontab

# Setting up entrypoints
COPY sync_external.sh /etc/strfry/sync_external.sh
COPY sync_federation.sh /etc/strfry/sync_federation.sh
COPY entrypoint.sh /etc/strfry/entrypoint.sh
RUN chmod +x /etc/strfry/entrypoint.sh \
    /etc/strfry/sync_external.sh \
    /etc/strfry/sync_federation.sh

# Setting up logs
RUN touch /var/log/cron.log && chmod 0644 /var/log/cron.log

ENTRYPOINT ["/etc/strfry/entrypoint.sh"]
44 |
--------------------------------------------------------------------------------
/compose/relay/README.md:
--------------------------------------------------------------------------------
1 | ## Synchronization
2 |
To keep a healthy network, make sure your relay synchronizes with at least 2 clearnet relays (implementing strfry).

Ideally, pick relays different from the ones other coordinators already synchronize with.
6 |
--------------------------------------------------------------------------------
/compose/relay/crontab:
--------------------------------------------------------------------------------
1 | # Edit this file to introduce tasks to be run by cron.
2 | #
3 | # Each task to run has to be defined through a single line
4 | # indicating with different fields when the task will be run
5 | # and what command to run for the task
6 | #
7 | # To define the time you can provide concrete values for
8 | # minute (m), hour (h), day of month (dom), month (mon),
9 | # and day of week (dow) or use '*' in these fields (for 'any').
10 | #
11 | # Notice that tasks will be started based on the cron's system
12 | # daemon's notion of time and timezones.
13 | #
14 | # Output of the crontab jobs (including errors) is sent through
15 | # email to the user the crontab file belongs to (unless redirected).
16 | #
17 | # For example, you can run a backup of all your user accounts
18 | # at 5 a.m every week with:
19 | # 0 5 * * 1 tar -zcf /var/backups/home.tgz /home/
20 | #
21 | # For more information see the manual pages of crontab(5) and cron(8)
22 | #
23 | # m h dom mon dow command
# Every 2 minutes: sync federation events with the other coordinators'
# onion relays (torsocks routes the connection through the Tor SOCKS proxy).
*/2 * * * * torsocks /etc/strfry/sync_federation.sh >> /var/log/cron.log 2>&1
# Every 5 minutes: sync with the external clearnet relays.
*/5 * * * * torsocks /etc/strfry/sync_external.sh >> /var/log/cron.log 2>&1
26 |
--------------------------------------------------------------------------------
/compose/relay/entrypoint.sh:
--------------------------------------------------------------------------------
#!/bin/sh
# Start cron (runs the periodic sync jobs) and stream its log in the
# background, then exec strfry as the container's main process so it
# receives stop signals (SIGTERM) directly instead of via the shell.
cron -f -l 8 &
tail -f /var/log/cron.log &
exec /app/strfry relay
4 |
--------------------------------------------------------------------------------
/compose/relay/mn.external_urls.txt:
--------------------------------------------------------------------------------
1 | wss://nostr.satstralia.com
2 | wss://relay.damus.io
3 | wss://freelay.sovbit.host
4 |
--------------------------------------------------------------------------------
/compose/relay/mn.federation_urls.txt:
--------------------------------------------------------------------------------
1 | ws://ngdk7ocdzmz5kzsysa3om6du7ycj2evxp2f2olfkyq37htx3gllwp2yd.onion/relay
2 | ws://4t4jxmivv6uqej6xzx2jx3fxh75gtt65v3szjoqmc4ugdlhipzdat6yd.onion/relay
3 | ws://mmhaqzuirth5rx7gl24d4773lknltjhik57k7ahec5iefktezv4b3uid.onion/relay
4 | ws://otmoonrndnrddqdlhu6b36heunmbyw3cgvadqo2oqeau3656wfv7fwad.onion/relay
5 |
--------------------------------------------------------------------------------
/compose/relay/sync_external.sh:
--------------------------------------------------------------------------------
#!/bin/sh
# Two-way sync of order-book events (kind 38383) with each external
# clearnet relay listed one-per-line in external_urls.txt.

filters_external='{"kinds":[38383]}'
# Abort a sync against an unreachable/hung relay after this long.
timeout_duration="15s"

while IFS= read -r line; do
    # Skip blank lines and comments so strfry is never invoked with an empty URL.
    case "$line" in ""|\#*) continue ;; esac
    # Quote "$line" to avoid word splitting / globbing of the URL.
    timeout "$timeout_duration" /app/strfry --config /etc/strfry.conf sync "$line" --filter "$filters_external" --dir both
done < /app/external_urls.txt
9 |
--------------------------------------------------------------------------------
/compose/relay/sync_federation.sh:
--------------------------------------------------------------------------------
#!/bin/sh
# Two-way sync of federation events (order book 38383, coordinator info
# 31986, gift wraps 1059) with each coordinator relay in federation_urls.txt.

filters_federation='{"kinds":[38383, 31986, 1059]}'
# Abort a sync against an unreachable/hung relay after this long.
timeout_duration="15s"

while IFS= read -r line; do
    # Skip blank lines and comments so strfry is never invoked with an empty URL.
    case "$line" in ""|\#*) continue ;; esac
    # Quote "$line" to avoid word splitting / globbing of the URL.
    timeout "$timeout_duration" /app/strfry --config /etc/strfry.conf sync "$line" --filter "$filters_federation" --dir both
done < /app/federation_urls.txt
9 |
--------------------------------------------------------------------------------
/compose/relay/tn.external_urls.txt:
--------------------------------------------------------------------------------
1 | wss://nostr.satstralia.com
2 | wss://relay.damus.io
3 | wss://freelay.sovbit.host
4 |
--------------------------------------------------------------------------------
/compose/relay/tn.federation_urls.txt:
--------------------------------------------------------------------------------
1 | ws://jpp3w5tpxtyg6lifonisdszpriiapszzem4wod2zsdweyfenlsxeoxid.onion/relay
2 | ws://ghbtv7lhoyhomyir4xvxaeyqgx4ylxksia343jaat3njqqlkqpdjqcyd.onion/relay
3 | ws://wsjyhbashc4zrrex6vijpryujggbka5plry2o62dxqoz3pxinblnj4ad.onion/relay
4 | ws://otmtestgbj3kqo3nre6oksusuqfb4ids5zg2y5z2qza2jogeu67stwid.onion/relay
5 |
--------------------------------------------------------------------------------
/compose/strfry/Dockerfile:
--------------------------------------------------------------------------------
# strfry nostr relay ("nostr" instance, KoalaSat fork), built from source.
FROM ubuntu:jammy
ENV TZ=Europe/London

# Build dependencies. Clean the apt lists in the same layer so the
# package index does not bloat the image.
RUN apt update && apt install -y --no-install-recommends \
    git g++ make pkg-config libtool ca-certificates \
    libssl-dev zlib1g-dev liblmdb-dev libflatbuffers-dev \
    libsecp256k1-dev libzstd-dev \
    && rm -rf /var/lib/apt/lists/*

# setup app
# NOTE(review): this clone is unpinned (default branch HEAD), so builds are
# not reproducible — consider pinning a tag/commit as the relay image does.
RUN git clone https://github.com/KoalaSat/strfry /app

WORKDIR /app

RUN git submodule update --init \
    && make setup-golpe \
    && make clean \
    && make -j4

# Runtime dependencies: shared libraries, torsocks (sync through Tor) and cron.
# (Previously "cron\" had no space before the line continuation.)
RUN apt update && apt install -y --no-install-recommends \
    liblmdb0 libflatbuffers1 libsecp256k1-0 libb2-1 libzstd1 torsocks cron \
    && rm -rf /var/lib/apt/lists/*

# Point torsocks at the tor container's SOCKS proxy.
RUN echo "TorAddress 127.0.0.1" >> /etc/tor/torsocks.conf \
    && echo "TorPort 9050" >> /etc/tor/torsocks.conf

# Setting up crontab (periodic federation / external relay sync jobs)
COPY crontab /etc/cron.d/crontab
RUN chmod 0644 /etc/cron.d/crontab && crontab /etc/cron.d/crontab

# Setting up entrypoints
COPY sync_external.sh /etc/strfry/sync_external.sh
COPY sync_federation.sh /etc/strfry/sync_federation.sh
COPY entrypoint.sh /etc/strfry/entrypoint.sh
RUN chmod +x /etc/strfry/entrypoint.sh \
    /etc/strfry/sync_external.sh \
    /etc/strfry/sync_federation.sh

# Setting up logs
RUN touch /var/log/cron.log && chmod 0644 /var/log/cron.log

ENTRYPOINT ["/etc/strfry/entrypoint.sh"]
44 |
--------------------------------------------------------------------------------
/compose/strfry/README.md:
--------------------------------------------------------------------------------
1 | ## Synchronization
2 |
To keep a healthy network, make sure your relay synchronizes with at least 2 clearnet relays (implementing strfry).

Ideally, pick relays different from the ones other coordinators already synchronize with.
6 |
--------------------------------------------------------------------------------
/compose/strfry/crontab:
--------------------------------------------------------------------------------
1 | # Edit this file to introduce tasks to be run by cron.
2 | #
3 | # Each task to run has to be defined through a single line
4 | # indicating with different fields when the task will be run
5 | # and what command to run for the task
6 | #
7 | # To define the time you can provide concrete values for
8 | # minute (m), hour (h), day of month (dom), month (mon),
9 | # and day of week (dow) or use '*' in these fields (for 'any').
10 | #
11 | # Notice that tasks will be started based on the cron's system
12 | # daemon's notion of time and timezones.
13 | #
14 | # Output of the crontab jobs (including errors) is sent through
15 | # email to the user the crontab file belongs to (unless redirected).
16 | #
17 | # For example, you can run a backup of all your user accounts
18 | # at 5 a.m every week with:
19 | # 0 5 * * 1 tar -zcf /var/backups/home.tgz /home/
20 | #
21 | # For more information see the manual pages of crontab(5) and cron(8)
22 | #
23 | # m h dom mon dow command
# Every 2 minutes: sync federation events with the other coordinators'
# onion relays (torsocks routes the connection through the Tor SOCKS proxy).
*/2 * * * * torsocks /etc/strfry/sync_federation.sh >> /var/log/cron.log 2>&1
# Every 5 minutes: sync with the external clearnet relays.
*/5 * * * * torsocks /etc/strfry/sync_external.sh >> /var/log/cron.log 2>&1
26 |
--------------------------------------------------------------------------------
/compose/strfry/entrypoint.sh:
--------------------------------------------------------------------------------
#!/bin/sh
# Start cron (runs the periodic sync jobs) and stream its log in the
# background, then exec strfry as the container's main process so it
# receives stop signals (SIGTERM) directly instead of via the shell.
cron -f -l 8 &
tail -f /var/log/cron.log &
exec /app/strfry relay
4 |
--------------------------------------------------------------------------------
/compose/strfry/mn.external_urls.txt:
--------------------------------------------------------------------------------
1 | wss://nostr.satstralia.com
2 | wss://relay.damus.io
3 | wss://freelay.sovbit.host
--------------------------------------------------------------------------------
/compose/strfry/mn.federation_urls.txt:
--------------------------------------------------------------------------------
1 | ws://ngdk7ocdzmz5kzsysa3om6du7ycj2evxp2f2olfkyq37htx3gllwp2yd.onion/nostr
2 | ws://4t4jxmivv6uqej6xzx2jx3fxh75gtt65v3szjoqmc4ugdlhipzdat6yd.onion/nostr
3 | ws://mmhaqzuirth5rx7gl24d4773lknltjhik57k7ahec5iefktezv4b3uid.onion/nostr
4 | ws://otmoonrndnrddqdlhu6b36heunmbyw3cgvadqo2oqeau3656wfv7fwad.onion/nostr
5 |
--------------------------------------------------------------------------------
/compose/strfry/sync_external.sh:
--------------------------------------------------------------------------------
#!/bin/sh
# Two-way sync of order-book events (kind 38383) with each external
# clearnet relay listed one-per-line in external_urls.txt.

filters_external='{"kinds":[38383]}'
# Abort a sync against an unreachable/hung relay after this long.
timeout_duration="15s"

while IFS= read -r line; do
    # Skip blank lines and comments so strfry is never invoked with an empty URL.
    case "$line" in ""|\#*) continue ;; esac
    # Quote "$line" to avoid word splitting / globbing of the URL.
    timeout "$timeout_duration" /app/strfry --config /etc/strfry.conf sync "$line" --filter "$filters_external" --dir both
done < /app/external_urls.txt
9 |
10 |
--------------------------------------------------------------------------------
/compose/strfry/sync_federation.sh:
--------------------------------------------------------------------------------
#!/bin/sh
# Two-way sync of federation events (order book 38383, coordinator info
# 31986, gift wraps 1059) with each coordinator relay in federation_urls.txt.

filters_federation='{"kinds":[38383, 31986, 1059]}'
# Abort a sync against an unreachable/hung relay after this long.
timeout_duration="15s"

while IFS= read -r line; do
    # Skip blank lines and comments so strfry is never invoked with an empty URL.
    case "$line" in ""|\#*) continue ;; esac
    # Quote "$line" to avoid word splitting / globbing of the URL.
    timeout "$timeout_duration" /app/strfry --config /etc/strfry.conf sync "$line" --filter "$filters_federation" --dir both
done < /app/federation_urls.txt
9 |
--------------------------------------------------------------------------------
/compose/strfry/tn.external_urls.txt:
--------------------------------------------------------------------------------
1 | wss://nostr.satstralia.com
2 | wss://relay.damus.io
3 | wss://freelay.sovbit.host
--------------------------------------------------------------------------------
/compose/strfry/tn.federation_urls.txt:
--------------------------------------------------------------------------------
1 | ws://jpp3w5tpxtyg6lifonisdszpriiapszzem4wod2zsdweyfenlsxeoxid.onion/nostr
2 | ws://ghbtv7lhoyhomyir4xvxaeyqgx4ylxksia343jaat3njqqlkqpdjqcyd.onion/nostr
3 | ws://wsjyhbashc4zrrex6vijpryujggbka5plry2o62dxqoz3pxinblnj4ad.onion/nostr
4 | ws://otmtestgbj3kqo3nre6oksusuqfb4ids5zg2y5z2qza2jogeu67stwid.onion/nostr
5 |
--------------------------------------------------------------------------------
/compose/systemd.md:
--------------------------------------------------------------------------------
1 | Docker compose as a systemd unit
2 | ================================
3 |
4 | Initially designed for `docker-compose` binary. Not tested for built in `docker-compose-plugin` (i.e., `docker compose` commands).
Create the file `/etc/systemd/system/docker-compose@.service`. systemd calls binaries using absolute paths; in this example they are prefixed with `/usr/local/bin`, so adjust the paths for your environment.
6 |
7 | ```ini
8 | [Unit]
9 | Description=RoboSats Mainnet Full Stack
10 | Requires=docker.service
11 | After=docker.service
12 |
[Service]
Type=oneshot
RemainAfterExit=true
# Note: Restart= values other than "no" are not allowed for Type=oneshot
# units — systemd refuses to load a oneshot unit with Restart=always.
WorkingDirectory=/home/USER/robosats-deploy/compose
# To make the "@" template generic, use: WorkingDirectory=/etc/docker/compose/%i
ExecStart=/usr/local/bin/docker-compose up -d --remove-orphans
ExecStop=/usr/local/bin/docker-compose down
22 |
23 | [Install]
24 | WantedBy=multi-user.target
25 | ```
26 |
27 | Place your `docker-compose.yml` into `/etc/docker/compose/myservice` and call
28 |
29 | ```
30 | systemctl start docker-compose@myservice
31 | ```
32 |
33 |
34 | Docker cleanup timer with system
35 | ================================
36 |
37 | Create `/etc/systemd/system/docker-cleanup.timer` with this content:
38 |
39 | ```ini
40 | [Unit]
41 | Description=Docker cleanup timer
42 |
43 | [Timer]
44 | OnUnitInactiveSec=12h
45 |
46 | [Install]
47 | WantedBy=timers.target
48 | ```
49 |
50 | And service file `/etc/systemd/system/docker-cleanup.service`:
51 |
52 | ```ini
53 | [Unit]
54 | Description=Docker cleanup
55 | Requires=docker.service
56 | After=docker.service
57 |
58 | [Service]
59 | Type=oneshot
60 | WorkingDirectory=/tmp
61 | User=root
62 | Group=root
63 | ExecStart=/usr/bin/docker system prune -af
64 |
65 | [Install]
66 | WantedBy=multi-user.target
67 | ```
68 |
69 | run `systemctl enable docker-cleanup.timer` for enabling the timer
70 |
71 | JournalD support
72 | ================
73 |
74 | Just add the following line to the `/etc/docker/daemon.json`:
75 |
76 | ```json
77 | {
78 | ...
79 | "log-driver": "journald",
80 | ...
81 | }
82 | ```
83 |
84 | And restart your docker service.
--------------------------------------------------------------------------------
/compose/tor/Dockerfile:
--------------------------------------------------------------------------------
# Tor daemon image. Runs tor as the unprivileged "tor" user; a helper
# account ("alice") is created at build time and its UID/GID is remapped
# by entrypoint.sh so mounted volumes keep host-friendly ownership.
FROM alpine:3

RUN apk --no-cache --no-progress add tor=~0.4

# 9001: OR port, 9050: SOCKS proxy used by the other services.
EXPOSE 9001 9050

# Root is required at startup so entrypoint.sh can remap UIDs and chown volumes.
# hadolint ignore=DL3002
USER root
ARG LOCAL_USER_ID=9999
ENV TOR_DATA=/var/lib/tor

# Add useradd and usermod
# Create user account (UID will be changed in entrypoint script)
RUN apk --no-cache --no-progress add shadow=~4 sudo=~1 && \
    useradd -u $LOCAL_USER_ID --shell /bin/sh -m alice && \
    usermod -g alice tor

COPY entrypoint.sh /root/entrypoint.sh

ENTRYPOINT [ "/root/entrypoint.sh" ]
--------------------------------------------------------------------------------
/compose/tor/entrypoint.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | set -e
3 |
4 | # Change local user id and group
5 | usermod -u 1000 alice
6 | groupmod -g 1000 alice
7 |
8 | # Set correct owners on volumes
9 | chown -R tor:alice /var/lib/tor
10 | chown -R :alice /etc/tor
11 | chown -R alice:alice /home/alice
12 |
13 | exec sudo -u tor /usr/bin/tor
--------------------------------------------------------------------------------
/k8s/README.md:
--------------------------------------------------------------------------------
1 | # Kubernetes deployment
2 | This orchestration is Work in Progress. Help from expert in K8S is very appreciated. Rewarded (Sats) tasks can be created to finalize this work.
3 |
4 | # dev environment
5 | Needs microk8s / minikube, kubectl and helm.
6 |
7 | Add helm bitnami repo
8 |
9 | ```
10 | helm repo add bitnami https://charts.bitnami.com/bitnami
11 | ```
12 |
13 | # microk8s
14 |
15 | Install
16 | ```
17 | snap install microk8s --classic
18 | ```
19 |
20 | Add rights to your user
21 | ```
22 | sudo usermod -a -G microk8s
23 | sudo chown -f -R ~/.kube
24 | newgrp microk8s
25 | ```
26 |
27 | Shortcut for kubectl as mkctl (feel free to add bashrc `nano ~/.bashrc`)
28 | ```
29 | alias mkctl="microk8s kubectl"
30 | ```
31 |
Install iSCSI for Ubuntu (a prerequisite of OpenEBS)
33 | ```
34 | sudo apt-get update
35 | sudo apt-get install open-iscsi -y
36 | sudo systemctl enable --now iscsid
37 | ```
38 |
39 | Start microk8s
40 | ```
microk8s start
42 | ```
43 |
44 | Enable dns, community and openebs storage
45 | ```
46 | microk8s enable dns
47 | microk8s enable community
48 | microk8s enable openebs
49 | ```
50 |
51 | Enable Dashboard
52 | ```
53 | microk8s enable dashboard
54 | microk8s dashboard-proxy
55 | ```
56 |
57 | Delete default coredns configmap (we override it to add hosts)
58 | ```
59 | mkctl delete configmap coredns -n kube-system
60 | ```
61 |
62 | Apply all cluster configuration for a variant, e.g. testnet
63 | ```
64 | cd robosats-deploy/k8s
65 | mkctl apply -k base
66 | ```
67 |
68 | More info on openebs-hostpath volumes in https://openebs.io/docs/user-guides/localpv-hostpath (also guides to backup).
69 | Local data within the PVCs will be stored persistently in the pvc directories under
70 | ```
71 | /var/snap/microk8s/common/var/openebs/local/
72 | ```
73 |
74 | Set default namespace for mkctl commands
75 | ```
76 | mkctl config set-context --current --namespace=testnet
77 | ```
78 |
79 | Create onion-service secret with privkey from existing Onion V3 files
80 | ```
81 | mkctl create secret generic my-full-onion-secret \
82 | --from-file=privateKeyFile=~/path/to/hs_ed25519_secret_key \
83 | --from-file=publicKeyFile=~/path/to/hs_ed25519_public_key \
84 | --from-file=onionAddress=~/path/to/hostname
85 | ```
86 | Print onion hostname
87 | ```
mkctl exec <tor-pod> -- cat /var/lib/tor/robosite/hostname
89 | ```
90 |
91 | Export .yml of a resource
92 | ```
mkctl get <kind> <name> -o yaml > <name>.yml
94 | ```
95 |
96 |
97 | First time start up of LND. Create wallet. First comment out the auto-unlock-file line. Then apply the statefulset lnd
98 | ```
99 | # create wallet
100 | mkctl exec -it lnd-0 -- lncli create
101 | ```
102 |
103 | ## TODO
104 |
105 |
106 | - [ ] Implement CLN service for coordinators that prefer core-lightning
107 | - [ ] Bitcoind use onlynets Tor / I2P
108 | - [ ] Open I2P to other hosts
109 | - [ ] Run LND
110 | - [ ] Mount LND dir to gunicorn, celery-worker and follow invoices
111 |
112 | - [ ] Learn configmaps (put variables into deployment for example: gunicorn number of workers... now hardcoded as 2)
113 | - [ ] Also study this: Kubernetes namespace kustomizations
114 | https://kubernetes.io/docs/tasks/manage-kubernetes-objects/kustomization/
115 |
116 | - [ ] Research whitenoise to improve static serving directly with gunicorn: http://whitenoise.evans.io/en/stable/django.html
117 | - [ ] Implement torrc cookie authentication method
118 |
119 | - [ ] Network File Storage so multiple nodes of MicroK8s can access data https://microk8s.io/docs/nfs
- [ ] Research OpenEBS storage solution
121 |
122 | ```
123 | mkctl apply -f https://openebs.github.io/charts/openebs-operator.yaml
124 | ```
125 |
126 | ## Locally using robosats
127 |
128 | ```
129 | minikube service gunicorn -n testnet --url
130 | > http://192.168.49.2:30677
131 | ```
132 | Use in browser
133 |
134 | ## First start up
135 |
136 | run for all .yml on k8s folder
137 | ```
138 | kubectl apply -f .
139 | ```
140 |
141 | Create database and admin
142 | ```
kubectl exec -it <robosats-pod> -n testnet -- bash
144 | python3 manage.py makemigrations control api chat
145 | python3 manage.py migrate
146 | python3 manage.py createsuperuser
147 | python3 manage.py collectstatic
148 | ```
149 |
150 | Warning django webserver will start up faster than postgres. Needs to be staged.
151 |
152 |
153 | ## For convenience
154 |
155 | change kubectl default namespace to testnet or mainnet
156 | ```
157 | kubectl config set-context --current --namespace=testnet
158 | ```
159 |
160 | ## k8s dev tricks used
161 |
162 | Create a configmap.yml or secret.yml from any file. Then mount the configmap as a file.
163 | https://stackoverflow.com/questions/58407501/how-to-deploy-nginx-config-file-in-kubernetes
164 |
165 | ```
166 | kubectl create configmap nginx-configmap --from-file=./nginx.conf
167 | kubectl get configmap nginx-configmap -n testnet -o yaml > nginx-configmap.yml
168 | ```
--------------------------------------------------------------------------------
/k8s/base/bitcoind/configmap.yml:
--------------------------------------------------------------------------------
apiVersion: v1
kind: ConfigMap
metadata:
  name: bitcoind-configmap
# Each key is injected as an env var (envFrom in the statefulset) and
# expanded into a bitcoind CLI flag there, e.g. --txindex=$(txindex).
data:
  server: '1'
  txindex: '1'
  onion: 'tor:9050'
  proxy: 'tor:9050' # connects to IP nodes through TOR proxy
  torcontrol: 'tor:9051'
  i2psam: 'i2p-cluster:7656'
  i2pacceptincoming: "1"
  zmqpubrawblock: 'tcp://0.0.0.0:18501'
  zmqpubrawtx: 'tcp://0.0.0.0:18502'
  maxuploadtarget: '20'
  rpcbind: '0.0.0.0'
  rpcallowip: 0.0.0.0/0
  testnet: '1'
  # bitcoind accepts -onlynet multiple times, but a ConfigMap cannot repeat
  # a key — hence the onlynet1/onlynet2 split consumed by the args list.
  onlynet1: 'onion'
  onlynet2: 'i2p'
21 |
22 | # data:
23 | # bitcoin.conf: |
24 | # # Reference: https://en.bitcoin.it/wiki/Running_Bitcoin
25 | # # https://github.com/bitcoin/bitcoin/blob/master/share/examples/bitcoin.conf
26 |
27 | # server=1
28 | # txindex=1
29 | # onion=tor:9050
30 | # proxy=tor:9050
31 | # torcontrol=tor:9051
32 | # rpcuser=bitcoindbase
33 | # rpcpassword=bitcoindbase
34 | # zmqpubrawblock=tcp://0.0.0.0:18501
35 | # zmqpubrawtx=tcp://0.0.0.0:18502
36 |
37 | # # limit upload bandwith (in MB per day)
38 | # maxuploadtarget=20
39 | # # Allow RPC connections from outside of container localhost
40 | # rpcbind=0.0.0.0
41 | # # Only connect to typical docker IP addresses (Usually from docker host computer)
42 | # #rpcallowip=172.0.0.0/255.0.0.0
43 | # # Allow access from any IP address (Usually from another computer on LAN)
44 | # rpcallowip=0.0.0.0/0
45 |
46 | # # Run on the test network instead of the real bitcoin network.
47 | # testnet=1
48 |
49 | # [main]
50 | # # Only run on Tor
51 | # onlynet=onion
52 |
53 | # # Add Tor seed nodes
54 | # addnode=i4x66albngo3sg3w.onion:8333
55 |
56 | # # Some testnet settings needed for 0.19, if using testnet
57 | # [test]
58 | # # Allow RPC connections from outside of container localhost
59 | # rpcbind=0.0.0.0
--------------------------------------------------------------------------------
/k8s/base/bitcoind/pvc.yml:
--------------------------------------------------------------------------------
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: bitcoind-pvc
  labels:
    app: bitcoind
spec:
  # The microk8s OpenEBS addon provisions local volumes through the
  # "openebs-hostpath" StorageClass; the previous value "openebs-host"
  # does not exist, leaving the claim permanently Pending.
  storageClassName: openebs-hostpath
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 50Gi
--------------------------------------------------------------------------------
/k8s/base/bitcoind/secret.yml:
--------------------------------------------------------------------------------
# Use: echo -n 'super-secret-password' | base64
# (-n matters: without it the encoded value carries a trailing newline,
# which becomes part of the credential and breaks bitcoind RPC auth.
# The previous values ended in "Cg==", i.e. "bitcoindbase\n".)
apiVersion: v1
kind: Secret
metadata:
  name: bitcoind-secret
  labels:
    app: bitcoind
type: Opaque
data:
  rpcuser: Yml0Y29pbmRiYXNl # bitcoindbase
  rpcpassword: Yml0Y29pbmRiYXNl # bitcoindbase
  # Tor hashed control password; re-encoded so it matches the plaintext in
  # the comment (the old base64 decoded to "...EC4@\0" instead of "...EC1A").
  torpassword: MTY6ODcyODYwQjc2NDUzQTc3RDYwQ0EyQkI4QzFBNzA0MjA3MjA5MzI3NkEzRDcwMUFENjg0MDUzRUMxQQ== # 16:872860B76453A77D60CA2BB8C1A7042072093276A3D701AD684053EC1A
--------------------------------------------------------------------------------
/k8s/base/bitcoind/service.yml:
--------------------------------------------------------------------------------
apiVersion: v1
kind: Service
metadata:
  name: bitcoind
  labels:
    app: bitcoind
spec:
  # Cluster-internal only: other pods reach bitcoind by service name.
  type: ClusterIP
  ports:
    - name: rpc
      port: 18332 # mainnet 8332
    - name: peers
      port: 18333 # mainnet 8333
    - name: block-events
      port: 18501 # zmqpubrawblock (see bitcoind-configmap)
    - name: tx-events
      port: 18502 # zmqpubrawtx (see bitcoind-configmap)
  selector:
    app: bitcoind
--------------------------------------------------------------------------------
/k8s/base/bitcoind/statefulset.yml:
--------------------------------------------------------------------------------
# bitcoind StatefulSet (header previously said "postgres" — copy/paste slip)
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: bitcoind
  labels:
    app: bitcoind
spec:
  replicas: 1
  selector:
    matchLabels:
      app: bitcoind
  serviceName: bitcoind
  template:
    metadata:
      labels:
        app: bitcoind
    spec:
      containers:
        - name: bitcoind
          image: ruimarinho/bitcoin-core:24.0.1-alpine # Note that alpine images are not supported by Core.
          imagePullPolicy: IfNotPresent
          resources:
            limits:
              cpu: "1000m"
              memory: "1500Mi"
            requests:
              cpu: "10m"
              memory: "50Mi"
          ports:
            - containerPort: 18332 # mainnet 8332
            - containerPort: 18333 # mainnet 8333
          # $(var) references are expanded by kubelet from the env vars
          # injected below via envFrom.
          args: [ # bitcoin.conf arguments from bitcoind-configmap
            # -reindex, # Enable in case re-indexing is needed
            --testnet=$(testnet),
            --server=$(server),
            --txindex=$(txindex),
            --rpcuser=$(rpcuser),
            --rpcpassword=$(rpcpassword),
            --rpcbind=$(rpcbind),
            --rpcallowip=$(rpcallowip),
            --onion=$(onion),
            --proxy=$(proxy),
            --i2psam=$(i2psam),
            --i2pacceptincoming=$(i2pacceptincoming),
            --onlynet=$(onlynet1),
            --onlynet=$(onlynet2),
            # NOTE(review): "torcontrolpass" is not defined in bitcoind-configmap
            # or bitcoind-secret (whose key is "torpassword") — presumably it comes
            # from tor-secret; verify the env var name matches.
            --torpassword=$(torcontrolpass),
            --torcontrol=$(torcontrol),
            --zmqpubrawblock=$(zmqpubrawblock),
            --zmqpubrawtx=$(zmqpubrawtx),
            --maxuploadtarget=$(maxuploadtarget),
          ]
          envFrom:
            - configMapRef:
                name: bitcoind-configmap
            - secretRef:
                name: bitcoind-secret
            - secretRef:
                name: tor-secret
          volumeMounts:
            - name: bitcoind-data
              mountPath: /home/bitcoin/.bitcoin
      volumes:
        - name: bitcoind-data
          persistentVolumeClaim:
            claimName: bitcoind-pvc
--------------------------------------------------------------------------------
/k8s/base/celery/beat-deployment.yml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: celery-beat
5 | labels:
6 | app: celery-beat
7 | spec:
8 | selector:
9 | matchLabels:
10 | app: celery-beat
11 | replicas: 1
12 | strategy:
13 | type: RollingUpdate
14 | template:
15 | metadata:
16 | labels:
17 | app: celery-beat
18 | spec:
19 | containers:
20 | - name: celery-beat
21 | image: recksato/robosats:534e4c0
22 | # imagePullPolicy: IfNotPresent
23 | resources:
24 | limits:
25 | cpu: "1000m"
26 | memory: "500Mi"
27 | requests:
28 | cpu: "10m"
29 | memory: "50Mi"
30 | envFrom:
31 | - configMapRef:
32 | name: robosats-configmap
33 | - secretRef:
34 | name: robosats-secret
35 | - secretRef:
36 | name: postgres-secret # Using the Secret postgres-secret
37 | command: ["celery", "-A", "robosats", "beat", "--loglevel=INFO", "--scheduler","django_celery_beat.schedulers:DatabaseScheduler"]
--------------------------------------------------------------------------------
/k8s/base/celery/worker-deployment.yml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: celery-worker
5 | labels:
6 | app: celery-worker
7 | spec:
8 | selector:
9 | matchLabels:
10 | app: celery-worker
11 | replicas: 1
12 | strategy:
13 | type: RollingUpdate
14 | template:
15 | metadata:
16 | labels:
17 | app: celery-worker
18 | spec:
19 | containers:
20 | - name: celery-worker
21 | image: recksato/robosats:534e4c0
22 | # imagePullPolicy: IfNotPresent
23 | resources:
24 | limits:
25 | cpu: "1000m"
26 | memory: "500Mi"
27 | requests:
28 | cpu: "10m"
29 | memory: "50Mi"
30 | envFrom:
31 | - configMapRef:
32 | name: robosats-configmap
33 | - secretRef:
34 | name: robosats-secret
35 | - secretRef:
36 | name: postgres-secret # Using the Secret postgres-secret
37 | command: ["celery", "-A", "robosats", "worker", "--loglevel=INFO"]
38 | # ports:
39 | # - containerPort: 8000
40 | # volumeMounts:
41 | # - name: lnd-data
42 | # mountPath: /lnd
43 | # volumes:
44 | # - name: lnd-data
45 | # persistentVolumeClaim:
46 | # claimName: lnd-pvc
--------------------------------------------------------------------------------
/k8s/base/command/clean-orders-deployment.yml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: clean-orders
5 | labels:
6 | app: clean-orders
7 | spec:
8 | selector:
9 | matchLabels:
10 | app: clean-orders
11 | replicas: 1
12 | strategy:
13 | type: RollingUpdate
14 | template:
15 | metadata:
16 | labels:
17 | app: clean-orders
18 | spec:
19 | containers:
20 | - name: clean-orders
21 | image: recksato/robosats:534e4c0
22 | imagePullPolicy: IfNotPresent
23 | resources:
24 | limits:
25 | cpu: "1000m"
26 | memory: "500Mi"
27 | requests:
28 | cpu: "10m"
29 | memory: "50Mi"
30 | envFrom:
31 | - configMapRef:
32 | name: robosats-configmap
33 | - secretRef:
34 | name: robosats-secret
35 | - secretRef:
36 | name: postgres-secret # Using the Secret postgres-secret
37 | command: ["python3", "manage.py", "clean_orders"]
38 | volumeMounts:
39 | - name: lnd-data
40 | mountPath: /lnd
41 | volumes:
42 | - name: lnd-data
43 | persistentVolumeClaim:
44 | claimName: lnd-pvc
--------------------------------------------------------------------------------
/k8s/base/command/follow-invoices-deployment.yml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: follow-invoices
5 | labels:
6 | app: follow-invoices
7 | spec:
8 | selector:
9 | matchLabels:
10 | app: follow-invoices
11 | replicas: 1
12 | strategy:
13 | type: RollingUpdate
14 | template:
15 | metadata:
16 | labels:
17 | app: follow-invoices
18 | spec:
19 | containers:
20 | - name: follow-invoices
21 | image: recksato/robosats:534e4c0
22 | imagePullPolicy: IfNotPresent
23 | resources:
24 | limits:
25 | cpu: "1000m"
26 | memory: "500Mi"
27 | requests:
28 | cpu: "10m"
29 | memory: "50Mi"
30 | envFrom:
31 | - configMapRef:
32 | name: robosats-configmap
33 | - secretRef:
34 | name: robosats-secret
35 | - secretRef:
36 | name: postgres-secret # Using the Secret postgres-secret
37 | command: ["python3", "manage.py", "follow_invoices"]
38 | volumeMounts:
39 | - name: lnd-data
40 | mountPath: /lnd
41 | volumes:
42 | - name: lnd-data
43 | persistentVolumeClaim:
44 | claimName: lnd-pvc
--------------------------------------------------------------------------------
/k8s/base/command/telegram-watcher-deployment.yml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: telegram-watcher
5 | labels:
6 | app: telegram-watcher
7 | spec:
8 | selector:
9 | matchLabels:
10 | app: telegram-watcher
11 | replicas: 1
12 | strategy:
13 | type: RollingUpdate
14 | template:
15 | metadata:
16 | labels:
17 | app: telegram-watcher
18 | spec:
19 | containers:
20 | - name: telegram-watcher
21 | image: recksato/robosats:534e4c0
22 | imagePullPolicy: IfNotPresent
23 | resources:
24 | limits:
25 | cpu: "1000m"
26 | memory: "500Mi"
27 | requests:
28 | cpu: "10m"
29 | memory: "50Mi"
30 | envFrom:
31 | - configMapRef:
32 | name: robosats-configmap
33 | - secretRef:
34 | name: robosats-secret
35 | - secretRef:
36 | name: postgres-secret # Using the Secret postgres-secret
37 | command: ["python3", "manage.py", "telegram_watcher"]
--------------------------------------------------------------------------------
/k8s/base/coredns-configmap.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ConfigMap
3 | metadata:
4 | labels:
5 | addonmanager.kubernetes.io/mode: EnsureExists
6 | k8s-app: kube-dns
7 | name: coredns
8 | namespace: kube-system
9 | data:
10 | Corefile: |
11 | .:53 {
12 | errors
13 | health {
14 | lameduck 5s
15 | }
16 | ready
17 |
18 | log . {
19 | class error
20 | }
21 | kubernetes cluster.local in-addr.arpa ip6.arpa {
22 | pods insecure
23 | fallthrough in-addr.arpa ip6.arpa
24 | }
25 | # These are the only IPs containers can make requests to (hard-coded DNS)
26 | hosts {
27 | 149.154.167.99 telegram.org
28 | 149.154.167.99 core.telegram.org
29 | 149.154.167.99 api.telegram.org
30 | 104.16.145.212 blockchain.info
31 | 165.22.7.101 api.yadio.io
32 |
33 | ...
34 | fallthrough
35 | }
36 | prometheus :9153
37 | forward . 8.8.8.8 8.8.4.4
38 | cache 30
39 | loop
40 |
41 | reload
42 | loadbalance
43 | }
--------------------------------------------------------------------------------
/k8s/base/daphne/deployment.yml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: daphne
5 | labels:
6 | app: daphne
7 | spec:
8 | selector:
9 | matchLabels:
10 | app: daphne
11 | replicas: 1
12 | strategy:
13 | type: RollingUpdate
14 | template:
15 | metadata:
16 | labels:
17 | app: daphne
18 | spec:
19 | containers:
20 | - name: daphne
21 | image: recksato/robosats:534e4c0
22 | # imagePullPolicy: IfNotPresent
23 | resources:
24 | limits:
25 | cpu: "1000m"
26 | memory: "500Mi"
27 | requests:
28 | cpu: "20m"
29 | memory: "50Mi"
30 | envFrom:
31 | - configMapRef:
32 | name: robosats-configmap
33 | - secretRef:
34 | name: robosats-secret
35 | - secretRef:
36 | name: postgres-secret # Using the Secret postgres-secret
37 | command: ["daphne", "-b", "0.0.0.0", "-p", "9000", "robosats.asgi:application"]
38 | ports:
39 | - containerPort: 9000
40 | volumeMounts:
41 | - name: static-data
42 | mountPath: /usr/src/static
43 | - name: lnd-data
44 | mountPath: /lnd
45 | volumes:
46 | - name: static-data
47 | persistentVolumeClaim:
48 | claimName: static-pvc
49 | - name: lnd-data
50 | persistentVolumeClaim:
51 | claimName: lnd-pvc
--------------------------------------------------------------------------------
/k8s/base/daphne/service.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service # Create service
3 | metadata:
4 | name: daphne # Sets the service name
5 | labels:
6 | app: daphne # Defines app to create service for
7 | spec:
8 | type: ClusterIP # Sets the service type
9 | ports:
10 | - port: 9000 # Sets the port to run the daphne application
11 | selector:
12 | app: daphne
--------------------------------------------------------------------------------
/k8s/base/gunicorn/deployment.yml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: gunicorn
5 | labels:
6 | app: gunicorn
7 | spec:
8 | selector:
9 | matchLabels:
10 | app: gunicorn
11 | replicas: 1
12 | strategy:
13 | type: RollingUpdate
14 | template:
15 | metadata:
16 | labels:
17 | app: gunicorn
18 | spec:
19 | containers:
20 | - name: gunicorn
21 | image: recksato/robosats:534e4c0
22 | # imagePullPolicy: IfNotPresent
23 | resources:
24 | limits:
25 | cpu: "1000m"
26 | memory: "500Mi"
27 | requests:
28 | cpu: "20m"
29 | memory: "50Mi"
30 | envFrom:
31 | - configMapRef:
32 | name: robosats-configmap
33 | - secretRef:
34 | name: robosats-secret
35 | - secretRef:
36 | name: postgres-secret # Using the Secret postgres-secret
37 | command: ["gunicorn","--bind", ":8000", "--max-requests", "1000", "--max-requests-jitter", "200", "-w", "2", "robosats.wsgi:application"] # ["python3", "manage.py", "runserver", "0.0.0.0:8000"]
38 | ports:
39 | - containerPort: 8000
40 | volumeMounts:
41 | - name: static-data
42 | mountPath: /usr/src/static
43 | - name: lnd-data
44 | mountPath: /lnd
45 | volumes:
46 | - name: static-data
47 | persistentVolumeClaim:
48 | claimName: static-pvc
49 | - name: lnd-data
50 | persistentVolumeClaim:
51 | claimName: lnd-pvc
--------------------------------------------------------------------------------
/k8s/base/gunicorn/service.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service # Create service
3 | metadata:
4 | name: gunicorn # Sets the service name
5 | labels:
6 | app: gunicorn # Defines app to create service for
7 | spec:
8 | type: ClusterIP # Sets the service type
9 | ports:
10 | - port: 8000 # Sets the port to run the gunicorn application
11 | selector:
12 | app: gunicorn
--------------------------------------------------------------------------------
/k8s/base/i2p/configmap.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ConfigMap
3 | metadata:
4 | name: i2p-configmap
5 | data:
6 | clients.config: |
7 | # NOTE: This I2P config file must use UTF-8 encoding
8 | #
9 | # If you have a 'split' directory installation, with configuration
10 | # files in ~/.i2p (Linux), %LOCALAPPDATA%\I2P (Windows),
11 | # or /Users/(user)/Library/Application Support/i2p (Mac), be sure to
12 | # edit the file in the configuration directory, NOT the install directory.
13 | # When running as a Linux daemon, the configuration directory is /var/lib/i2p
14 | # and the install directory is /usr/share/i2p .
15 | # When running as a Windows service, the configuration directory is \ProgramData\i2p
16 | # and the install directory is \Program Files\i2p .
17 | #
18 | # On first run, this file will be split into individual config files
19 | # in clients.config.d/ in the configuration directory.
20 | # Look in that directory for the file to edit.
21 | #
22 |
23 | # fire up the web console
24 | ## There are several choices, here are some examples:
25 | ## non-SSL, bind to local IPv4 only
26 | #clientApp.0.args=7657 127.0.0.1 ./webapps/
27 | ## non-SSL, bind to local IPv6 only
28 | #clientApp.0.args=7657 ::1 ./webapps/
29 | ## non-SSL, bind to all IPv4 addresses
30 | #clientApp.0.args=7657 0.0.0.0 ./webapps/
31 | ## non-SSL, bind to all IPv6 addresses
32 | #clientApp.0.args=7657 :: ./webapps/
33 | ## For SSL only, change clientApp.4.args below to https://
34 | ## SSL only
35 | #clientApp.0.args=-s 7657 ::1,127.0.0.1 ./webapps/
36 | ## non-SSL and SSL
37 | clientApp.0.args=7657 ::1,127.0.0.1 -s 7667 ::1,127.0.0.1 ./webapps/
38 | ## non-SSL only, both IPv6 and IPv4 local interfaces
39 | #clientApp.0.args=7657 0.0.0.0 ./webapps/
40 | clientApp.0.main=net.i2p.router.web.RouterConsoleRunner
41 | clientApp.0.name=webConsole
42 | clientApp.0.onBoot=true
43 |
44 | # start up the SAM bridge so other client apps can connect
45 | clientApp.1.main=net.i2p.sam.SAMBridge
46 | clientApp.1.name=SAMBridge
47 | clientApp.1.args=sam.keys 0.0.0.0 7656 i2cp.tcp.host=localhost i2cp.tcp.port=7654
48 | clientApp.1.startOnLoad=false
49 |
50 | # poke the i2ptunnels defined in i2ptunnel.config
51 | clientApp.2.main=net.i2p.i2ptunnel.TunnelControllerGroup
52 | clientApp.2.name=Tunnels
53 | clientApp.2.args=i2ptunnel.config
54 |
55 | # run our own eepsite with a separate jetty instance
56 | clientApp.3.main=org.mortbay.jetty.Server
57 | clientApp.3.name=eepsite
58 | clientApp.3.args=eepsite/jetty.xml
59 | clientApp.3.delay=30
60 | clientApp.3.startOnLoad=false
61 |
62 | # load a browser pointing at the web console whenever we start up
63 | clientApp.4.main=net.i2p.apps.systray.UrlLauncher
64 | clientApp.4.name=consoleBrowser
65 | clientApp.4.args=http://0.0.0.0:7657/
66 | clientApp.4.delay=5
67 | clientApp.4.startOnLoad=false
68 |
--------------------------------------------------------------------------------
/k8s/base/i2p/deployment.yml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: i2p
5 | labels:
6 | app: i2p
7 | spec:
8 | selector:
9 | matchLabels:
10 | app: i2p
11 | replicas: 1
12 | strategy:
13 | type: RollingUpdate
14 | template:
15 | metadata:
16 | labels:
17 | app: i2p
18 | spec:
19 | containers:
20 | - name: i2p
21 | image: geti2p/i2p:i2p-i2p-1.8.0
22 | imagePullPolicy: IfNotPresent
23 | ports:
24 | - containerPort: 7657 # Router console
25 | - containerPort: 7656 # SAM Bridge TCP
26 | resources:
27 | limits:
28 | cpu: "100m"
29 | memory: "500Mi"
30 | requests:
31 | cpu: "10m"
32 | memory: "15Mi"
33 | volumeMounts:
34 | - name: i2p-conf
35 | mountPath: /i2p/clients.config
36 | subPath: clients.config
37 | volumes:
38 | - name: i2p-conf
39 | configMap:
40 | name: i2p-configmap
--------------------------------------------------------------------------------
/k8s/base/i2p/service.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: i2p-external
5 | labels:
6 | app: i2p
7 | spec:
8 | type: NodePort
9 | ports:
10 | - name: router-control
11 | nodePort: 30657
12 | port: 7657
13 | selector:
14 | app: i2p
15 | ---
16 | apiVersion: v1
17 | kind: Service
18 | metadata:
19 | name: i2p-cluster
20 | labels:
21 | app: i2p
22 | spec:
23 | type: ClusterIP
24 | ports:
25 | - name: sam-bridge
26 | port: 7656
27 | targetPort: 7656
28 | selector:
29 | app: i2p
--------------------------------------------------------------------------------
/k8s/base/kustomization.yml:
--------------------------------------------------------------------------------
1 | apiVersion: kustomize.config.k8s.io/v1beta1
2 | kind: Kustomization
3 | resources:
4 | # System
5 | - coredns-configmap.yml
6 | # Storage
7 | - openebs-local-storageclass.yml
8 | # General
9 | - robosats-configmap.yml
10 | - robosats-secret.yml
11 | # Bitcoind
12 | - bitcoind/service.yml
13 | - bitcoind/secret.yml
14 | - bitcoind/configmap.yml
15 | - bitcoind/pvc.yml
16 | - bitcoind/statefulset.yml
17 | # LND
18 | - lnd/service.yml
19 | - lnd/secret.yml
20 | - lnd/configmap.yml
21 | - lnd/pvc.yml
22 | - lnd/statefulset.yml
23 | # RoboSats Backend
24 | - daphne/service.yml
25 | - daphne/deployment.yml
26 | - gunicorn/service.yml
27 | - gunicorn/deployment.yml
28 | # Commands
29 | - command/follow-invoices-deployment.yml
30 | - command/clean-orders-deployment.yml
31 | - command/telegram-watcher-deployment.yml
32 | # Celery
33 | - celery/worker-deployment.yml
34 | - celery/beat-deployment.yml
35 | # Nginx
36 | - nginx/static-pvc.yml
37 | - nginx/configmap.yml
38 | - nginx/service.yml
39 | - nginx/deployment.yml
40 | # Redis
41 | - redis/service.yml
42 | - redis/pvc.yml
43 | - redis/statefulset.yml
44 | # Postgres
45 | - postgres/service.yml
46 | - postgres/secret.yml
47 | - postgres/pvc.yml
48 | - postgres/statefulset.yml
49 | # Tor
50 | #- tor/install.yml # Tor controler with ha load balancer. Cool but can't make it work
51 | #- tor/onionservice.yml
52 | - tor/service.yml
53 | - tor/tor-secret.yml
54 | # - tor/onion-secret.yml
55 | - tor/pvc.yml
56 | - tor/configmap.yml
57 | - tor/deployment.yml
58 | # I2P
59 | - i2p/configmap.yml
60 | - i2p/service.yml
61 | - i2p/deployment.yml
62 | # Thunderhub
63 | - thub/service.yml
64 | - thub/configmap.yml
65 | - thub/deployment.yml
66 | # LIT
67 | - litd/service.yml
68 | - litd/pvc.yml
69 | - litd/configmap.yml
70 | - litd/secret.yml
71 | - litd/statefulset.yml
--------------------------------------------------------------------------------
/k8s/base/litd/configmap.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ConfigMap
3 | metadata:
4 | name: litd-configmap
5 | data:
6 | network: 'testnet'
7 | restcors: '*' # Add an ip:port/hostname to allow cross origin access from. To allow all origins, set as "*".
8 | remote.lnd.rpcserver: 'lnd:10009'
9 | remote.lnd.macaroonpath: '/root/.lnd/data/chain/bitcoin/${network}/admin.macaroon' # For mainnet /lnd/data/chain/bitcoin/mainnet/admin.macaroon
10 | remote.lnd.tlscertpath: '/root/.lnd/tls.cert'
11 | httpslisten: '0.0.0.0:8443'
12 | loop.server.proxy: 'tor:9050'
13 | pool.proxy: 'tor:9050'
14 | loop.loopoutmaxparts: '25'
15 | loop.maxlsatfee: '1'
--------------------------------------------------------------------------------
/k8s/base/litd/pvc.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: PersistentVolumeClaim
3 | metadata:
4 | name: litd-pvc
5 | labels:
6 | app: litd
7 | spec:
8 | storageClassName: openebs-host
9 | accessModes:
10 | - ReadWriteOnce
11 | resources:
12 | requests:
13 | storage: 100Mi
--------------------------------------------------------------------------------
/k8s/base/litd/secret.yml:
--------------------------------------------------------------------------------
1 | # Use: echo -n 'litbaserobodevz' | base64
2 | apiVersion: v1
3 | kind: Secret
4 | metadata:
5 | name: litd-secret
6 | labels:
7 | app: litd
8 | type: Opaque
9 | data:
10 | uipassword: bGl0YmFzZXJvYm9kZXZ6 # litbaserobodevz
--------------------------------------------------------------------------------
/k8s/base/litd/service.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: litd-external
5 | labels:
6 | app: litd
7 | spec:
8 | type: NodePort
9 | ports:
10 | - name: http
11 | nodePort: 30020
12 | port: 8443
13 | selector:
14 | app: litd
--------------------------------------------------------------------------------
/k8s/base/litd/statefulset.yml:
--------------------------------------------------------------------------------
1 | # litd StatefulSet
2 | apiVersion: apps/v1
3 | kind: StatefulSet
4 | metadata:
5 | name: litd
6 | labels:
7 | app: litd
8 | spec:
9 | replicas: 1
10 | selector:
11 | matchLabels:
12 | app: litd
13 | serviceName: litd
14 | template:
15 | metadata:
16 | labels:
17 | app: litd
18 | spec:
19 | containers:
20 | - name: litd
21 | image: lightninglabs/lightning-terminal:v0.10.5-alpha
22 | imagePullPolicy: IfNotPresent
23 | resources:
24 | limits:
25 | cpu: "1000m"
26 | memory: "500Mi"
27 | requests:
28 | cpu: "10m"
29 | memory: "100Mi"
30 | args: [
31 | --uipassword=$(uipassword),
32 | --network=$(network),
33 | # --restcors=$(restcors),
34 | --remote.lnd.rpcserver=$(remote.lnd.rpcserver),
35 | --remote.lnd.macaroonpath=$(remote.lnd.macaroonpath),
36 | --remote.lnd.tlscertpath=$(remote.lnd.tlscertpath),
37 | --httpslisten=$(httpslisten),
38 | --loop.server.proxy=$(loop.server.proxy),
39 | --loop.loopoutmaxparts=$(loop.loopoutmaxparts),
40 | --loop.maxlsatfee=$(loop.maxlsatfee)
41 | ]
42 | envFrom:
43 | - configMapRef:
44 | name: litd-configmap
45 | - secretRef:
46 | name: litd-secret
47 | volumeMounts:
48 | - name: lnd-data
49 | mountPath: /root/.lnd/
50 | - name: litd-data
51 | mountPath: /root/.litd/
52 | volumes:
53 | - name: lnd-data
54 | persistentVolumeClaim:
55 | claimName: lnd-pvc
56 | - name: litd-data
57 | persistentVolumeClaim:
58 | claimName: litd-pvc
--------------------------------------------------------------------------------
/k8s/base/lnd/configmap.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ConfigMap
3 | metadata:
4 | name: lnd-configmap
5 | data:
6 | lnd.conf: |
7 | # Reference: https://github.com/lightningnetwork/lnd/blob/master/sample-lnd.conf
8 |
9 | debuglevel=info
10 | alias=🤖RoboSats⚡ COORDINATOR_NAME | LND
11 | color=#4126a7
12 | maxpendingchannels=6
13 | bitcoin.active=1
14 | bitcoin.testnet=1
15 | bitcoin.node=bitcoind
16 | bitcoind.rpchost=bitcoind
17 | bitcoind.zmqpubrawblock=tcp://bitcoind:18501
18 | bitcoind.zmqpubrawtx=tcp://bitcoind:18502
19 |
20 | bitcoin.feerate=2000
21 | bitcoin.basefee=100000
22 | minchansize=1000000
23 | bitcoin.minhtlc=75000
24 | routerrpc.minrtprob=0.05
25 |
26 | # Silence the wallet-unlock-password-file for first startup (create wallet first)
27 | wallet-unlock-password-file=/tmp/autounlockpass
28 |
29 | # minchansize=1000000 is already set above; duplicate removed
30 | accept-amp=true
31 | accept-keysend=true
32 | protocol.wumbo-channels=true
33 |
34 | # Needed for LIT
35 | rpcmiddleware.enable=true
36 |
37 | # Configuring Tor docs:
38 | # https://github.com/lightningnetwork/lnd/blob/master/docs/configuring_tor.md
39 | tor.control=tor:9051
40 | tor.socks=tor:9050
41 | tor.active=1
42 | tor.v3=1
43 |
44 | # Listening port will need to be changed if multiple LND instances are running
45 | listen=localhost:9735
46 |
47 | # Allow connection to gRPC from host
48 | rpclisten=0.0.0.0:10009
49 | restlisten=0.0.0.0:8080
50 | tlsextradomain=lnd
51 | tlsextraip=lnd # NOTE(review): tlsextraip expects an IP address; 'lnd' is a hostname already covered by tlsextradomain above — confirm and drop this line
--------------------------------------------------------------------------------
/k8s/base/lnd/pvc.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: PersistentVolumeClaim
3 | metadata:
4 | name: lnd-pvc
5 | labels:
6 | app: lnd
7 | spec:
8 | storageClassName: openebs-host
9 | accessModes:
10 | - ReadWriteOnce
11 | resources:
12 | requests:
13 | storage: 2Gi
--------------------------------------------------------------------------------
/k8s/base/lnd/secret.yml:
--------------------------------------------------------------------------------
1 | # Use: echo -n 'lndbaserobodevz' | base64
2 | apiVersion: v1
3 | kind: Secret
4 | metadata:
5 | name: lnd-secret
6 | labels:
7 | app: lnd
8 | type: Opaque
9 | data:
10 | lndpass: bG5kYmFzZXJvYm9kZXZ6 # lndbaserobodevz
--------------------------------------------------------------------------------
/k8s/base/lnd/service.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: lnd
5 | labels:
6 | app: lnd
7 | spec:
8 | type: ClusterIP
9 | ports:
10 | - name: grpc
11 | port: 10009
12 | targetPort: 10009
13 | selector:
14 | app: lnd
--------------------------------------------------------------------------------
/k8s/base/lnd/statefulset.yml:
--------------------------------------------------------------------------------
1 | # lnd StatefulSet
2 | apiVersion: apps/v1
3 | kind: StatefulSet
4 | metadata:
5 | name: lnd
6 | labels:
7 | app: lnd
8 | spec:
9 | replicas: 1
10 | selector:
11 | matchLabels:
12 | app: lnd
13 | serviceName: lnd
14 | template:
15 | metadata:
16 | labels:
17 | app: lnd
18 | spec:
19 | containers:
20 | - name: lnd
21 | image: lightninglabs/lnd:v0.16.4-beta
22 | imagePullPolicy: IfNotPresent
23 | resources:
24 | limits:
25 | cpu: "1000m"
26 | memory: "4000Mi"
27 | requests:
28 | cpu: "10m"
29 | memory: "100Mi"
30 | ports:
31 | - containerPort: 10009
32 | args: [ # lnd.conf arguments from lnd-configmap
33 | # --lnddir=/root/.lnd/
34 | # --configfile=/root/.lnd/lnd.conf,
35 | --bitcoind.rpcuser=$(rpcuser),
36 | --bitcoind.rpcpass=$(rpcpassword),
37 | --tor.password=$(torcontrolpass),
38 | ]
39 | envFrom:
40 | - configMapRef:
41 | name: lnd-configmap
42 | - secretRef:
43 | name: bitcoind-secret
44 | - secretRef:
45 | name: tor-secret
46 | volumeMounts:
47 | - name: lnd-data
48 | mountPath: /root/.lnd/
49 | - name: lnd-conf
50 | mountPath: /root/.lnd/lnd.conf
51 | subPath: lnd.conf
52 | - name: lnd-autounlock
53 | mountPath: /tmp/
54 | readOnly: true
55 | volumes:
56 | - name: lnd-data
57 | persistentVolumeClaim:
58 | claimName: lnd-pvc
59 | - name: lnd-conf
60 | configMap:
61 | name: lnd-configmap
62 | - name: lnd-autounlock
63 | secret:
64 | secretName: lnd-secret
65 | items:
66 | - key: lndpass
67 | path: autounlockpass
--------------------------------------------------------------------------------
/k8s/base/nginx/configmap.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ConfigMap
3 | metadata:
4 | name: nginx-configmap
5 | data:
6 | local.conf: |
7 | limit_req_zone $binary_remote_addr zone=limitonesec:10m rate=1r/s;
8 |
9 | #first we declare our upstream server, which is our Gunicorn application
10 | upstream robosats_gunicorn_rest {
11 | # kubernetes will automatically resolve this to the correct address
12 | server gunicorn:8000;
13 | }
14 |
15 | upstream robosats_daphne_websocket {
16 | # kubernetes will automatically resolve this to the correct address
17 | server daphne:9000;
18 | }
19 |
20 | # now we declare our main server
21 | server {
22 |
23 | listen 80;
24 | server_name testnet.robosats.com;
25 |
26 | location /static {
27 | alias /usr/src/static;
28 | }
29 |
30 | location /.well-known {
31 | alias /usr/src/.well-known;
32 | }
33 |
34 | location / {
35 | # requests are passed to Gunicorn
36 | proxy_pass http://robosats_gunicorn_rest;
37 | proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
38 | proxy_set_header Host $host;
39 | proxy_redirect off;
40 |
41 | # Edit for your own onion service
42 | add_header Onion-Location http://robotestagw3dcxmd66r4rgksb4nmmr43fh77bzn2ia2eucduyeafnyd.onion$request_uri;
43 | }
44 |
45 | location /ws/ {
46 | # websockets are passed to Daphne
47 | proxy_pass http://robosats_daphne_websocket;
48 | proxy_http_version 1.1;
49 | proxy_set_header Upgrade $http_upgrade;
50 | proxy_set_header Connection "Upgrade";
51 | proxy_set_header Host $host;
52 | }
53 |
54 | location = /favicon.ico { access_log off; log_not_found off; }
55 | }
56 |
57 | nginx.conf: |
58 | user nginx;
59 | worker_processes auto;
60 |
61 | error_log /var/log/nginx/error.log notice;
62 | pid /var/run/nginx.pid;
63 |
64 | events {
65 | worker_connections 1024;
66 | }
67 |
68 | http {
69 | include /etc/nginx/mime.types;
70 | default_type application/octet-stream;
71 |
72 | log_format main '$remote_addr - $remote_user [$time_local] "$request" '
73 | '$status $body_bytes_sent "$http_referer" '
74 | '"$http_user_agent" "$http_x_forwarded_for"';
75 |
76 | access_log /var/log/nginx/access.log main;
77 |
78 | sendfile on;
79 | #tcp_nopush on;
80 |
81 | keepalive_timeout 65;
82 |
83 | #gzip on;
84 |
85 | include /etc/nginx/conf.d/local.conf;
86 | }
--------------------------------------------------------------------------------
/k8s/base/nginx/deployment.yml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: nginx
5 | labels:
6 | app: nginx
7 | spec:
8 | selector:
9 | matchLabels:
10 | app: nginx
11 | replicas: 1
12 | strategy:
13 | type: RollingUpdate
14 | template:
15 | metadata:
16 | labels:
17 | app: nginx
18 | spec:
19 | containers:
20 | - name: nginx
21 | image: nginx:stable
22 | ports:
23 | - containerPort: 80
24 | resources:
25 | limits:
26 | cpu: "1"
27 | memory: "100Mi"
28 | requests:
29 | cpu: "1m"
30 | memory: "5Mi"
31 | volumeMounts:
32 | - name: static-data
33 | mountPath: /usr/src/static
34 | readOnly: true
35 | - name: nginx-conf
36 | mountPath: /etc/nginx/conf.d/local.conf
37 | subPath: local.conf
38 | - name: nginx-conf
39 | mountPath: /etc/nginx/nginx.conf
40 | subPath: nginx.conf
41 | volumes:
42 | - name: static-data
43 | persistentVolumeClaim:
44 | claimName: static-pvc
45 | - name: nginx-conf
46 | configMap:
47 | name: nginx-configmap
--------------------------------------------------------------------------------
/k8s/base/nginx/service.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: nginx-external
5 | labels:
6 | app: nginx
7 | spec:
8 | type: NodePort
9 | ports:
10 | - name: http
11 | nodePort: 30080
12 | port: 80
13 | selector:
14 | app: nginx
15 | ---
16 | apiVersion: v1
17 | kind: Service
18 | metadata:
19 | name: nginx
20 | labels:
21 | app: nginx
22 | spec:
23 | type: ClusterIP
24 | ports:
25 | - name: http
26 | port: 80
27 | targetPort: 80
28 | selector:
29 | app: nginx
--------------------------------------------------------------------------------
/k8s/base/nginx/static-pvc.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: PersistentVolumeClaim # Create PVC
3 | metadata:
4 | name: static-pvc # Sets name of PV
5 | labels:
6 | app: static # Defines app to create PVC for
7 | spec:
8 | storageClassName: openebs-host
9 | accessModes:
10 | - ReadWriteOnce # Sets read and write access
11 | resources:
12 | requests:
13 | storage: 2Gi # Sets PVC's size
--------------------------------------------------------------------------------
/k8s/base/openebs-local-storageclass.yml:
--------------------------------------------------------------------------------
1 | apiVersion: storage.k8s.io/v1
2 | kind: StorageClass
3 | metadata:
4 | annotations:
5 | cas.openebs.io/config: |
6 | - name: StorageType
7 | value: "hostpath"
8 | - name: BasePath
9 | value: "/k8s-data"
10 | openebs.io/cas-type: local
11 | name: openebs-host
12 | provisioner: openebs.io/local
13 | reclaimPolicy: Retain
14 | volumeBindingMode: WaitForFirstConsumer
--------------------------------------------------------------------------------
/k8s/base/postgres/pvc.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: PersistentVolumeClaim # Create PVC
3 | metadata:
4 | name: postgres-pvc # Sets name of PV
5 | labels:
6 | app: postgres # Defines app to create PVC for
7 | spec:
8 | storageClassName: openebs-host
9 | accessModes:
10 | - ReadWriteOnce # Sets read and write access
11 | resources:
12 | requests:
13 | storage: 10Gi # Sets PVC's size
--------------------------------------------------------------------------------
/k8s/base/postgres/secret.yml:
--------------------------------------------------------------------------------
1 | # Secret postgres-secret for the postgres app
2 | # Define default database name, user, and password
3 | # Use: echo -n 'super-secret-password' | base64
4 | apiVersion: v1
5 | kind: Secret
6 | metadata:
7 | name: postgres-secret
8 | labels:
9 | app: postgres
10 | type: Opaque
11 | data:
12 | POSTGRES_DB: cm9ib3NhdHMtZGI= # robosats-db
13 | POSTGRES_USER: YWRtaW4= # admin
14 | POSTGRES_PASSWORD: MTIzNDU2NzhzdHJvbmdwYXNzd29yZA== # 12345678strongpassword
--------------------------------------------------------------------------------
/k8s/base/postgres/service.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service # Create service
3 | metadata:
4 | name: postgres # Sets the service name
5 | labels:
6 | app: postgres # Defines app to create service for
7 | spec:
8 | type: ClusterIP # Sets the service type
9 | ports:
10 | - port: 5432 # Sets the port to run the postgres application
11 | selector:
12 | app: postgres
--------------------------------------------------------------------------------
/k8s/base/postgres/statefulset.yml:
--------------------------------------------------------------------------------
1 | # postgres StatefulSet
2 | apiVersion: apps/v1
3 | kind: StatefulSet # Create a statefulset
4 | metadata:
5 | name: postgres # Set the name of the deployment
6 | labels:
7 | app: postgres
8 | spec:
9 | replicas: 1 # Set 1 deployment replicas
10 | selector:
11 | matchLabels:
12 | app: postgres
13 | serviceName: postgres
14 | template:
15 | metadata:
16 | labels:
17 | app: postgres
18 | spec:
19 | containers:
20 | - name: postgres
21 | image: postgres:alpine3.16 # Docker image
22 | imagePullPolicy: IfNotPresent
23 | resources:
24 | limits:
25 | cpu: "100m"
26 | memory: "300Mi"
27 | requests:
28 | cpu: "1m"
29 | memory: "5Mi"
30 | ports:
31 | - containerPort: 5432 # Exposing the container port 5432 for postgres client connections.
32 | envFrom:
33 | - secretRef:
34 | name: postgres-secret # Using the Secret postgres-secret
35 | volumeMounts:
36 | - mountPath: /var/lib/postgresql/data/
37 | name: postgres-data
38 | volumes:
39 | - name: postgres-data
40 | persistentVolumeClaim:
41 | claimName: postgres-pvc
--------------------------------------------------------------------------------
/k8s/base/redis/pvc.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: PersistentVolumeClaim
3 | metadata:
4 | name: redis-pvc
5 | labels:
6 | app: redis
7 | spec:
8 | storageClassName: openebs-host
9 | accessModes:
10 | - ReadWriteOnce
11 | resources:
12 | requests:
13 | storage: 20Mi
--------------------------------------------------------------------------------
/k8s/base/redis/service.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: redis
5 | labels:
6 | app: redis
7 | spec:
8 | type: ClusterIP
9 | ports:
10 | - name: redis
11 | port: 6379
12 | targetPort: 6379
13 | selector:
14 | app: redis
--------------------------------------------------------------------------------
/k8s/base/redis/statefulset.yml:
--------------------------------------------------------------------------------
1 | # More info on deploying Redis the right way
2 | # https://www.containiq.com/post/deploy-redis-cluster-on-kubernetes
3 |
4 | apiVersion: apps/v1
5 | kind: StatefulSet
6 | metadata:
7 | name: redis
8 | labels:
9 | app: redis
10 | spec:
11 | replicas: 1
12 | selector:
13 | matchLabels:
14 | app: redis
15 | serviceName: redis
16 | template:
17 | metadata:
18 | labels:
19 | app: redis
20 | spec:
21 | containers:
22 | - name: redis
23 | image: redis:7.0.4-alpine3.16
24 | ports:
25 | - containerPort: 6379
26 | resources:
27 | limits:
28 | cpu: "1"
29 | memory: "100Mi"
30 | requests:
31 | cpu: "1m"
32 | memory: "5Mi"
33 | volumeMounts:
34 | - name: redis-data
35 | mountPath: /data
36 | volumes:
37 | - name: redis-data
38 | persistentVolumeClaim:
39 | claimName: redis-pvc
--------------------------------------------------------------------------------
/k8s/base/robosats-configmap.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ConfigMap
3 | metadata:
4 | name: robosats-configmap
5 | data:
6 | ALTERNATIVE_NAME: 'RoboSats Mainnet'
7 | ALTERNATIVE_SITE: 'RoboSats6tkf3eva7x2voqso3a5wcorsnw34jveyxfqi2fu7oyheasid.onion'
8 | AUTO_UNLOCK_PWD: '1234'
9 | BLOCK_TIME: "8"
10 | DEFAULT_BOND_SIZE: "3"
11 | DEFAULT_PUBLIC_ORDER_DURATION: "24"
12 | DISABLE_ONCHAIN: "False"
13 | ESCROW_USERNAME: 'admin'
14 |   EXP_MAKER_BOND_INVOICE: '300'
15 | EXP_TAKER_BOND_INVOICE: "200"
16 | FEE: "0.002"
17 | FIAT_EXCHANGE_DURATION: "24"
18 | HOST_NAME: ''
19 | HOST_NAME2: ''
20 | I2P_ALIAS: ''
21 | I2P_LONG: ''
22 | INVOICE_AND_ESCROW_DURATION: "30"
23 | LND_DIR: '/lnd/'
24 | LND_GRPC_HOST: 'lnd:10009'
25 | LOCAL_ALIAS: '127.0.0.1'
26 | MACAROON_PATH: 'data/chain/bitcoin/testnet/admin.macaroon'
27 | MAKER_FEE_SPLIT: "0.125"
28 | MARKET_PRICE_APIS: 'https://blockchain.info/ticker, https://api.yadio.io/exrates/BTC'
29 | MAX_BOND_SIZE: "15"
30 | MAX_MINING_NETWORK_SPEEDUP_EXPECTED: "1.7"
31 | MAX_PUBLIC_ORDER_DURATION: "24"
32 | MAX_PUBLIC_ORDERS: "100"
33 | MAX_SWAP_FEE: "0.1"
34 | MAX_SWAP_POINT: "0"
35 | MAX_TRADE: "3000000"
36 | MAX_TRADE_BONDLESS_TAKER: "50000"
37 | MIN_BOND_SIZE: "1"
38 | MIN_FLAT_ROUTING_FEE_LIMIT: "10"
39 | MIN_FLAT_ROUTING_FEE_LIMIT_REWARD: "2"
40 | MIN_PUBLIC_ORDER_DURATION: "0.166"
41 | MIN_SWAP_AMOUNT: "10000"
42 | MIN_SWAP_FEE: "0.01"
43 | MIN_SWAP_POINT: "0.35"
44 |   MIN_TRADE: '20000'
45 | NETWORK: 'testnet'
46 | NODE_ALIAS: "'\U0001F916RoboSats⚡(RoboDevs)'"
47 | NODE_ID: '033b58d7......'
48 | ONION_LOCATION: ''
49 | PAYOUT_TIMEOUT_SECONDS: "90"
50 | PENALTY_TIMEOUT: "60"
51 | POSTGRES_HOST: 'postgres'
52 | POSTGRES_PORT: '5432'
53 | PROPORTIONAL_ROUTING_FEE_LIMIT: "0.001"
54 | REDIS_URL: 'redis://redis:6379/1'
55 | RETRY_TIME: "1"
56 | REWARD_TIP: "100"
57 | REWARDS_TIMEOUT_SECONDS: "30"
58 | SLASHED_BOND_REWARD_SPLIT: "0.5"
59 | SWAP_FEE_SHAPE: 'exponential'
60 | SWAP_LAMBDA: "8.8"
61 | TELEGRAM_BOT_NAME: ''
62 | TOR_PROXY: 'tor:9050'
63 | USE_TOR: 'True'
64 | LND_VERSION: v0.15.0-beta
--------------------------------------------------------------------------------
/k8s/base/robosats-secret.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Secret
3 | metadata:
4 | name: robosats-secret
5 | data:
6 | LND_CERT_BASE64: a # Your LND BASE64 TLS CERT encoded as secret
7 | LND_MACAROON_BASE64: b # Your LND MACAROON BASE64 encoded as secret
 8 |   SECRET_KEY: ZGphbmdvLWluc2VjdXJlLTZeJjZ1dyRiNV5lbiUoY3Uya2M3X28pKG1ncGF6eCNqX3pud2x5bTB2eGZhbW4ydW8tCa== # 'django-insecure-6^&6uw$b5^en%(cu2kc7_o)(mgpazx#j_znwlym0vxfamn2uo-'
9 | TELEGRAM_TOKEN: d # Your Telegram Bot Token encoded as secret
--------------------------------------------------------------------------------
/k8s/base/thub/configmap.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ConfigMap
3 | metadata:
4 | name: thub-configmap
5 | data:
6 | accounts.yml: |
7 | defaultNetwork: "testnet"
8 | accounts:
9 | - name: "RoboSats (Robodevz)"
10 | serverUrl: "lnd:10009"
11 | lndDir: "/lnd/"
12 | password: "examplepassword"
13 | TOR_PROXY_SERVER: 'socks://tor:9050'
14 | THEME: 'dark'
15 | CURRENCY: 'sat'
16 | FETCH_PRICES: 'false'
17 | FETCH_FEES: 'false'
18 | DISABLE_LINKS: 'true'
19 | DISABLE_LNMARKETS: 'true'
20 | NO_VERSION_CHECK: 'true'
21 | ACCOUNT_CONFIG_PATH: '/config/accounts.yml'
22 | # COOKIE_PATH: '/lnd/.cookie'
23 | # SSO_SERVER_URL: 'lnd:10009'
24 | # SSO_CERT_PATH: '/lnd/tls.cert'
25 | # SSO_MACAROON_PATH: '/lnd/data/chain/bitcoin/testnet'
26 | # DANGEROUS_NO_SSO_AUTH: 'false'
--------------------------------------------------------------------------------
/k8s/base/thub/deployment.yml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: thub
5 | labels:
6 | app: thub
7 | spec:
8 | selector:
9 | matchLabels:
10 | app: thub
11 | replicas: 1
12 | strategy:
13 | type: RollingUpdate
14 | template:
15 | metadata:
16 | labels:
17 | app: thub
18 | spec:
19 | containers:
20 | - name: thub
21 | image: apotdevin/thunderhub:v0.13.11
22 | imagePullPolicy: IfNotPresent
23 | envFrom:
24 | - configMapRef:
25 | name: thub-configmap
26 | ports:
27 | - containerPort: 3010
28 | resources:
29 | limits:
30 | cpu: "1"
31 | memory: "500Mi"
32 | requests:
33 | cpu: "10m"
34 | memory: "50Mi"
35 | volumeMounts:
36 | - name: thub-conf
37 | mountPath: /config/accounts.yml
38 | subPath: accounts.yml
39 | - name: lnd-data
40 | mountPath: /lnd
41 | volumes:
42 | - name: thub-conf
43 | configMap:
44 | name: thub-configmap
45 | - name: lnd-data
46 | persistentVolumeClaim:
47 | claimName: lnd-pvc
--------------------------------------------------------------------------------
/k8s/base/thub/service.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: thub-external
5 | labels:
6 | app: thub
7 | spec:
8 | type: NodePort
9 | ports:
10 | - name: http
11 | nodePort: 30010
12 | port: 3000
13 | selector:
14 | app: thub
--------------------------------------------------------------------------------
/k8s/base/tor/configmap.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ConfigMap
3 | metadata:
4 | name: tor-configmap
5 | data:
6 | torrc: |
7 | ControlPort 0.0.0.0:9051
 8 |     # Need to define a hashed password. Generate one: tor --hash-password torbasepassword
9 | # It is in the tor-onion k8s secret and passed as an arg
10 | # HashedControlPassword 16:B55F2A0995402608605FB7206DE0E474DF92EE2026A2FFBB1B8AC7A926
11 | # CookieAuthentication 0
12 |
13 | HiddenServiceDir /var/lib/tor/robosite/
14 | HiddenServicePort 80 nginx:80
15 |
16 | # add:
17 | # CookieAuthFileGroupReadable 1
18 | ## Configuration file for a typical Tor user
19 | ## Last updated 9 October 2013 for Tor 0.2.5.2-alpha.
20 | ## (may or may not work for much older or much newer versions of Tor.)
21 | ##
22 | ## Lines that begin with "## " try to explain what's going on. Lines
23 | ## that begin with just "#" are disabled commands: you can enable them
24 | ## by removing the "#" symbol.
25 | ##
26 | ## See 'man tor', or https://www.torproject.org/docs/tor-manual.html,
27 | ## for more options you can use in this file.
28 | ##
29 | ## Tor will look for this file in various places based on your platform:
30 | ## https://www.torproject.org/docs/faq#torrc
31 |
32 | ## Tor opens a socks proxy on port 9050 by default -- even if you don't
33 | ## configure one below. Set "SocksPort 0" if you plan to run Tor only
34 | ## as a relay, and not make any local application connections yourself.
35 | #SocksPort 9050 # Default: Bind to localhost:9050 for local connections.
36 | SocksPort 0.0.0.0:9050 # Bind to this address:port too.
37 |
38 | ## Entry policies to allow/deny SOCKS requests based on IP address.
39 | ## First entry that matches wins. If no SocksPolicy is set, we accept
40 | ## all (and only) requests that reach a SocksPort. Untrusted users who
41 | ## can access your SocksPort may be able to learn about the connections
42 | ## you make.
43 | #SocksPolicy accept bitcoind # Won't work
44 | #SocksPolicy accept 10.152.183.65 # Will work but this is a dynamic IP
45 | SocksPolicy accept 0.0.0.0 # Works but not ideal
46 | #SocksPolicy reject *
47 |
48 | ## Logs go to stdout at level "notice" unless redirected by something
49 | ## else, like one of the below lines. You can have as many Log lines as
50 | ## you want.
51 | ##
52 | ## We advise using "notice" in most cases, since anything more verbose
53 | ## may provide sensitive information to an attacker who obtains the logs.
54 | ##
55 | ## Send all messages of level 'notice' or higher to /var/log/tor/notices.log
56 | #Log notice file /var/log/tor/notices.log
57 | ## Send every possible message to /var/log/tor/debug.log
58 | #Log debug file /var/log/tor/debug.log
59 | ## Use the system log instead of Tor's logfiles
60 | #Log notice syslog
61 | ## To send all messages to stderr:
62 | #Log debug stderr
63 |
64 | ## Uncomment this to start the process in the background... or use
65 | ## --runasdaemon 1 on the command line. This is ignored on Windows;
66 | ## see the FAQ entry if you want Tor to run as an NT service.
67 | #RunAsDaemon 1
68 |
69 | ## The directory for keeping all the keys/etc. By default, we store
70 | ## things in $HOME/.tor on Unix, and in Application Data\tor on Windows.
71 | #DataDirectory /var/lib/tor
72 |
73 | ## The port on which Tor will listen for local connections from Tor
74 | ## controller applications, as documented in control-spec.txt.
75 | #ControlPort 9051
76 | ## If you enable the controlport, be sure to enable one of these
77 | ## authentication methods, to prevent attackers from accessing it.
78 | #HashedControlPassword 16:872860B76453A77D60CA2BB8C1A7042072093276A3D701AD684053EC4C
79 | #CookieAuthentication 1
80 |
81 | ############### This section is just for location-hidden services ###
82 |
83 | ## Once you have configured a hidden service, you can look at the
84 | ## contents of the file ".../hidden_service/hostname" for the address
85 | ## to tell people.
86 | ##
87 | ## HiddenServicePort x y:z says to redirect requests on port x to the
88 | ## address y:z.
89 |
90 | #HiddenServiceDir /var/lib/tor/hidden_service/
91 | #HiddenServicePort 80 127.0.0.1:80
92 |
93 | #HiddenServiceDir /var/lib/tor/other_hidden_service/
94 | #HiddenServicePort 80 127.0.0.1:80
95 | #HiddenServicePort 22 127.0.0.1:22
96 |
97 | ################ This section is just for relays #####################
98 | #
99 | ## See https://www.torproject.org/docs/tor-doc-relay for details.
100 |
101 | ## Required: what port to advertise for incoming Tor connections.
102 | #ORPort 9001
103 | ## If you want to listen on a port other than the one advertised in
104 | ## ORPort (e.g. to advertise 443 but bind to 9090), you can do it as
105 | ## follows. You'll need to do ipchains or other port forwarding
106 | ## yourself to make this work.
107 | #ORPort 443 NoListen
108 | #ORPort 127.0.0.1:9090 NoAdvertise
109 |
110 | ## The IP address or full DNS name for incoming connections to your
111 | ## relay. Leave commented out and Tor will guess.
112 | #Address noname.example.com
113 |
114 | ## If you have multiple network interfaces, you can specify one for
115 | ## outgoing traffic to use.
116 | # OutboundBindAddress 10.0.0.5
117 |
118 | ## A handle for your relay, so people don't have to refer to it by key.
119 | #Nickname ididnteditheconfig
120 |
121 | ## Define these to limit how much relayed traffic you will allow. Your
122 | ## own traffic is still unthrottled. Note that RelayBandwidthRate must
123 | ## be at least 20 KB.
124 | ## Note that units for these config options are bytes per second, not bits
125 | ## per second, and that prefixes are binary prefixes, i.e. 2^10, 2^20, etc.
126 | #RelayBandwidthRate 100 KB # Throttle traffic to 100KB/s (800Kbps)
127 | #RelayBandwidthBurst 200 KB # But allow bursts up to 200KB/s (1600Kbps)
128 |
129 | ## Use these to restrict the maximum traffic per day, week, or month.
130 | ## Note that this threshold applies separately to sent and received bytes,
131 | ## not to their sum: setting "4 GB" may allow up to 8 GB total before
132 | ## hibernating.
133 | ##
134 | ## Set a maximum of 4 gigabytes each way per period.
135 | #AccountingMax 4 GB
136 | ## Each period starts daily at midnight (AccountingMax is per day)
137 | #AccountingStart day 00:00
138 | ## Each period starts on the 3rd of the month at 15:00 (AccountingMax
139 | ## is per month)
140 | #AccountingStart month 3 15:00
141 |
142 | ## Administrative contact information for this relay or bridge. This line
143 | ## can be used to contact you if your relay or bridge is misconfigured or
144 | ## something else goes wrong. Note that we archive and publish all
145 | ## descriptors containing these lines and that Google indexes them, so
146 | ## spammers might also collect them. You may want to obscure the fact that
147 | ## it's an email address and/or generate a new address for this purpose.
148 | #ContactInfo Random Person
149 | ## You might also include your PGP or GPG fingerprint if you have one:
150 | #ContactInfo 0xFFFFFFFF Random Person
151 |
152 | ## Uncomment this to mirror directory information for others. Please do
153 | ## if you have enough bandwidth.
154 | #DirPort 9030 # what port to advertise for directory connections
155 | ## If you want to listen on a port other than the one advertised in
156 | ## DirPort (e.g. to advertise 80 but bind to 9091), you can do it as
157 | ## follows. below too. You'll need to do ipchains or other port
158 | ## forwarding yourself to make this work.
159 | #DirPort 80 NoListen
160 | #DirPort 127.0.0.1:9091 NoAdvertise
161 | ## Uncomment to return an arbitrary blob of html on your DirPort. Now you
162 | ## can explain what Tor is if anybody wonders why your IP address is
163 | ## contacting them. See contrib/tor-exit-notice.html in Tor's source
164 | ## distribution for a sample.
165 | #DirPortFrontPage /etc/tor/tor-exit-notice.html
166 |
167 | ## Uncomment this if you run more than one Tor relay, and add the identity
168 | ## key fingerprint of each Tor relay you control, even if they're on
169 | ## different networks. You declare it here so Tor clients can avoid
170 | ## using more than one of your relays in a single circuit. See
171 | ## https://www.torproject.org/docs/faq#MultipleRelays
172 | ## However, you should never include a bridge's fingerprint here, as it would
173 |     ## break its concealability and potentially reveal its IP/TCP address.
174 | #MyFamily $keyid,$keyid,...
175 |
176 | ## A comma-separated list of exit policies. They're considered first
177 | ## to last, and the first match wins. If you want to _replace_
178 | ## the default exit policy, end this with either a reject *:* or an
179 | ## accept *:*. Otherwise, you're _augmenting_ (prepending to) the
180 | ## default exit policy. Leave commented to just use the default, which is
181 | ## described in the man page or at
182 | ## https://www.torproject.org/documentation.html
183 | ##
184 | ## Look at https://www.torproject.org/faq-abuse.html#TypicalAbuses
185 | ## for issues you might encounter if you use the default exit policy.
186 | ##
187 | ## If certain IPs and ports are blocked externally, e.g. by your firewall,
188 | ## you should update your exit policy to reflect this -- otherwise Tor
189 | ## users will be told that those destinations are down.
190 | ##
191 | ## For security, by default Tor rejects connections to private (local)
192 | ## networks, including to your public IP address. See the man page entry
193 | ## for ExitPolicyRejectPrivate if you want to allow "exit enclaving".
194 | ##
195 | #ExitPolicy accept *:6660-6667,reject *:* # allow irc ports but no more
196 | #ExitPolicy accept *:119 # accept nntp as well as default exit policy
197 | #ExitPolicy reject *:* # no exits allowed
198 |
199 | ## Bridge relays (or "bridges") are Tor relays that aren't listed in the
200 | ## main directory. Since there is no complete public list of them, even an
201 | ## ISP that filters connections to all the known Tor relays probably
202 | ## won't be able to block all the bridges. Also, websites won't treat you
203 | ## differently because they won't know you're running Tor. If you can
204 | ## be a real relay, please do; but if not, be a bridge!
205 | #BridgeRelay 1
206 | ## By default, Tor will advertise your bridge to users through various
207 | ## mechanisms like https://bridges.torproject.org/. If you want to run
208 | ## a private bridge, for example because you'll give out your bridge
209 | ## address manually to your friends, uncomment this line:
210 | #PublishServerDescriptor 0
211 |
--------------------------------------------------------------------------------
/k8s/base/tor/deployment.yml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: tor
5 | labels:
6 | app: tor
7 | spec:
8 | selector:
9 | matchLabels:
10 | app: tor
11 | replicas: 1
12 | strategy:
13 | type: RollingUpdate
14 | template:
15 | metadata:
16 | labels:
17 | app: tor
18 | spec:
19 | # initContainers:
20 | # - name: changeowner
21 | # image: busybox
22 | # command: ["sh", "-c", "chown -R 100:100 /var/lib/tor/robosite"]
23 | # volumeMounts:
24 | # - name: onion
25 | # mountPath: /var/lib/tor/robosite/
26 | # readOnly: false
27 | containers:
28 | - name: tor
29 | image: osminogin/tor-simple:0.4.7.8
30 | imagePullPolicy: IfNotPresent
31 | ports:
32 | - containerPort: 9050 # torproxy
33 | - containerPort: 9051 # torcontrol
34 | command: [
35 | tor,
36 | HashedControlPassword,
37 | $(torcontrolhashedpass)
38 | ]
39 | envFrom:
40 | - secretRef:
41 | name: tor-secret
42 | resources:
43 | limits:
44 | cpu: "50m"
45 | memory: "100Mi"
46 | requests:
47 | cpu: "1m"
48 | memory: "5Mi"
49 | volumeMounts:
50 | - name: tor-conf
51 | mountPath: /etc/tor/torrc
52 | subPath: torrc
53 | # - name: onion
54 | # mountPath: /var/lib/tor/robosite/
55 | # readOnly: true
56 | # - name: onion
57 | # mountPath: /var/lib/tor/robosite/
58 | # readOnly: true
59 | # securityContext:
60 | # runAsUser: 0
61 | volumes:
62 | - name: tor-conf
63 | configMap:
64 | name: tor-configmap
65 | # - name: onion
66 | # persistentVolumeClaim:
67 | # claimName: onion-pvc
68 |
69 | # - name: onion
70 | # secret:
71 | # secretName: onion-secret
72 | # items:
73 | # - key: publicKeyFile
74 | # path: hs_ed25519_public_key
75 | # - key: privateKeyFile
76 | # path: hs_ed25519_secret_key
77 | # - key: onionAddress
78 | # path: hostname
79 |
80 |
81 | ## Mounting onion secrets
82 |
83 | # - name: onion-secret
84 | # mountPath: /var/lib/tor/robosite/
85 | # readOnly: true
86 | # securityContext:
87 | # runAsUser: 0
88 | # runAsGroup: 0
89 | # fsGroup: 0
90 | # volumes:
91 | # - name: tor-conf
92 | # configMap:
93 | # name: tor-configmap
94 | # - name: onion-secret
95 | # persistentVolumeClaim:
96 | # claimName: tor-pvc
--------------------------------------------------------------------------------
/k8s/base/tor/onion-secret.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | data:
3 | # Generate using commands in readme.md
4 | onionAddress: ...aWQub25pb24K
5 | privateKeyFile: ...F1mVQaWw2V6YahyUOiHSNg5QYwwAH
6 | publicKeyFile: ...LguGSudSC/MOE8SDtSrRvGQFF7VYlynxUCBgvtLWrMg==
7 | kind: Secret
8 | metadata:
9 | name: onion-secret
10 | namespace: default
11 | type: Opaque
12 |
--------------------------------------------------------------------------------
/k8s/base/tor/pvc.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: PersistentVolumeClaim
3 | metadata:
4 | name: onion-pvc # stores onion secrets
5 | labels:
6 | app: tor
7 | spec:
 8 |   storageClassName: openebs-host
9 | accessModes:
10 | - ReadWriteOnce
11 | resources:
12 | requests:
13 | storage: 1Mi
--------------------------------------------------------------------------------
/k8s/base/tor/service.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: tor
5 | labels:
6 | app: tor
7 | spec:
8 | type: ClusterIP
9 | ports:
10 | - port: 9050
11 | name: torproxy
12 | - port: 9051
13 | name: torcontrol
14 | selector:
15 | app: tor
--------------------------------------------------------------------------------
/k8s/base/tor/tor-secret.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | data:
3 | torcontrolhashedpass: MTY6QjU1RjJBMDk5NTQwMjYwODYwNUZCNzIwNkRFMEU0NzRERjkyRUUyMDI2QTJGRkJCMUI4QUM3QTkyNg== # 16:B55F2A0995402608605FB7206DE0E474DF92EE2026A2FFBB1B8AC7A926
4 | torcontrolpass: dG9yYmFzZXBhc3N3b3Jk # torbasepassword
5 | kind: Secret
6 | metadata:
7 | name: tor-secret
8 | namespace: default
9 | type: Opaque
10 |
--------------------------------------------------------------------------------
/k8s/base/tor/~onionservice.yml:
--------------------------------------------------------------------------------
1 | apiVersion: tor.k8s.torproject.org/v1alpha2
2 | kind: OnionService
3 | metadata:
4 | name: onion-service
5 | spec:
6 | version: 3
7 | privateKeySecret:
8 | name: onion-secret
9 | rules:
10 | - port:
11 | number: 80
12 | backend:
13 | service:
14 | name: nginx-cluster
15 | port:
16 | number: 80
--------------------------------------------------------------------------------
/k8s/base/tor/~readme.md:
--------------------------------------------------------------------------------
1 | # Tor service controller maintained by bugfest https://github.com/bugfest/tor-controller
2 | # run "mkctl apply -f base/tor/install.yml"
3 | # More info https://golangexample.com/run-tor-onion-services-on-kubernetes-actively-maintained/
--------------------------------------------------------------------------------
/k8s/overlays/development/kustomization.yml:
--------------------------------------------------------------------------------
1 | apiVersion: kustomize.config.k8s.io/v1beta1
2 | kind: Kustomization
3 | namespace: development
4 | #namePrefix: dev-
5 | commonLabels:
6 | variant: development
7 | resources:
8 | - ../../base
9 | - namespace.yml
--------------------------------------------------------------------------------
/k8s/overlays/development/namespace.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Namespace
3 | metadata:
4 | name: development
5 | labels:
6 | name: development
--------------------------------------------------------------------------------
/k8s/overlays/testnet/kustomization.yml:
--------------------------------------------------------------------------------
1 | apiVersion: kustomize.config.k8s.io/v1beta1
2 | kind: Kustomization
3 | namespace: testnet
4 | #namePrefix: tn-
5 | commonLabels:
6 | variant: testnet
7 | resources:
8 | - ../../base
9 | - namespace.yml
--------------------------------------------------------------------------------
/k8s/overlays/testnet/namespace.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Namespace
3 | metadata:
4 | name: testnet
5 | labels:
6 | name: testnet
--------------------------------------------------------------------------------
/web/.gitignore:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/RoboSats/robosats-deploy/e446170082d28db68f26ffa65ea9916b19a46661/web/.gitignore
--------------------------------------------------------------------------------
/web/custom_nginx.conf:
--------------------------------------------------------------------------------
1 | daemon off;
2 |
3 | user nginx;
4 | worker_processes auto;
5 |
6 | error_log /var/log/nginx/error.log notice;
7 | pid /var/run/nginx.pid;
8 |
9 | events {
10 | worker_connections 1024;
11 | }
12 |
13 | http {
14 |
15 | include /etc/nginx/mime.types;
16 | default_type application/octet-stream;
17 | large_client_header_buffers 4 64K;
18 |
19 | log_format main '$remote_addr - $remote_user [$time_local] "$request" '
20 | '$status $body_bytes_sent "$http_referer" '
21 | '"$http_user_agent" "$http_x_forwarded_for"';
22 |
23 | access_log /dev/stdout main;
24 | error_log /dev/stderr warn;
25 |
26 | sendfile on;
27 | keepalive_timeout 65;
28 |
29 | server {
30 |
31 | listen 80;
32 | server_name robosats_web_client;
33 |
34 | # Add your custom verify files by editing location and alias
35 | location /clearnetonion_verify {
36 | alias /serve_misc/;
37 | }
38 |
39 | location / {
40 | root /usr/src/robosats;
41 | try_files $uri $uri/ /basic.html;
42 | index basic.html;
43 | }
44 |
45 | location /pro {
46 | root /usr/src/robosats;
47 | try_files $uri $uri/ /pro.html;
48 | index pro.html;
49 | }
50 |
51 | location /static/ {
52 | alias /usr/src/robosats/static/;
53 | autoindex on;
54 | }
55 |
56 | location = /favicon.ico {
57 | alias /usr/src/robosats/static/assets/images/favicon-96x96.png;
58 | }
59 | }
60 | }
--------------------------------------------------------------------------------
/web/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: '3.9'
2 | services:
3 | robosats:
4 | build: .
5 | image: recksato/robosats-web:latest
6 | container_name: robosats
7 | restart: always
8 | network_mode: service:tor
9 | volumes:
10 | - ./serve_misc:/serve_misc/
11 | - ./custom_nginx.conf:/etc/nginx/nginx.conf
12 | tor:
13 | build: ../compose/tor
14 | container_name: tor
15 | restart: always
16 | environment:
17 | LOCAL_USER_ID: 1000
18 | LOCAL_GROUP_ID: 1000
19 | ports:
20 | - 80:80
21 | volumes:
22 | - ./tor:/var/lib/tor
23 | - ./torrc:/etc/tor/torrc
--------------------------------------------------------------------------------
/web/readme.md:
--------------------------------------------------------------------------------
1 | # Host a RoboSats web client
2 |
3 | This docker app is intended for hosting a web client for public use. For example, the docker nginx server bundled with all static files is built in https://github.com/RoboSats/robosats/tree/main/web
4 |
5 | Works similarly to `/nodeapp`, but simpler. It does not use the selfhosted flags, nor does it torify connections to coordinators. The browser itself must support Tor.
6 |
7 | Drop your service vanity key into `tor/roboweb` (create the `tor` folder if needed). Make sure the hidden service folder is named `roboweb`, or edit the `torrc` accordingly.
--------------------------------------------------------------------------------
/web/serve_misc/readme.md:
--------------------------------------------------------------------------------
1 | If you set up a clearnetonion.eu.org clearnet tunnel, you can drop your clearnetonion_verify_*.txt file into this folder.
--------------------------------------------------------------------------------
/web/torrc:
--------------------------------------------------------------------------------
1 | Log notice file /var/log/tor/notices.log
2 |
3 | ## The directory for keeping all the keys/etc. By default, we store
4 | ## things in $HOME/.tor on Unix, and in Application Data\tor on Windows.
5 | DataDirectory /var/lib/tor
6 | DataDirectoryGroupReadable 1
7 |
8 | ## Enable ControlPort
9 | #ControlPort 9051
10 | #CookieAuthentication 1
11 | #CookieAuthFileGroupReadable 1
12 | #CookieAuthFile /var/lib/tor/control_auth_cookie
13 |
14 | # Robosats LND Testnet Onion Service
15 | HiddenServiceDir /var/lib/tor/roboweb/
16 | HiddenServiceVersion 3
17 | HiddenServicePort 80 127.0.0.1:80
--------------------------------------------------------------------------------