├── .gitignore
├── .travis.yml
├── LICENSE
├── README.md
├── build.sbt
├── facade
│   ├── js
│   │   └── src
│   │       └── test
│   │           ├── resources
│   │           │   └── application.conf
│   │           └── scala
│   │               └── AkkaConf.scala
│   ├── jvm
│   │   └── src
│   │       └── test
│   │           ├── resources
│   │           │   ├── akka-long.conf
│   │           │   ├── akka.conf
│   │           │   └── application.conf
│   │           └── scala
│   │               └── AkkaConf.scala
│   └── shared
│       └── src
│           ├── main
│           │   └── scala
│           │       ├── com
│           │       │   └── typesafe
│           │       │       └── config
│           │       │           ├── Config.scala
│           │       │           ├── ConfigException.scala
│           │       │           ├── ConfigMergeable.scala
│           │       │           ├── ConfigObject.scala
│           │       │           ├── ConfigValue.scala
│           │       │           ├── ConfigValueType.scala
│           │       │           ├── MemoryUnit.scala
│           │       │           └── package.scala
│           │       └── org
│           │           └── akkajs
│           │               └── shocon
│           │                   └── ConfigLoader.scala
│           └── test
│               └── scala
│                   ├── AkkaConfig.scala
│                   └── SHoconGenericSpec.scala
├── plugin
│   ├── build.sbt
│   ├── project
│   │   ├── Common.scala
│   │   ├── build.properties
│   │   ├── plugins.sbt
│   │   └── project
│   │       └── build.properties
│   └── src
│       ├── main
│       │   └── scala
│       │       └── org
│       │           └── akkajs
│       │               └── shocon
│       │                   └── sbtplugin
│       │                       └── ShoconPlugin.scala
│       └── sbt-test
│           └── shocon
│               └── basic
│                   ├── build.sbt
│                   ├── js
│                   │   └── src
│                   │       └── main
│                   │           └── resources
│                   │               ├── application.conf
│                   │               └── reference.conf
│                   ├── jvm
│                   │   └── src
│                   │       └── main
│                   │           └── resources
│                   │               ├── application.conf
│                   │               └── reference.conf
│                   ├── lib
│                   │   ├── js
│                   │   │   └── src
│                   │   │       └── main
│                   │   │           └── resources
│                   │   │               └── reference.conf
│                   │   └── jvm
│                   │       └── src
│                   │           └── main
│                   │               └── resources
│                   │                   └── reference.conf
│                   ├── project
│                   │   └── plugins.sbt
│                   ├── shared
│                   │   └── src
│                   │       └── main
│                   │           └── scala
│                   │               └── Main.scala
│                   └── test
├── project
│   ├── Common.scala
│   ├── build.properties
│   └── plugins.sbt
├── publish.sh
├── scalastyle-config.xml
├── shared
│   └── src
│       └── main
│           └── scala
│               └── org
│                   └── akkajs
│                       └── shocon
│                           ├── ConfigMacroLoader.scala
│                           ├── ConfigParser.scala
│                           ├── Extractors.scala
│                           └── SHocon.scala
└── travis
    ├── setNodeVersion.sh
    └── testSbtPlugin.sh

--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | /RUNNING_PID
2 | /logs/
3 | /project/*-shim.sbt
4 | /project/project/
5 | /project/target/
6 | /target/
7 | target
8 | bin
9 | .ensime*
10 | shared/src/main/scala/Empty.scala
11 | .idea/

--------------------------------------------------------------------------------
/.travis.yml:
--------------------------------------------------------------------------------
1 | sudo: required
2 | 
3 | os: linux
4 | dist: trusty
5 | jdk: openjdk8
6 | language: scala
7 | 
8 | scala:
9 |   - "2.13.1"
10 |   - "2.12.10"
11 | 
12 | matrix:
13 |   include:
14 |     - env: SBT_VERSION=1.3.8
15 |       scala: "2.12.10"
16 | 
17 | env:
18 |   - TRAVIS_NODE_VERSION="10.17.0"
19 |   - TRAVIS_NODE_VERSION="12.13.1"
20 | 
21 | install:
22 |   - source travis/setNodeVersion.sh
23 | 
24 | cache:
25 |   directories:
26 |     - $HOME/.cache
27 | 
28 | script:
29 |   - sbt ++$TRAVIS_SCALA_VERSION test
30 |   - source travis/testSbtPlugin.sh

--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | 
2 |                                  Apache License
3 |                            Version 2.0, January 2004
4 |                         http://www.apache.org/licenses/
5 | 
6 |    TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
7 | 
8 |    1. Definitions.
9 | 
10 |       "License" shall mean the terms and conditions for use, reproduction,
11 |       and distribution as defined by Sections 1 through 9 of this document.
12 | 13 | "Licensor" shall mean the copyright owner or entity authorized by 14 | the copyright owner that is granting the License. 15 | 16 | "Legal Entity" shall mean the union of the acting entity and all 17 | other entities that control, are controlled by, or are under common 18 | control with that entity. For the purposes of this definition, 19 | "control" means (i) the power, direct or indirect, to cause the 20 | direction or management of such entity, whether by contract or 21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 22 | outstanding shares, or (iii) beneficial ownership of such entity. 23 | 24 | "You" (or "Your") shall mean an individual or Legal Entity 25 | exercising permissions granted by this License. 26 | 27 | "Source" form shall mean the preferred form for making modifications, 28 | including but not limited to software source code, documentation 29 | source, and configuration files. 30 | 31 | "Object" form shall mean any form resulting from mechanical 32 | transformation or translation of a Source form, including but 33 | not limited to compiled object code, generated documentation, 34 | and conversions to other media types. 35 | 36 | "Work" shall mean the work of authorship, whether in Source or 37 | Object form, made available under the License, as indicated by a 38 | copyright notice that is included in or attached to the work 39 | (an example is provided in the Appendix below). 40 | 41 | "Derivative Works" shall mean any work, whether in Source or Object 42 | form, that is based on (or derived from) the Work and for which the 43 | editorial revisions, annotations, elaborations, or other modifications 44 | represent, as a whole, an original work of authorship. For the purposes 45 | of this License, Derivative Works shall not include works that remain 46 | separable from, or merely link (or bind by name) to the interfaces of, 47 | the Work and Derivative Works thereof. 48 | 49 | "Contribution" shall mean any work of authorship, including 50 | the original version of the Work and any modifications or additions 51 | to that Work or Derivative Works thereof, that is intentionally 52 | submitted to Licensor for inclusion in the Work by the copyright owner 53 | or by an individual or Legal Entity authorized to submit on behalf of 54 | the copyright owner. For the purposes of this definition, "submitted" 55 | means any form of electronic, verbal, or written communication sent 56 | to the Licensor or its representatives, including but not limited to 57 | communication on electronic mailing lists, source code control systems, 58 | and issue tracking systems that are managed by, or on behalf of, the 59 | Licensor for the purpose of discussing and improving the Work, but 60 | excluding communication that is conspicuously marked or otherwise 61 | designated in writing by the copyright owner as "Not a Contribution." 62 | 63 | "Contributor" shall mean Licensor and any individual or Legal Entity 64 | on behalf of whom a Contribution has been received by Licensor and 65 | subsequently incorporated within the Work. 66 | 67 | 2. Grant of Copyright License. Subject to the terms and conditions of 68 | this License, each Contributor hereby grants to You a perpetual, 69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 70 | copyright license to reproduce, prepare Derivative Works of, 71 | publicly display, publicly perform, sublicense, and distribute the 72 | Work and such Derivative Works in Source or Object form. 73 | 74 | 3. Grant of Patent License. 
Subject to the terms and conditions of 75 | this License, each Contributor hereby grants to You a perpetual, 76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 77 | (except as stated in this section) patent license to make, have made, 78 | use, offer to sell, sell, import, and otherwise transfer the Work, 79 | where such license applies only to those patent claims licensable 80 | by such Contributor that are necessarily infringed by their 81 | Contribution(s) alone or by combination of their Contribution(s) 82 | with the Work to which such Contribution(s) was submitted. If You 83 | institute patent litigation against any entity (including a 84 | cross-claim or counterclaim in a lawsuit) alleging that the Work 85 | or a Contribution incorporated within the Work constitutes direct 86 | or contributory patent infringement, then any patent licenses 87 | granted to You under this License for that Work shall terminate 88 | as of the date such litigation is filed. 89 | 90 | 4. Redistribution. You may reproduce and distribute copies of the 91 | Work or Derivative Works thereof in any medium, with or without 92 | modifications, and in Source or Object form, provided that You 93 | meet the following conditions: 94 | 95 | (a) You must give any other recipients of the Work or 96 | Derivative Works a copy of this License; and 97 | 98 | (b) You must cause any modified files to carry prominent notices 99 | stating that You changed the files; and 100 | 101 | (c) You must retain, in the Source form of any Derivative Works 102 | that You distribute, all copyright, patent, trademark, and 103 | attribution notices from the Source form of the Work, 104 | excluding those notices that do not pertain to any part of 105 | the Derivative Works; and 106 | 107 | (d) If the Work includes a "NOTICE" text file as part of its 108 | distribution, then any Derivative Works that You distribute must 109 | include a readable copy of the attribution notices contained 110 | within such NOTICE file, excluding those notices that do not 111 | pertain to any part of the Derivative Works, in at least one 112 | of the following places: within a NOTICE text file distributed 113 | as part of the Derivative Works; within the Source form or 114 | documentation, if provided along with the Derivative Works; or, 115 | within a display generated by the Derivative Works, if and 116 | wherever such third-party notices normally appear. The contents 117 | of the NOTICE file are for informational purposes only and 118 | do not modify the License. You may add Your own attribution 119 | notices within Derivative Works that You distribute, alongside 120 | or as an addendum to the NOTICE text from the Work, provided 121 | that such additional attribution notices cannot be construed 122 | as modifying the License. 123 | 124 | You may add Your own copyright statement to Your modifications and 125 | may provide additional or different license terms and conditions 126 | for use, reproduction, or distribution of Your modifications, or 127 | for any such Derivative Works as a whole, provided Your use, 128 | reproduction, and distribution of the Work otherwise complies with 129 | the conditions stated in this License. 130 | 131 | 5. Submission of Contributions. Unless You explicitly state otherwise, 132 | any Contribution intentionally submitted for inclusion in the Work 133 | by You to the Licensor shall be under the terms and conditions of 134 | this License, without any additional terms or conditions. 
135 | Notwithstanding the above, nothing herein shall supersede or modify 136 | the terms of any separate license agreement you may have executed 137 | with Licensor regarding such Contributions. 138 | 139 | 6. Trademarks. This License does not grant permission to use the trade 140 | names, trademarks, service marks, or product names of the Licensor, 141 | except as required for reasonable and customary use in describing the 142 | origin of the Work and reproducing the content of the NOTICE file. 143 | 144 | 7. Disclaimer of Warranty. Unless required by applicable law or 145 | agreed to in writing, Licensor provides the Work (and each 146 | Contributor provides its Contributions) on an "AS IS" BASIS, 147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 148 | implied, including, without limitation, any warranties or conditions 149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 150 | PARTICULAR PURPOSE. You are solely responsible for determining the 151 | appropriateness of using or redistributing the Work and assume any 152 | risks associated with Your exercise of permissions under this License. 153 | 154 | 8. Limitation of Liability. In no event and under no legal theory, 155 | whether in tort (including negligence), contract, or otherwise, 156 | unless required by applicable law (such as deliberate and grossly 157 | negligent acts) or agreed to in writing, shall any Contributor be 158 | liable to You for damages, including any direct, indirect, special, 159 | incidental, or consequential damages of any character arising as a 160 | result of this License or out of the use or inability to use the 161 | Work (including but not limited to damages for loss of goodwill, 162 | work stoppage, computer failure or malfunction, or any and all 163 | other commercial damages or losses), even if such Contributor 164 | has been advised of the possibility of such damages. 165 | 166 | 9. Accepting Warranty or Additional Liability. While redistributing 167 | the Work or Derivative Works thereof, You may choose to offer, 168 | and charge a fee for, acceptance of support, warranty, indemnity, 169 | or other liability obligations and/or rights consistent with this 170 | License. However, in accepting such obligations, You may act only 171 | on Your own behalf and on Your sole responsibility, not on behalf 172 | of any other Contributor, and only if You agree to indemnify, 173 | defend, and hold each Contributor harmless for any liability 174 | incurred by, or claims asserted against, such Contributor by reason 175 | of your accepting any such warranty or additional liability. 176 | 177 | END OF TERMS AND CONDITIONS 178 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # SHocon 2 | 3 | [![Build Status](https://travis-ci.org/akka-js/shocon.png?branch=master)](https://travis-ci.org/akka-js/shocon) 4 | 5 | SHocon is a simple, pure-Scala, alternative implementation of the [HOCON](https://github.com/typesafehub/config/blob/master/HOCON.md) 6 | specification. 7 | 8 | SHocon ships with a native, Scala-idiomatic API, and a shim that mimics the [Typesafe Config](https://github.com/typesafehub/config) Java API, making it well-suited as a **drop-in replacement** wherever the Java implementation is not available, such as **Scala.JS** or **Scala Native** projects. 9 | 10 | This implementation does not cover all of the corner cases of the original implementation. 
Issues and PRs are welcome!
11 | 
12 | ## Usage
13 | 
14 | Add this line to your `project/plugins.sbt`:
15 | ```scala
16 | addSbtPlugin("org.akka-js" % "sbt-shocon" % "1.0.0")
17 | ```
18 | 
19 | and in `build.sbt`:
20 | ```scala
21 | val root = project.in(file("."))
22 |   .enablePlugins(ShoconPlugin)
23 |   .settings(
24 |     libraryDependencies += "org.akka-js" %% "shocon" % "1.0.0",
25 |     // for Scala.js/Native or cross projects use %%% instead:
26 |     // libraryDependencies += "org.akka-js" %%% "shocon" % "1.0.0"
27 | 
28 |     // add a dependency on the shocon file generation task
29 |     // (not required, but otherwise you need to call shoconConcat manually before compilation!)
30 |     compile in Compile := (compile in Compile).dependsOn(shoconConcat).value
31 | 
32 |     /* ... */
33 |   )
34 | ```
35 | 
36 | ## Credits
37 | 
38 | SHocon wouldn't have been possible without the enormous support of the R&D department of UniCredit, led by Riccardo Prodam. Started as a side project, it quickly grew into an important open-source milestone.
39 | Check out other projects from the UniCredit team [here](https://github.com/unicredit).
40 | 
41 | ## Notes
42 | 
43 | ### Scala.js support
44 | 
45 | Starting from shocon `1.0.0` we dropped support for Scala.js `0.6`; the latest artifact published for Scala.js `0.6` is shocon `0.5.0`.
46 | 
47 | ### Loading of default configuration
48 | In contrast to Typesafe Config, which loads configuration files dynamically at run time, shocon compiles the default configuration returned by `ConfigFactory.load()` statically into the code. This includes all `reference.conf` files found in the `resources` directory of the project itself, as well as all `reference.conf` files found in JARs on which the project depends. If there is an `application.conf` file in the `resources` directory of the project, it is included as well (after all `reference.conf` files).
49 | 
50 | The resulting HOCON configuration file is assembled in `target/scala-VERSION/shocon.conf`.
51 | 
52 | *Note*: For Scala.js / Native / JVM cross projects, only the `reference.conf` files located in `js/src/main/resources` or `jvm/src/main/resources` are included; files in `shared/src/main/resources/` are ignored!
53 | 
54 | Since version `0.3.1`, parsing is aggressively moved to compile time; note that parsing at run time costs a lot in terms of performance.
55 | 
56 | ### ShoconPlugin settings
57 | You can control the contents of the included default configuration with the following sbt settings:
58 | 
59 | * `shoconLoadFromJars`: set to `false` if you don't want to include any `reference.conf` files found in JARs
60 | * `shoconFilter: Function1[(String,InputStream), Boolean]`: set this to a filter function that returns `true` for every configuration file to be included; the first element of the tuple passed to the function is the absolute URL of the configuration file (see the sketch below).
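
For illustration, here is a minimal sketch combining both settings in a `build.sbt`; the filter predicate and the `app.conf` file name are assumptions made for this example, not plugin defaults:

```scala
lazy val app = project.in(file("."))
  .enablePlugins(ShoconPlugin)
  .settings(
    // don't collect reference.conf files from dependency JARs
    shoconLoadFromJars := false,
    // keep only configuration files whose absolute URL ends in "app.conf"
    // (hypothetical file name); the first tuple element is the URL,
    // the second the file's InputStream
    shoconFilter := { case (url, _) => url.endsWith("app.conf") },
    // regenerate target/scala-VERSION/shocon.conf before each compilation
    compile in Compile := (compile in Compile).dependsOn(shoconConcat).value
  )
```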
61 | -------------------------------------------------------------------------------- /build.sbt: -------------------------------------------------------------------------------- 1 | import xerial.sbt.Sonatype._ 2 | 3 | import sbtcrossproject.CrossPlugin.autoImport.crossProject 4 | 5 | lazy val root = project 6 | .in(file(".")) 7 | .aggregate(parserJS, parserJVM, facadeJS, facadeJVM) 8 | .settings(sonatypeSettings) 9 | 10 | lazy val fixResources = 11 | taskKey[Unit]("Fix application.conf presence on first clean build.") 12 | 13 | lazy val parser = crossProject(JSPlatform, JVMPlatform) 14 | .in(file(".")) 15 | .settings( 16 | name := "shocon-parser", 17 | scalacOptions ++= 18 | Seq( 19 | "-feature", 20 | "-unchecked", 21 | "-language:implicitConversions", 22 | "-deprecation", 23 | ), 24 | publishTo := sonatypePublishTo.value 25 | ) 26 | .settings(sonatypeSettings) 27 | .settings( 28 | fixResources := { 29 | val compileConf = (resourceDirectory in Compile).value / "application.conf" 30 | if (compileConf.exists) 31 | IO.copyFile( 32 | compileConf, 33 | (classDirectory in Compile).value / "application.conf" 34 | ) 35 | val testConf = (resourceDirectory in Test).value / "application.conf" 36 | if (testConf.exists) { 37 | IO.copyFile( 38 | testConf, 39 | (classDirectory in Test).value / "application.conf" 40 | ) 41 | } 42 | }, 43 | compile in Compile := (compile in Compile).dependsOn(fixResources).value, 44 | libraryDependencies ++= Seq( 45 | "org.scala-lang.modules" %%% "scala-collection-compat" % "2.1.4", 46 | "com.lihaoyi" %%% "fastparse" % "2.2.4", 47 | "org.scala-lang" % "scala-reflect" % scalaVersion.value % "provided" 48 | ) 49 | ) 50 | .jsSettings( 51 | libraryDependencies += "org.scala-js" %%% "scalajs-java-time" % "1.0.0", 52 | parallelExecution in Test := true 53 | ) 54 | 55 | lazy val parserJVM = parser.jvm 56 | lazy val parserJS = parser.js 57 | 58 | lazy val facade = crossProject(JSPlatform, JVMPlatform) 59 | .in(file("facade")) 60 | .dependsOn(parser) 61 | .settings( 62 | name := "shocon", 63 | scalacOptions ++= 64 | Seq( 65 | "-feature", 66 | "-unchecked", 67 | "-language:implicitConversions", 68 | "-deprecation", 69 | ), 70 | publishTo := sonatypePublishTo.value 71 | ) 72 | .settings(sonatypeSettings) 73 | .settings( 74 | fixResources := { 75 | val compileConf = (resourceDirectory in Compile).value / "application.conf" 76 | if (compileConf.exists) 77 | IO.copyFile( 78 | compileConf, 79 | (classDirectory in Compile).value / "application.conf" 80 | ) 81 | val testConf = (resourceDirectory in Test).value / "application.conf" 82 | if (testConf.exists) { 83 | IO.copyFile( 84 | testConf, 85 | (classDirectory in Test).value / "application.conf" 86 | ) 87 | } 88 | }, 89 | compile in Compile := (compile in Compile).dependsOn(fixResources).value, 90 | testFrameworks += new TestFramework("utest.runner.Framework"), 91 | libraryDependencies ++= Seq( 92 | "org.scala-lang.modules" %%% "scala-collection-compat" % "2.1.4", 93 | "com.lihaoyi" %%% "fastparse" % "2.2.4", 94 | "com.lihaoyi" %%% "utest" % "0.7.4" % "test", 95 | "org.scala-lang" % "scala-reflect" % scalaVersion.value % "provided" 96 | ) 97 | ) 98 | .jsSettings( 99 | libraryDependencies += "org.scala-js" %%% "scalajs-java-time" % "1.0.0", 100 | parallelExecution in Test := true 101 | ) 102 | 103 | lazy val facadeJVM = facade.jvm 104 | lazy val facadeJS = facade.js 105 | -------------------------------------------------------------------------------- /facade/js/src/test/resources/application.conf: 
-------------------------------------------------------------------------------- 1 | loaded = "DONE" 2 | -------------------------------------------------------------------------------- /facade/js/src/test/scala/AkkaConf.scala: -------------------------------------------------------------------------------- 1 | package tests 2 | 3 | import scala.scalajs.js.Dynamic.global 4 | 5 | object AkkaConf { 6 | 7 | def basic: String = """ 8 | 9 | 10 | 11 | 12 | 13 | 14 | # comment 15 | # comment 16 | # comment 17 | # comment 18 | # comment 19 | # comment 20 | # comment 21 | # comment 22 | # comment 23 | # comment 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | akka { 33 | version = "2.0-SNAPSHOT" 34 | # comment 35 | enabled-modules = [] 36 | time-unit = "seconds" 37 | event-handlers = ["akka.event.Logging$DefaultLogger"] 38 | loglevel = "WARNING" 39 | 40 | 41 | , stdout-loglevel = "WARNING" 42 | 43 | , event-handler-dispatcher { 44 | type = "Dispatcher" 45 | # comment 46 | # comment 47 | # comment 48 | # comment 49 | # comment 50 | # comment 51 | # comment 52 | 53 | , name = "EventHandlerDispatcher" 54 | , keep-alive-time = 60 55 | , core-pool-size = 1 56 | , max-pool-size = 8 57 | , executor-bounds = -1 58 | , task-queue-size = -1 59 | , task-queue-type = "linked" 60 | , allow-core-timeout = on 61 | , rejection-policy = "caller-runs" 62 | , throughput = 5 63 | , throughput-deadline-time = -1 64 | , mailbox-capacity = -1 65 | 66 | 67 | 68 | , mailbox-push-timeout-time = -10, 69 | x=1 70 | }, 71 | boot = [] 72 | } 73 | 74 | """ 75 | 76 | def long = 77 | """ 78 | # This is an unmodified akka-reference.conf, except for this 79 | # comment and quoting one key that contained slashes. 80 | # Note: the outer akka{} would not be used in the usual setup 81 | # with this config lib. 82 | 83 | ############################## 84 | # Akka Reference Config File # 85 | ############################## 86 | 87 | # This the reference config file has all the default settings. 88 | # All these could be removed with no visible effect. 89 | # Modify as needed. 90 | # This file is imported in the 'akka.conf' file. Make your edits/overrides there. 91 | 92 | akka { 93 | version = "2.0-SNAPSHOT" # Akka version, checked against the runtime version of Akka. 94 | 95 | enabled-modules = [] # Comma separated list of the enabled modules. 
Options: ["cluster", "camel", "http"] 96 | 97 | time-unit = "seconds" # Time unit for all timeout properties throughout the config 98 | 99 | event-handlers = ["akka.event.Logging$DefaultLogger"] # Event handlers to register at boot time (Logging$DefaultLogger logs to STDOUT) 100 | loglevel = "WARNING" # Options: ERROR, WARNING, INFO, DEBUG 101 | # this level is used by the configured loggers (see "event-handlers") as soon 102 | # as they have been started; before that, see "stdout-loglevel" 103 | stdout-loglevel = "WARNING" # Loglevel for the very basic logger activated during AkkaApplication startup 104 | 105 | event-handler-dispatcher { 106 | type = "Dispatcher" # Must be one of the following 107 | # Dispatcher, (BalancingDispatcher, only valid when all actors using it are of the same type), 108 | # A FQCN to a class inheriting MessageDispatcherConfigurator with a no-arg visible constructor 109 | name = "EventHandlerDispatcher" # Optional, will be a generated UUID if omitted 110 | keep-alive-time = 60 # Keep alive time for threads 111 | core-pool-size = 1 # No of core threads 112 | max-pool-size = 8 # Max no of threads 113 | executor-bounds = -1 # Makes the Executor bounded, -1 is unbounded 114 | task-queue-size = -1 # Specifies the bounded capacity of the task queue (< 1 == unbounded) 115 | task-queue-type = "linked" # Specifies which type of task queue will be used, can be "array" or "linked" (default) 116 | allow-core-timeout = on # Allow core threads to time out 117 | rejection-policy = "caller-runs" # abort, caller-runs, discard-oldest, discard 118 | throughput = 5 # Throughput for Dispatcher, set to 1 for complete fairness 119 | throughput-deadline-time = -1 # Throughput deadline for Dispatcher, set to 0 or negative for no deadline 120 | mailbox-capacity = -1 # If negative (or zero) then an unbounded mailbox is used (default) 121 | # If positive then a bounded mailbox is used and the capacity is set using the property 122 | # NOTE: setting a mailbox to 'blocking' can be a bit dangerous, could lead to deadlock, use with care 123 | # The following are only used for Dispatcher and only if mailbox-capacity > 0 124 | mailbox-push-timeout-time = 10 # Specifies the timeout to add a new message to a mailbox that is full - negative number means infinite timeout 125 | # (in unit defined by the time-unit property) 126 | } 127 | 128 | # These boot classes are loaded (and created) automatically when the Akka Microkernel boots up 129 | # Can be used to bootstrap your application(s) 130 | # Should be the FQN (Fully Qualified Name) of the boot class which needs to have a default constructor 131 | # boot = ["sample.camel.Boot", 132 | # "sample.rest.java.Boot", 133 | # "sample.rest.scala.Boot", 134 | # "sample.security.Boot"] 135 | boot = [] 136 | 137 | actor { 138 | timeout = 5 # Default timeout for Future based invocations 139 | # - Actor: ask && ? 
140 | # - UntypedActor: ask 141 | # - TypedActor: methods with non-void return type 142 | serialize-messages = off # Does a deep clone of (non-primitive) messages to ensure immutability 143 | throughput = 5 # Default throughput for all Dispatcher, set to 1 for complete fairness 144 | throughput-deadline-time = -1 # Default throughput deadline for all Dispatcher, set to 0 or negative for no deadline 145 | dispatcher-shutdown-timeout = 1 # Using the akka.time-unit, how long dispatchers by default will wait for new actors until they shut down 146 | 147 | deployment { 148 | 149 | "/app/service-ping" { # deployment id pattern 150 | 151 | router = "round-robin" # routing (load-balance) scheme to use 152 | # available: "direct", "round-robin", "random", "scatter-gather" 153 | # "least-cpu", "least-ram", "least-messages" 154 | # or: fully qualified class name of the router class 155 | # default is "direct"; 156 | # if 'replication' is used then the only available router is "direct" 157 | 158 | nr-of-instances = 3 # number of actor instances in the cluster 159 | # available: positive integer (1-N) or the string "auto" for auto-scaling 160 | # default is '1' 161 | # if the "direct" router is used then this element is ignored (always '1') 162 | 163 | #create-as { 164 | # class = "com.biz.app.MyActor" # FIXME document 'create-as' 165 | #} 166 | 167 | remote { 168 | nodes = ["wallace:2552", "gromit:2552"] # A list of hostnames and ports for instantiating the remote actor instances 169 | # The format should be on "hostname:port", where: 170 | # - hostname can be either hostname or IP address the remote actor should connect to 171 | # - port should be the port for the remote server on the other node 172 | } 173 | 174 | #cluster { # defines the actor as a clustered actor 175 | # default (if omitted) is local non-clustered actor 176 | 177 | # preferred-nodes = ["node:node1"] # a list of preferred nodes for instantiating the actor instances on 178 | # defined as node name 179 | # available: "node:" 180 | 181 | 182 | # replication { # use replication or not? only makes sense for a stateful actor 183 | 184 | # FIXME should we have this config option here? If so, implement it all through. 185 | # serialize-mailbox = off # should the actor mailbox be part of the serialized snapshot? 186 | # default is 'off' 187 | 188 | # storage = "transaction-log" # storage model for replication 189 | # available: "transaction-log" and "data-grid" 190 | # default is "transaction-log" 191 | 192 | # strategy = "write-through" # guaranteees for replication 193 | # available: "write-through" and "write-behind" 194 | # default is "write-through" 195 | 196 | # } 197 | #} 198 | } 199 | } 200 | 201 | default-dispatcher { 202 | type = "Dispatcher" # Must be one of the following 203 | # Dispatcher, (BalancingDispatcher, only valid when all actors using it are of the same type), 204 | # A FQCN to a class inheriting MessageDispatcherConfigurator with a no-arg visible constructor 205 | name = "MyDispatcher" # Optional, will be a generated UUID if omitted 206 | keep-alive-time = 60 # Keep alive time for threads 207 | core-pool-size-factor = 8.0 # No of core threads ... ceil(available processors * factor) 208 | max-pool-size-factor = 8.0 # Max no of threads ... 
ceil(available processors * factor) 209 | executor-bounds = -1 # Makes the Executor bounded, -1 is unbounded 210 | task-queue-size = -1 # Specifies the bounded capacity of the task queue (< 1 == unbounded) 211 | task-queue-type = "linked" # Specifies which type of task queue will be used, can be "array" or "linked" (default) 212 | allow-core-timeout = on # Allow core threads to time out 213 | rejection-policy = "caller-runs" # abort, caller-runs, discard-oldest, discard 214 | throughput = 5 # Throughput for Dispatcher, set to 1 for complete fairness 215 | throughput-deadline-time = -1 # Throughput deadline for Dispatcher, set to 0 or negative for no deadline 216 | mailbox-capacity = -1 # If negative (or zero) then an unbounded mailbox is used (default) 217 | # If positive then a bounded mailbox is used and the capacity is set using the property 218 | # NOTE: setting a mailbox to 'blocking' can be a bit dangerous, could lead to deadlock, use with care 219 | # The following are only used for Dispatcher and only if mailbox-capacity > 0 220 | mailbox-push-timeout-time = 10 # Specifies the timeout to add a new message to a mailbox that is full - negative number means infinite timeout 221 | # (in unit defined by the time-unit property) 222 | } 223 | 224 | debug { 225 | receive = off # enable function of Actor.loggable(), which is to log any received message at DEBUG level 226 | autoreceive = off # enable DEBUG logging of all AutoReceiveMessages (Kill, PoisonPill and the like) 227 | lifecycle = off # enable DEBUG logging of actor lifecycle changes 228 | } 229 | 230 | mailbox { 231 | 232 | file-based { 233 | directory-path = "./_mb" 234 | max-items = 2147483647 235 | max-size = 2147483647 236 | max-items = 2147483647 237 | max-age = 0 238 | max-journal-size = 16777216 # 16 * 1024 * 1024 239 | max-memory-size = 134217728 # 128 * 1024 * 1024 240 | max-journal-overflow = 10 241 | max-journal-size-absolute = 9223372036854775807 242 | discard-old-when-full = on 243 | keep-journal = on 244 | sync-journal = off 245 | } 246 | 247 | redis { 248 | hostname = "127.0.0.1" 249 | port = 6379 250 | } 251 | 252 | mongodb { 253 | # Any specified collection name will be used as a prefix for collections that use durable mongo mailboxes 254 | uri = "mongodb://localhost/akka.mailbox" # Follow Mongo URI Spec - http://www.mongodb.org/display/DOCS/Connections 255 | 256 | # Configurable timeouts for certain ops 257 | timeout { 258 | read = 3000 # number of milliseconds to wait for a read to succeed before timing out the future 259 | write = 3000 # number of milliseconds to wait for a write to succeed before timing out the future 260 | } 261 | } 262 | 263 | zookeeper { 264 | server-addresses = "localhost:2181" 265 | session-timeout = 60 266 | connection-timeout = 60 267 | blocking-queue = on 268 | } 269 | 270 | beanstalk { 271 | hostname = "127.0.0.1" 272 | port = 11300 273 | reconnect-window = 5 274 | message-submit-delay = 0 275 | message-submit-timeout = 5 276 | message-time-to-live = 120 277 | } 278 | } 279 | 280 | # Entries for pluggable serializers and their bindings. If a binding for a specific class is not found, 281 | # then the default serializer (Java serialization) is used. 
282 | # 283 | # serializers { 284 | # java = "akka.serialization.JavaSerializer" 285 | # proto = "akka.testing.ProtobufSerializer" 286 | # sjson = "akka.testing.SJSONSerializer" 287 | # default = "akka.serialization.JavaSerializer" 288 | # } 289 | 290 | # serialization-bindings { 291 | # java = ["akka.serialization.SerializeSpec$Address", 292 | # "akka.serialization.MyJavaSerializableActor", 293 | # "akka.serialization.MyStatelessActorWithMessagesInMailbox", 294 | # "akka.serialization.MyActorWithProtobufMessagesInMailbox"] 295 | # sjson = ["akka.serialization.SerializeSpec$Person"] 296 | # proto = ["com.google.protobuf.Message", 297 | # "akka.actor.ProtobufProtocol$MyMessage"] 298 | # } 299 | } 300 | 301 | remote { 302 | # FIXME rename to transport 303 | layer = "akka.cluster.netty.NettyRemoteSupport" 304 | 305 | secure-cookie = "" # Generate your own with '$AKKA_HOME/scripts/generate_config_with_secure_cookie.sh' 306 | # or using 'akka.util.Crypt.generateSecureCookie' 307 | 308 | remote-daemon-ack-timeout = 30 # Timeout for ACK of cluster operations, lik checking actor out etc. 309 | 310 | use-passive-connections = on # Reuse inbound connections for outbound messages 311 | 312 | failure-detector { # accrual failure detection config 313 | threshold = 8 # defines the failure detector threshold 314 | # A low threshold is prone to generate many wrong suspicions but ensures a 315 | # quick detection in the event of a real crash. Conversely, a high threshold 316 | # generates fewer mistakes but needs more time to detect actual crashes 317 | max-sample-size = 1000 318 | } 319 | 320 | server { 321 | port = 2552 # The default remote server port clients should connect to. Default is 2552 (AKKA) 322 | message-frame-size = 1048576 # Increase this if you want to be able to send messages with large payloads 323 | connection-timeout = 120 # Length in time-unit 324 | require-cookie = off # Should the remote server require that it peers share the same secure-cookie (defined in the 'remote' section)? 325 | untrusted-mode = off # Enable untrusted mode for full security of server managed actors, allows untrusted clients to connect. 326 | backlog = 4096 # Sets the size of the connection backlog 327 | } 328 | 329 | client { 330 | buffering { 331 | retry-message-send-on-failure = false # Should message buffering on remote client error be used (buffer flushed on successful reconnect) 332 | capacity = -1 # If negative (or zero) then an unbounded mailbox is used (default) 333 | # If positive then a bounded mailbox is used and the capacity is set using the property 334 | } 335 | reconnect-delay = 5 336 | read-timeout = 3600 337 | message-frame-size = 1048576 338 | reap-futures-delay = 5 339 | reconnection-time-window = 600 # Maximum time window that a client should try to reconnect for 340 | } 341 | } 342 | 343 | cluster { 344 | name = "test-cluster" 345 | zookeeper-server-addresses = "localhost:2181" # comma-separated list of ':' elements 346 | max-time-to-wait-until-connected = 30 347 | session-timeout = 60 348 | connection-timeout = 60 349 | include-ref-node-in-replica-set = on # Can a replica be instantiated on the same node as the cluster reference to the actor 350 | # Default: on 351 | log-directory = "_akka_cluster" # Where ZooKeeper should store the logs and data files 352 | 353 | replication { 354 | digest-type = "MAC" # Options: CRC32 (cheap & unsafe), MAC (expensive & secure using password) 355 | password = "secret" # FIXME: store open in file? 
356 | ensemble-size = 3 357 | quorum-size = 2 358 | snapshot-frequency = 1000 # The number of messages that should be logged between every actor snapshot 359 | timeout = 30 # Timeout for asyncronous (write-behind) operations 360 | } 361 | } 362 | 363 | stm { 364 | fair = on # Should global transactions be fair or non-fair (non fair yield better performance) 365 | max-retries = 1000 366 | timeout = 5 # Default timeout for blocking transactions and transaction set (in unit defined by 367 | # the time-unit property) 368 | write-skew = true 369 | blocking-allowed = false 370 | interruptible = false 371 | speculative = true 372 | quick-release = true 373 | propagation = "requires" 374 | trace-level = "none" 375 | } 376 | 377 | test { 378 | timefactor = "1.0" # factor by which to scale timeouts during tests, e.g. to account for shared build system load 379 | filter-leeway = 3 # time-units EventFilter.intercept waits after the block is finished until all required messages are received 380 | single-expect-default = 3 # time-units to wait in expectMsg and friends outside of within() block by default 381 | } 382 | } 383 | 384 | """ 385 | 386 | } 387 | -------------------------------------------------------------------------------- /facade/jvm/src/test/resources/akka-long.conf: -------------------------------------------------------------------------------- 1 | # This is an unmodified akka-reference.conf, except for this 2 | # comment and quoting one key that contained slashes. 3 | # Note: the outer akka{} would not be used in the usual setup 4 | # with this config lib. 5 | 6 | ############################## 7 | # Akka Reference Config File # 8 | ############################## 9 | 10 | # This the reference config file has all the default settings. 11 | # All these could be removed with no visible effect. 12 | # Modify as needed. 13 | # This file is imported in the 'akka.conf' file. Make your edits/overrides there. 14 | 15 | akka { 16 | version = "2.0-SNAPSHOT" # Akka version, checked against the runtime version of Akka. 17 | 18 | enabled-modules = [] # Comma separated list of the enabled modules. 
Options: ["cluster", "camel", "http"] 19 | 20 | time-unit = "seconds" # Time unit for all timeout properties throughout the config 21 | 22 | event-handlers = ["akka.event.Logging$DefaultLogger"] # Event handlers to register at boot time (Logging$DefaultLogger logs to STDOUT) 23 | loglevel = "WARNING" # Options: ERROR, WARNING, INFO, DEBUG 24 | # this level is used by the configured loggers (see "event-handlers") as soon 25 | # as they have been started; before that, see "stdout-loglevel" 26 | stdout-loglevel = "WARNING" # Loglevel for the very basic logger activated during AkkaApplication startup 27 | 28 | event-handler-dispatcher { 29 | type = "Dispatcher" # Must be one of the following 30 | # Dispatcher, (BalancingDispatcher, only valid when all actors using it are of the same type), 31 | # A FQCN to a class inheriting MessageDispatcherConfigurator with a no-arg visible constructor 32 | name = "EventHandlerDispatcher" # Optional, will be a generated UUID if omitted 33 | keep-alive-time = 60 # Keep alive time for threads 34 | core-pool-size = 1 # No of core threads 35 | max-pool-size = 8 # Max no of threads 36 | executor-bounds = -1 # Makes the Executor bounded, -1 is unbounded 37 | task-queue-size = -1 # Specifies the bounded capacity of the task queue (< 1 == unbounded) 38 | task-queue-type = "linked" # Specifies which type of task queue will be used, can be "array" or "linked" (default) 39 | allow-core-timeout = on # Allow core threads to time out 40 | rejection-policy = "caller-runs" # abort, caller-runs, discard-oldest, discard 41 | throughput = 5 # Throughput for Dispatcher, set to 1 for complete fairness 42 | throughput-deadline-time = -1 # Throughput deadline for Dispatcher, set to 0 or negative for no deadline 43 | mailbox-capacity = -1 # If negative (or zero) then an unbounded mailbox is used (default) 44 | # If positive then a bounded mailbox is used and the capacity is set using the property 45 | # NOTE: setting a mailbox to 'blocking' can be a bit dangerous, could lead to deadlock, use with care 46 | # The following are only used for Dispatcher and only if mailbox-capacity > 0 47 | mailbox-push-timeout-time = 10 # Specifies the timeout to add a new message to a mailbox that is full - negative number means infinite timeout 48 | # (in unit defined by the time-unit property) 49 | } 50 | 51 | # These boot classes are loaded (and created) automatically when the Akka Microkernel boots up 52 | # Can be used to bootstrap your application(s) 53 | # Should be the FQN (Fully Qualified Name) of the boot class which needs to have a default constructor 54 | # boot = ["sample.camel.Boot", 55 | # "sample.rest.java.Boot", 56 | # "sample.rest.scala.Boot", 57 | # "sample.security.Boot"] 58 | boot = [] 59 | 60 | actor { 61 | timeout = 5 # Default timeout for Future based invocations 62 | # - Actor: ask && ? 
63 | # - UntypedActor: ask 64 | # - TypedActor: methods with non-void return type 65 | serialize-messages = off # Does a deep clone of (non-primitive) messages to ensure immutability 66 | throughput = 5 # Default throughput for all Dispatcher, set to 1 for complete fairness 67 | throughput-deadline-time = -1 # Default throughput deadline for all Dispatcher, set to 0 or negative for no deadline 68 | dispatcher-shutdown-timeout = 1 # Using the akka.time-unit, how long dispatchers by default will wait for new actors until they shut down 69 | 70 | deployment { 71 | 72 | "/app/service-ping" { # deployment id pattern 73 | 74 | router = "round-robin" # routing (load-balance) scheme to use 75 | # available: "direct", "round-robin", "random", "scatter-gather" 76 | # "least-cpu", "least-ram", "least-messages" 77 | # or: fully qualified class name of the router class 78 | # default is "direct"; 79 | # if 'replication' is used then the only available router is "direct" 80 | 81 | nr-of-instances = 3 # number of actor instances in the cluster 82 | # available: positive integer (1-N) or the string "auto" for auto-scaling 83 | # default is '1' 84 | # if the "direct" router is used then this element is ignored (always '1') 85 | 86 | #create-as { 87 | # class = "com.biz.app.MyActor" # FIXME document 'create-as' 88 | #} 89 | 90 | remote { 91 | nodes = ["wallace:2552", "gromit:2552"] # A list of hostnames and ports for instantiating the remote actor instances 92 | # The format should be on "hostname:port", where: 93 | # - hostname can be either hostname or IP address the remote actor should connect to 94 | # - port should be the port for the remote server on the other node 95 | } 96 | 97 | #cluster { # defines the actor as a clustered actor 98 | # default (if omitted) is local non-clustered actor 99 | 100 | # preferred-nodes = ["node:node1"] # a list of preferred nodes for instantiating the actor instances on 101 | # defined as node name 102 | # available: "node:" 103 | 104 | 105 | # replication { # use replication or not? only makes sense for a stateful actor 106 | 107 | # FIXME should we have this config option here? If so, implement it all through. 108 | # serialize-mailbox = off # should the actor mailbox be part of the serialized snapshot? 109 | # default is 'off' 110 | 111 | # storage = "transaction-log" # storage model for replication 112 | # available: "transaction-log" and "data-grid" 113 | # default is "transaction-log" 114 | 115 | # strategy = "write-through" # guaranteees for replication 116 | # available: "write-through" and "write-behind" 117 | # default is "write-through" 118 | 119 | # } 120 | #} 121 | } 122 | } 123 | 124 | default-dispatcher { 125 | type = "Dispatcher" # Must be one of the following 126 | # Dispatcher, (BalancingDispatcher, only valid when all actors using it are of the same type), 127 | # A FQCN to a class inheriting MessageDispatcherConfigurator with a no-arg visible constructor 128 | name = "MyDispatcher" # Optional, will be a generated UUID if omitted 129 | keep-alive-time = 60 # Keep alive time for threads 130 | core-pool-size-factor = 8.0 # No of core threads ... ceil(available processors * factor) 131 | max-pool-size-factor = 8.0 # Max no of threads ... 
ceil(available processors * factor) 132 | executor-bounds = -1 # Makes the Executor bounded, -1 is unbounded 133 | task-queue-size = -1 # Specifies the bounded capacity of the task queue (< 1 == unbounded) 134 | task-queue-type = "linked" # Specifies which type of task queue will be used, can be "array" or "linked" (default) 135 | allow-core-timeout = on # Allow core threads to time out 136 | rejection-policy = "caller-runs" # abort, caller-runs, discard-oldest, discard 137 | throughput = 5 # Throughput for Dispatcher, set to 1 for complete fairness 138 | throughput-deadline-time = -1 # Throughput deadline for Dispatcher, set to 0 or negative for no deadline 139 | mailbox-capacity = -1 # If negative (or zero) then an unbounded mailbox is used (default) 140 | # If positive then a bounded mailbox is used and the capacity is set using the property 141 | # NOTE: setting a mailbox to 'blocking' can be a bit dangerous, could lead to deadlock, use with care 142 | # The following are only used for Dispatcher and only if mailbox-capacity > 0 143 | mailbox-push-timeout-time = 10 # Specifies the timeout to add a new message to a mailbox that is full - negative number means infinite timeout 144 | # (in unit defined by the time-unit property) 145 | } 146 | 147 | debug { 148 | receive = off # enable function of Actor.loggable(), which is to log any received message at DEBUG level 149 | autoreceive = off # enable DEBUG logging of all AutoReceiveMessages (Kill, PoisonPill and the like) 150 | lifecycle = off # enable DEBUG logging of actor lifecycle changes 151 | } 152 | 153 | mailbox { 154 | 155 | file-based { 156 | directory-path = "./_mb" 157 | max-items = 2147483647 158 | max-size = 2147483647 159 | max-items = 2147483647 160 | max-age = 0 161 | max-journal-size = 16777216 # 16 * 1024 * 1024 162 | max-memory-size = 134217728 # 128 * 1024 * 1024 163 | max-journal-overflow = 10 164 | max-journal-size-absolute = 9223372036854775807 165 | discard-old-when-full = on 166 | keep-journal = on 167 | sync-journal = off 168 | } 169 | 170 | redis { 171 | hostname = "127.0.0.1" 172 | port = 6379 173 | } 174 | 175 | mongodb { 176 | # Any specified collection name will be used as a prefix for collections that use durable mongo mailboxes 177 | uri = "mongodb://localhost/akka.mailbox" # Follow Mongo URI Spec - http://www.mongodb.org/display/DOCS/Connections 178 | 179 | # Configurable timeouts for certain ops 180 | timeout { 181 | read = 3000 # number of milliseconds to wait for a read to succeed before timing out the future 182 | write = 3000 # number of milliseconds to wait for a write to succeed before timing out the future 183 | } 184 | } 185 | 186 | zookeeper { 187 | server-addresses = "localhost:2181" 188 | session-timeout = 60 189 | connection-timeout = 60 190 | blocking-queue = on 191 | } 192 | 193 | beanstalk { 194 | hostname = "127.0.0.1" 195 | port = 11300 196 | reconnect-window = 5 197 | message-submit-delay = 0 198 | message-submit-timeout = 5 199 | message-time-to-live = 120 200 | } 201 | } 202 | 203 | # Entries for pluggable serializers and their bindings. If a binding for a specific class is not found, 204 | # then the default serializer (Java serialization) is used. 
205 | # 206 | # serializers { 207 | # java = "akka.serialization.JavaSerializer" 208 | # proto = "akka.testing.ProtobufSerializer" 209 | # sjson = "akka.testing.SJSONSerializer" 210 | # default = "akka.serialization.JavaSerializer" 211 | # } 212 | 213 | # serialization-bindings { 214 | # java = ["akka.serialization.SerializeSpec$Address", 215 | # "akka.serialization.MyJavaSerializableActor", 216 | # "akka.serialization.MyStatelessActorWithMessagesInMailbox", 217 | # "akka.serialization.MyActorWithProtobufMessagesInMailbox"] 218 | # sjson = ["akka.serialization.SerializeSpec$Person"] 219 | # proto = ["com.google.protobuf.Message", 220 | # "akka.actor.ProtobufProtocol$MyMessage"] 221 | # } 222 | } 223 | 224 | remote { 225 | # FIXME rename to transport 226 | layer = "akka.cluster.netty.NettyRemoteSupport" 227 | 228 | secure-cookie = "" # Generate your own with '$AKKA_HOME/scripts/generate_config_with_secure_cookie.sh' 229 | # or using 'akka.util.Crypt.generateSecureCookie' 230 | 231 | remote-daemon-ack-timeout = 30 # Timeout for ACK of cluster operations, lik checking actor out etc. 232 | 233 | use-passive-connections = on # Reuse inbound connections for outbound messages 234 | 235 | failure-detector { # accrual failure detection config 236 | threshold = 8 # defines the failure detector threshold 237 | # A low threshold is prone to generate many wrong suspicions but ensures a 238 | # quick detection in the event of a real crash. Conversely, a high threshold 239 | # generates fewer mistakes but needs more time to detect actual crashes 240 | max-sample-size = 1000 241 | } 242 | 243 | server { 244 | port = 2552 # The default remote server port clients should connect to. Default is 2552 (AKKA) 245 | message-frame-size = 1048576 # Increase this if you want to be able to send messages with large payloads 246 | connection-timeout = 120 # Length in time-unit 247 | require-cookie = off # Should the remote server require that it peers share the same secure-cookie (defined in the 'remote' section)? 248 | untrusted-mode = off # Enable untrusted mode for full security of server managed actors, allows untrusted clients to connect. 249 | backlog = 4096 # Sets the size of the connection backlog 250 | } 251 | 252 | client { 253 | buffering { 254 | retry-message-send-on-failure = false # Should message buffering on remote client error be used (buffer flushed on successful reconnect) 255 | capacity = -1 # If negative (or zero) then an unbounded mailbox is used (default) 256 | # If positive then a bounded mailbox is used and the capacity is set using the property 257 | } 258 | reconnect-delay = 5 259 | read-timeout = 3600 260 | message-frame-size = 1048576 261 | reap-futures-delay = 5 262 | reconnection-time-window = 600 # Maximum time window that a client should try to reconnect for 263 | } 264 | } 265 | 266 | cluster { 267 | name = "test-cluster" 268 | zookeeper-server-addresses = "localhost:2181" # comma-separated list of ':' elements 269 | max-time-to-wait-until-connected = 30 270 | session-timeout = 60 271 | connection-timeout = 60 272 | include-ref-node-in-replica-set = on # Can a replica be instantiated on the same node as the cluster reference to the actor 273 | # Default: on 274 | log-directory = "_akka_cluster" # Where ZooKeeper should store the logs and data files 275 | 276 | replication { 277 | digest-type = "MAC" # Options: CRC32 (cheap & unsafe), MAC (expensive & secure using password) 278 | password = "secret" # FIXME: store open in file? 
279 | ensemble-size = 3 280 | quorum-size = 2 281 | snapshot-frequency = 1000 # The number of messages that should be logged between every actor snapshot 282 | timeout = 30 # Timeout for asyncronous (write-behind) operations 283 | } 284 | } 285 | 286 | stm { 287 | fair = on # Should global transactions be fair or non-fair (non fair yield better performance) 288 | max-retries = 1000 289 | timeout = 5 # Default timeout for blocking transactions and transaction set (in unit defined by 290 | # the time-unit property) 291 | write-skew = true 292 | blocking-allowed = false 293 | interruptible = false 294 | speculative = true 295 | quick-release = true 296 | propagation = "requires" 297 | trace-level = "none" 298 | } 299 | 300 | test { 301 | timefactor = "1.0" # factor by which to scale timeouts during tests, e.g. to account for shared build system load 302 | filter-leeway = 3 # time-units EventFilter.intercept waits after the block is finished until all required messages are received 303 | single-expect-default = 3 # time-units to wait in expectMsg and friends outside of within() block by default 304 | } 305 | } 306 | -------------------------------------------------------------------------------- /facade/jvm/src/test/resources/akka.conf: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | # comment 8 | # comment 9 | # comment 10 | # comment 11 | # comment 12 | # comment 13 | # comment 14 | # comment 15 | # comment 16 | # comment 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | akka { 26 | version = "2.0-SNAPSHOT" 27 | # comment 28 | enabled-modules = [] 29 | time-unit = "seconds" 30 | event-handlers = ["akka.event.Logging$DefaultLogger"] 31 | loglevel = "WARNING" 32 | 33 | 34 | , stdout-loglevel = "WARNING" 35 | 36 | , event-handler-dispatcher { 37 | type = "Dispatcher" 38 | # comment 39 | # comment 40 | # comment 41 | # comment 42 | # comment 43 | # comment 44 | # comment 45 | 46 | , name = "EventHandlerDispatcher" 47 | , keep-alive-time = 60 48 | , core-pool-size = 1 49 | , max-pool-size = 8 50 | , executor-bounds = -1 51 | , task-queue-size = -1 52 | , task-queue-type = "linked" 53 | , allow-core-timeout = on 54 | , rejection-policy = "caller-runs" 55 | , throughput = 5 56 | , throughput-deadline-time = -1 57 | , mailbox-capacity = -1 58 | 59 | 60 | 61 | , mailbox-push-timeout-time = -10, 62 | x=1 63 | }, 64 | boot = [] 65 | } 66 | -------------------------------------------------------------------------------- /facade/jvm/src/test/resources/application.conf: -------------------------------------------------------------------------------- 1 | loaded = "DONE" 2 | -------------------------------------------------------------------------------- /facade/jvm/src/test/scala/AkkaConf.scala: -------------------------------------------------------------------------------- 1 | package tests 2 | 3 | object AkkaConf { 4 | 5 | def basic = 6 | io.Source.fromFile("facade/jvm/src/test/resources/akka.conf").mkString 7 | 8 | def long = 9 | io.Source.fromFile("facade/jvm/src/test/resources/akka-long.conf").mkString 10 | 11 | } 12 | -------------------------------------------------------------------------------- /facade/shared/src/main/scala/com/typesafe/config/Config.scala: -------------------------------------------------------------------------------- 1 | package com.typesafe.config 2 | 3 | import java.util.{concurrent => juc} 4 | import java.{time => jt, util => ju} 5 | 6 | import org.akkajs.shocon 7 | import org.akkajs.shocon.Config.Value 8 | import 
org.akkajs.shocon.Extractor 9 | 10 | import scala.jdk.CollectionConverters._ 11 | import scala.collection.compat._ 12 | import scala.collection.mutable 13 | import scala.concurrent.duration._ 14 | import scala.language.experimental.macros 15 | 16 | object ConfigFactory { 17 | 18 | import org.akkajs.shocon.ConfigLoader 19 | 20 | def parseString(s: String): Config = macro ConfigLoader.loadFromString 21 | 22 | def load(): Config = macro ConfigLoader.loadDefaultImpl 23 | 24 | def load(cl: ClassLoader): Config = macro ConfigLoader.loadDefaultImplCL 25 | 26 | def defaultReference(): Config = macro ConfigLoader.loadDefaultImpl 27 | 28 | def defaultReference(cl: ClassLoader): Config = macro ConfigLoader.loadDefaultImplCL 29 | 30 | def empty() = Config(shocon.Config.gen("{}")) 31 | 32 | def parseMap(values: java.util.Map[String, Any]) = 33 | Config(shocon.Config.Object.fromPairs(values.asScala.map{ 34 | case (k, v) => k -> shocon.Config.StringLiteral(v.toString) 35 | }.toSeq)) 36 | 37 | def load(conf: Config): Config = conf 38 | } 39 | 40 | case class Config(cfg: shocon.Config.Value) { self => 41 | import shocon.ConfigOps 42 | import shocon.Extractors._ 43 | 44 | val fallbackStack: mutable.Queue[shocon.Config.Value] = mutable.Queue(cfg) 45 | 46 | def this() = { 47 | this(shocon.Config.gen("{}")) 48 | } 49 | 50 | def root() = { 51 | new ConfigObject() { 52 | val inner = self.cfg 53 | def unwrapped = 54 | cfg.as[shocon.Config.Object].get.unwrapped.toMap.asJava 55 | def entrySet(): ju.Set[ju.Map.Entry[String, ConfigValue]] = 56 | cfg.as[shocon.Config.Object].get.fields.view.mapValues(v => new ConfigValue() { 57 | override val inner: Value = v 58 | }).toMap.asJava.entrySet() 59 | } 60 | } 61 | 62 | def entrySet(): ju.Set[ju.Map.Entry[String, ConfigValue]] = root.entrySet() 63 | 64 | def checkValid(c: Config, paths: String*): Unit = {} 65 | 66 | def resolve(): Config = this 67 | 68 | def withFallback(c: Config) = { 69 | if (c != null) 70 | c.fallbackStack.foreach(fallback => fallbackStack.enqueue(fallback)) 71 | this 72 | } 73 | 74 | def getOrReturnNull[T](path: String)(implicit ev: Extractor[T]): T = { 75 | lazy val res: T = 76 | scala.util.Try { 77 | ev( 78 | fallbackStack 79 | .find(_.get(path).isDefined) 80 | .flatMap(_.get(path)).get 81 | ) 82 | }.toOption.getOrElse(null.asInstanceOf[T]) 83 | 84 | res 85 | } 86 | 87 | def hasPath(path: String): Boolean = 88 | fallbackStack.exists(_.get(path).isDefined) 89 | 90 | def getConfig(path: String): Config = { 91 | try { 92 | val configs = fallbackStack.toSeq 93 | .filter(_.get(path).isDefined) 94 | .map(_.get(path).get) 95 | .filter(_ != null) 96 | .map(Config(_)) 97 | .filter(_ != null) 98 | 99 | val config = configs(0) 100 | configs.tail.foreach{ c => 101 | config.withFallback(c) 102 | } 103 | config 104 | } catch { 105 | case _ : Throwable => 106 | null.asInstanceOf[Config] 107 | } 108 | } 109 | 110 | def getString(path: String) = getOrReturnNull[String](path) 111 | 112 | def getBoolean(path: String): Boolean = getOrReturnNull[Boolean](path) 113 | 114 | def getInt(path: String) = getOrReturnNull[Int](path) 115 | 116 | def getLong(path: String) = getOrReturnNull[Long](path) 117 | 118 | def getDouble(path: String) = getOrReturnNull[Double](path) 119 | 120 | def getBytes(path: String): Long = { 121 | val bytesValue = getString(path) 122 | parseBytes(bytesValue, path) 123 | } 124 | 125 | /** 126 | * Parses a size-in-bytes string. If no units are specified in the string, 127 | * it is assumed to be in bytes. The returned value is in bytes. 
The purpose 128 | * of this function is to implement the size-in-bytes-related methods in the 129 | * Config interface. 130 | * 131 | * @param input 132 | * the string to parse 133 | * @param pathForException 134 | * path to include in exceptions 135 | * @return size in bytes 136 | * @throws ConfigException 137 | * if string is invalid 138 | */ 139 | def parseBytes(input: String, pathForException: String): Long = { 140 | val s: String = unicodeTrim(input) 141 | val unitString: String = getUnits(s) 142 | val numberString: String = unicodeTrim( 143 | s.substring(0, s.length() - unitString.length())) 144 | 145 | // this would be caught later anyway, but the error message 146 | // is more helpful if we check it here. 147 | if (numberString.length() == 0) { 148 | throw ConfigException.BadValue(pathForException) 149 | } 150 | val units: Option[MemoryUnit] = MemoryUnit.parseUnit(unitString) 151 | 152 | if (units.isEmpty) { 153 | throw ConfigException.BadValue(pathForException) 154 | } 155 | 156 | try { 157 | val unitBytes = units.get.bytes 158 | val result: BigInt = 159 | // if the string is purely digits, parse as an integer to avoid 160 | // possible precision loss; otherwise as a double. 161 | if (numberString.matches("[0-9]+")) { 162 | unitBytes * BigInt(numberString) 163 | } else { 164 | val resultDecimal: BigDecimal = BigDecimal(unitBytes) * BigDecimal( 165 | numberString) 166 | resultDecimal.toBigInt 167 | } 168 | 169 | if (result.bitLength < 64) { 170 | result.longValue 171 | } else { 172 | throw ConfigException.BadValue(pathForException) 173 | } 174 | } catch { 175 | case e: NumberFormatException => 176 | throw ConfigException.BadValue(pathForException) 177 | } 178 | } 179 | 180 | def getStringList(path: String): ju.List[String] = 181 | getOrReturnNull[ju.List[String]](path) match { 182 | case null => List[String]().asJava 183 | case ret => ret 184 | } 185 | 186 | def getConfigList(path: String): ju.List[Config] = 187 | getOrReturnNull[ju.List[shocon.Config.Value]](path) match { 188 | case null => List[Config]().asJava 189 | case ret => ret.asScala.map(Config).asJava 190 | } 191 | 192 | def getDuration(path: String, unit: TimeUnit): Long = { 193 | val durationValue = getString(path) 194 | val nanos = parseDurationAsNanos(durationValue) 195 | unit.convert(nanos, juc.TimeUnit.NANOSECONDS) 196 | } 197 | 198 | def getDuration(path: String): jt.Duration = { 199 | val durationValue = getString(path) 200 | val nanos = parseDurationAsNanos(durationValue) 201 | return jt.Duration.ofNanos(nanos) 202 | } 203 | 204 | def parseDurationAsNanos(input: String): Long = { 205 | import juc.TimeUnit._ 206 | 207 | val s: String = unicodeTrim(input) 208 | val originalUnitString: String = getUnits(s) 209 | var unitString: String = originalUnitString 210 | val numberString: String = unicodeTrim( 211 | s.substring(0, s.length - unitString.length)) 212 | 213 | if (numberString.length == 0) 214 | throw new ConfigException.BadValue( 215 | "No number in duration value '" + input + "'") 216 | if (unitString.length > 2 && !unitString.endsWith("s")) 217 | unitString = unitString + "s" 218 | 219 | val units = unitString match { 220 | case "" | "ms" | "millis" | "milliseconds" => MILLISECONDS 221 | case "us" | "micros" | "microseconds" => MICROSECONDS 222 | case "d" | "days" => DAYS 223 | case "h" | "hours" => HOURS 224 | case "s" | "seconds" => SECONDS 225 | case "m" | "minutes" => MINUTES 226 | case _ => 227 | throw new ConfigException.BadValue( 228 | "Could not parse time unit '" + originalUnitString + "' (try ns, us, 
ms, s, m, h, d)") 229 | } 230 | 231 | try { 232 | // return here 233 | if (numberString.matches("[0-9]+")) units.toNanos(numberString.toLong) 234 | else (numberString.toDouble * units.toNanos(1)).toLong 235 | } catch { 236 | case e: NumberFormatException => { 237 | throw new ConfigException.BadValue( 238 | "Could not parse duration number '" + numberString + "'") 239 | } 240 | } 241 | } 242 | 243 | def unicodeTrim(s: String) = s.trim() 244 | 245 | private def getUnits(s: String): String = { 246 | var i: Int = s.length - 1 247 | while (i >= 0) { 248 | val c: Char = s.charAt(i) 249 | if (!Character.isLetter(c)) return s.substring(i + 1) 250 | i -= 1 251 | } 252 | return s.substring(i + 1) 253 | } 254 | 255 | private val millis = Set("ms", "millis", "milliseconds") 256 | private val nanos = Set("ns", "nanos", "nanoseconds") 257 | def getMillisDuration(path: String) = { 258 | try { 259 | val res = parseDurationAsNanos(getString(path)) 260 | 261 | Duration(res, NANOSECONDS) 262 | } catch { 263 | case err: Exception => null 264 | } 265 | } 266 | 267 | def getNanosDuration(path: String) = { 268 | val res = getString(path) 269 | val parts = res.split("[ \t]") 270 | assert(parts.size == 2 && (nanos contains parts(1))) 271 | Duration(parts(0).toInt, NANOSECONDS) 272 | } 273 | 274 | } 275 | -------------------------------------------------------------------------------- /facade/shared/src/main/scala/com/typesafe/config/ConfigException.scala: -------------------------------------------------------------------------------- 1 | package com.typesafe.config 2 | 3 | /** 4 | * Created by evacchi on 27/11/15. 5 | */ 6 | 7 | abstract class ConfigException(message: String, cause: Throwable) extends RuntimeException(message,cause) { 8 | def this(message: String) = this(message,null) 9 | } 10 | 11 | object ConfigException { 12 | case class Missing(path: String) extends ConfigException(path) 13 | case class BadValue(path: String) extends ConfigException(path) 14 | case class BugOrBroken(message: String) extends ConfigException(message) 15 | } 16 | -------------------------------------------------------------------------------- /facade/shared/src/main/scala/com/typesafe/config/ConfigMergeable.scala: -------------------------------------------------------------------------------- 1 | package com.typesafe.config 2 | 3 | trait ConfigMergeable { 4 | 5 | def withFallback(other: ConfigMergeable): ConfigMergeable = 6 | throw new Exception("To be implemented") 7 | 8 | } 9 | -------------------------------------------------------------------------------- /facade/shared/src/main/scala/com/typesafe/config/ConfigObject.scala: -------------------------------------------------------------------------------- 1 | package com.typesafe.config 2 | 3 | import java.{util => ju} 4 | 5 | import scala.jdk.CollectionConverters._ 6 | 7 | trait ConfigObject extends ju.AbstractMap[String, ConfigValue] with ConfigValue { 8 | 9 | def toConfig = Config(inner) 10 | def unwrapped: ju.Map[String, Any] 11 | 12 | override def render: String = this.entrySet().asScala.map(kv => kv.getKey+" -> "+kv.getValue).mkString("[", ",", "]") 13 | override def valueType: ConfigValueType = ConfigValueType.OBJECT 14 | 15 | } 16 | -------------------------------------------------------------------------------- /facade/shared/src/main/scala/com/typesafe/config/ConfigValue.scala: -------------------------------------------------------------------------------- 1 | package com.typesafe.config 2 | 3 | import org.akkajs.shocon 4 | 5 | trait ConfigValue extends 
ConfigMergeable { 6 | val inner: shocon.Config.Value 7 | def render(): String = inner.toString 8 | def valueType(): ConfigValueType = inner match { 9 | case _: shocon.Config.Object => ConfigValueType.OBJECT 10 | case _: shocon.Config.Array => ConfigValueType.LIST 11 | case _: shocon.Config.NumberLiteral => ConfigValueType.NUMBER 12 | case _: shocon.Config.StringLiteral => ConfigValueType.STRING 13 | case _: shocon.Config.BooleanLiteral => ConfigValueType.BOOLEAN 14 | case _: shocon.Config.NullLiteral.type => ConfigValueType.NULL 15 | } 16 | } 17 | -------------------------------------------------------------------------------- /facade/shared/src/main/scala/com/typesafe/config/ConfigValueType.scala: -------------------------------------------------------------------------------- 1 | package com.typesafe.config 2 | 3 | class ConfigValueType private() {} 4 | object ConfigValueType { 5 | val OBJECT, LIST, NUMBER, BOOLEAN, NULL, STRING = new ConfigValueType() 6 | } 7 | -------------------------------------------------------------------------------- /facade/shared/src/main/scala/com/typesafe/config/MemoryUnit.scala: -------------------------------------------------------------------------------- 1 | package com.typesafe.config 2 | 3 | sealed abstract class MemoryUnit(val prefix:String, val powerOf:Int, val power:Int) { 4 | lazy val bytes = BigInt(powerOf).pow(power) 5 | } 6 | 7 | object MemoryUnit { 8 | 9 | case object BYTES extends MemoryUnit("", 1024, 0) 10 | case object KILOBYTES extends MemoryUnit("kilo", 1000, 1) 11 | case object MEGABYTES extends MemoryUnit("mega", 1000, 2) 12 | case object GIGABYTES extends MemoryUnit("giga", 1000, 3) 13 | case object TERABYTES extends MemoryUnit("tera", 1000, 4) 14 | case object PETABYTES extends MemoryUnit("peta", 1000, 5) 15 | case object EXABYTES extends MemoryUnit("exa", 1000, 6) 16 | case object ZETTABYTES extends MemoryUnit("zetta", 1000, 7) 17 | case object YOTTABYTES extends MemoryUnit("yotta", 1000, 8) 18 | case object KIBIBYTES extends MemoryUnit("kibi", 1024, 1) 19 | case object MEBIBYTES extends MemoryUnit("mebi", 1024, 2) 20 | case object GIBIBYTES extends MemoryUnit("gibi", 1024, 3) 21 | case object TEBIBYTES extends MemoryUnit("tebi", 1024, 4) 22 | case object PEBIBYTES extends MemoryUnit("pebi", 1024, 5) 23 | case object EXBIBYTES extends MemoryUnit("exbi", 1024, 6) 24 | case object ZEBIBYTES extends MemoryUnit("zebi", 1024, 7) 25 | case object YOBIBYTES extends MemoryUnit("yobi", 1024, 8) 26 | 27 | val values: Vector[MemoryUnit] = Vector(BYTES, KILOBYTES, MEGABYTES, GIGABYTES, TERABYTES, PETABYTES, 28 | EXABYTES, ZETTABYTES, YOTTABYTES, KIBIBYTES, MEBIBYTES, GIBIBYTES, TEBIBYTES, 29 | PEBIBYTES, EXBIBYTES, ZEBIBYTES, YOBIBYTES) 30 | 31 | 32 | lazy val unitsMap: Map[String, MemoryUnit] = { 33 | val map = Map.newBuilder[String, MemoryUnit] 34 | 35 | MemoryUnit.values.foreach { unit => 36 | map += unit.prefix + "byte" -> unit 37 | map += unit.prefix + "bytes" -> unit 38 | if (unit.prefix.length() == 0) { 39 | map += "b" -> unit 40 | map += "B" -> unit 41 | map += "" -> unit // no unit specified means bytes 42 | } else { 43 | val first = unit.prefix.substring(0, 1) 44 | val firstUpper = first.toUpperCase() 45 | if (unit.powerOf == 1024) { 46 | map += first -> unit // 512m 47 | map += firstUpper -> unit // 512M 48 | map += firstUpper + "i" -> unit // 512Mi 49 | map += firstUpper + "iB" -> unit // 512MiB 50 | } else if (unit.powerOf == 1000) { 51 | if (unit.power == 1) { 52 | map += first + "B" -> unit // 512kB 53 | } else { 54 | map += 
firstUpper + "B" -> unit // 512MB 55 | } 56 | } 57 | } 58 | } 59 | map.result() 60 | } 61 | 62 | 63 | def parseUnit(unit: String):Option[MemoryUnit] = { 64 | unitsMap.get(unit) 65 | } 66 | } 67 | -------------------------------------------------------------------------------- /facade/shared/src/main/scala/com/typesafe/config/package.scala: -------------------------------------------------------------------------------- 1 | package com.typesafe 2 | 3 | package object config { 4 | 5 | implicit def fromStringToShoconConfig(s: String): org.akkajs.shocon.Config.Value = 6 | org.akkajs.shocon.Config.StringLiteral(s) 7 | 8 | implicit def fromShoconConfigToString(cv: org.akkajs.shocon.Config.Value): String = 9 | cv.toString 10 | 11 | } -------------------------------------------------------------------------------- /facade/shared/src/main/scala/org/akkajs/shocon/ConfigLoader.scala: -------------------------------------------------------------------------------- 1 | package org.akkajs.shocon 2 | 3 | import scala.language.experimental.macros 4 | import scala.reflect.macros.blackbox.Context 5 | 6 | object ConfigLoader { 7 | 8 | import org.akkajs.shocon.verboseLog 9 | 10 | /// Loads the content of all config files passed with -Xmacro-settings: 11 | private def loadExplicitConfigFiles(c: Context): Option[String] = 12 | // check if config files to be loaded are defined via macro setting -Xmacro-settings:shocon.files=file1.conf;file2.conf 13 | c.settings.find(_.startsWith("shocon.files=")) 14 | // load these files 15 | .map( _.split("=") match { 16 | case Array(_,paths) => 17 | val (found,notfound) = paths.split(";").toList 18 | .map( new java.io.File(_) ) 19 | .partition( _.canRead ) 20 | 21 | if(notfound.nonEmpty) 22 | c.warning(c.enclosingPosition, s"shocon - could not read configuration files: $notfound") 23 | 24 | c.info(c.enclosingPosition, s"shocon - statically reading configuration from $found", force=false) 25 | found 26 | case _ => Nil 27 | }) 28 | // concat these files into a single string 29 | .map( _.map(scala.io.Source.fromFile(_).getLines.mkString("\n")).mkString("\n\n") ) 30 | 31 | def loadDefault(c: Context) = { 32 | import c.universe._ 33 | 34 | val configStr: String = 35 | // load explicitly defined config files vi -Xmacro-settings:file1.conf;file2.conf;... 36 | loadExplicitConfigFiles(c) 37 | // or else load application.conf 38 | .getOrElse{ 39 | try { 40 | val confPath = new Object {}.getClass 41 | .getResource("/") 42 | .toString + "application.conf" 43 | 44 | c.info(c.enclosingPosition, 45 | s"shocon - statically reading configuration from $confPath", force=false) 46 | 47 | val stream = 48 | new Object {}.getClass.getResourceAsStream("/application.conf") 49 | 50 | scala.io.Source.fromInputStream(stream).getLines.mkString("\n") 51 | } catch { 52 | case e: Throwable => 53 | // we use print instead of c.warning, since multiple warnings at the same c.enclosingPosition seem not to work (?) 
54 | println(c.enclosingPosition, s"WARNING: could not load config file: $e") 55 | "{}" 56 | } 57 | } 58 | 59 | c.Expr[com.typesafe.config.Config](q"""{ 60 | com.typesafe.config.Config( 61 | org.akkajs.shocon.Config.gen($configStr) 62 | ) 63 | }""") 64 | } 65 | 66 | 67 | def loadDefaultImpl(c: Context)() = loadDefault(c) 68 | def loadDefaultImplCL(c: Context)(cl: c.Expr[ClassLoader]) = loadDefault(c) 69 | 70 | def loadFromString(c: Context)(s: c.Expr[String]) = { 71 | import c.universe._ 72 | 73 | s.tree match { 74 | case q"""$strLit""" => 75 | strLit match { 76 | case Literal(Constant(str)) => 77 | if (verboseLog) 78 | c.info(c.enclosingPosition, "[shocon-facade] optimized at compile time", false) 79 | 80 | c.Expr[com.typesafe.config.Config](q"""{ 81 | com.typesafe.config.Config( 82 | org.akkajs.shocon.Config.gen(${str.toString}) 83 | ) 84 | }""") 85 | case _ => 86 | if (verboseLog) 87 | c.warning(c.enclosingPosition, "[shocon-facade] fallback to runtime parser") 88 | 89 | c.Expr[com.typesafe.config.Config](q"""{ 90 | com.typesafe.config.Config( 91 | org.akkajs.shocon.Config($strLit) 92 | ) 93 | }""") 94 | } 95 | } 96 | } 97 | } 98 | -------------------------------------------------------------------------------- /facade/shared/src/test/scala/AkkaConfig.scala: -------------------------------------------------------------------------------- 1 | package tests 2 | 3 | object AkkaConfig { 4 | 5 | val default = """ 6 | #################################### 7 | # Akka Actor Reference Config File # 8 | #################################### 9 | 10 | # This is the reference config file that contains all the default settings. 11 | # Make your edits/overrides in your application.conf. 12 | 13 | # Akka version, checked against the runtime version of Akka. Loaded from generated conf file. 14 | #include "version" 15 | 16 | akka { 17 | # Home directory of Akka, modules in the deploy directory will be loaded 18 | home = "" 19 | 20 | # Loggers to register at boot time (akka.event.Logging$DefaultLogger logs 21 | # to STDOUT) 22 | #loggers = ["akka.event.Logging$DefaultLogger"] 23 | loggers = ["akka.event.LoggingBusActor"] 24 | 25 | # Filter of log events that is used by the LoggingAdapter before 26 | # publishing log events to the eventStream. It can perform 27 | # fine grained filtering based on the log source. The default 28 | # implementation filters on the `loglevel`. 29 | # FQCN of the LoggingFilter. The Class of the FQCN must implement 30 | # akka.event.LoggingFilter and have a public constructor with 31 | # (akka.actor.ActorSystem.Settings, akka.event.EventStream) parameters. 32 | logging-filter = "akka.event.DefaultLoggingFilter" 33 | 34 | # Specifies the default loggers dispatcher 35 | loggers-dispatcher = "akka.actor.default-dispatcher" 36 | 37 | # Loggers are created and registered synchronously during ActorSystem 38 | # start-up, and since they are actors, this timeout is used to bound the 39 | # waiting time 40 | logger-startup-timeout = 5s 41 | 42 | # Log level used by the configured loggers (see "loggers") as soon 43 | # as they have been started; before that, see "stdout-loglevel" 44 | # Options: OFF, ERROR, WARNING, INFO, DEBUG 45 | loglevel = "INFO" 46 | 47 | # Log level for the very basic logger activated during ActorSystem startup. 48 | # This logger prints the log messages to stdout (System.out). 49 | # Options: OFF, ERROR, WARNING, INFO, DEBUG 50 | stdout-loglevel = "WARNING" 51 | 52 | # Log the complete configuration at INFO level when the actor system is started. 
53 | # This is useful when you are uncertain of what configuration is used. 54 | log-config-on-start = off 55 | 56 | # Log at info level when messages are sent to dead letters. 57 | # Possible values: 58 | # on: all dead letters are logged 59 | # off: no logging of dead letters 60 | # n: positive integer, number of dead letters that will be logged 61 | log-dead-letters = 10 62 | 63 | # Possibility to turn off logging of dead letters while the actor system 64 | # is shutting down. Logging is only done when enabled by 'log-dead-letters' 65 | # setting. 66 | log-dead-letters-during-shutdown = on 67 | 68 | # List FQCN of extensions which shall be loaded at actor system startup. 69 | # Library extensions are regular extensions that are loaded at startup and are 70 | # available for third party library authors to enable auto-loading of extensions when 71 | # present on the classpath. This is done by appending entries: 72 | # 'library-extensions += "Extension"' in the library `reference.conf`. 73 | # 74 | # Should not be set by end user applications in 'application.conf', use the extensions property for that 75 | # 76 | library-extensions = [] 77 | 78 | # List FQCN of extensions which shall be loaded at actor system startup. 79 | # Should be in the format: 'extensions = ["foo", "bar"]' etc. 80 | # See the Akka Documentation for more info about Extensions 81 | extensions = [] 82 | 83 | # Toggles whether threads created by this ActorSystem should be daemons or not 84 | daemonic = off 85 | 86 | # JVM shutdown, System.exit(-1), in case of a fatal error, 87 | # such as OutOfMemoryError 88 | jvm-exit-on-fatal-error = on 89 | 90 | actor { 91 | 92 | # FQCN of the ActorRefProvider to be used; the below is the built-in default, 93 | # another one is akka.remote.RemoteActorRefProvider in the akka-remote bundle. 94 | provider = "akka.actor.LocalActorRefProvider" 95 | 96 | # The guardian "/user" will use this class to obtain its supervisorStrategy. 97 | # It needs to be a subclass of akka.actor.SupervisorStrategyConfigurator. 98 | # In addition to the default there is akka.actor.StoppingSupervisorStrategy. 99 | guardian-supervisor-strategy = "akka.actor.DefaultSupervisorStrategy" 100 | 101 | # Timeout for ActorSystem.actorOf 102 | creation-timeout = 20s 103 | 104 | # Serializes and deserializes (non-primitive) messages to ensure immutability; 105 | # this is only intended for testing. 106 | serialize-messages = off 107 | 108 | # Serializes and deserializes creators (in Props) to ensure that they can be 109 | # sent over the network; this is only intended for testing. Purely local deployments 110 | # as marked with deploy.scope == LocalScope are exempt from verification. 111 | serialize-creators = off 112 | 113 | # Timeout for send operations to top-level actors which are in the process 114 | # of being started. This is only relevant if using a bounded mailbox or the 115 | # CallingThreadDispatcher for a top-level actor.
116 | unstarted-push-timeout = 10s 117 | 118 | typed { 119 | # Default timeout for typed actor methods with non-void return type 120 | timeout = 5s 121 | } 122 | 123 | # Mapping from 'deployment.router' short names to fully qualified class names 124 | router.type-mapping { 125 | from-code = "akka.routing.NoRouter" 126 | round-robin-pool = "akka.routing.RoundRobinPool" 127 | round-robin-group = "akka.routing.RoundRobinGroup" 128 | random-pool = "akka.routing.RandomPool" 129 | random-group = "akka.routing.RandomGroup" 130 | balancing-pool = "akka.routing.BalancingPool" 131 | smallest-mailbox-pool = "akka.routing.SmallestMailboxPool" 132 | broadcast-pool = "akka.routing.BroadcastPool" 133 | broadcast-group = "akka.routing.BroadcastGroup" 134 | scatter-gather-pool = "akka.routing.ScatterGatherFirstCompletedPool" 135 | scatter-gather-group = "akka.routing.ScatterGatherFirstCompletedGroup" 136 | tail-chopping-pool = "akka.routing.TailChoppingPool" 137 | tail-chopping-group = "akka.routing.TailChoppingGroup" 138 | consistent-hashing-pool = "akka.routing.ConsistentHashingPool" 139 | consistent-hashing-group = "akka.routing.ConsistentHashingGroup" 140 | } 141 | 142 | deployment { 143 | 144 | # deployment id pattern - in the format: /parent/child etc. 145 | default { 146 | 147 | # The id of the dispatcher to use for this actor. 148 | # If undefined or empty the dispatcher specified in code 149 | # (Props.withDispatcher) is used, or default-dispatcher if not 150 | # specified at all. 151 | dispatcher = "" 152 | 153 | # The id of the mailbox to use for this actor. 154 | # If undefined or empty the default mailbox of the configured dispatcher 155 | # is used or if there is no mailbox configuration the mailbox specified 156 | # in code (Props.withMailbox) is used. 157 | # If there is a mailbox defined in the configured dispatcher then that 158 | # overrides this setting. 159 | mailbox = "" 160 | 161 | # routing (load-balance) scheme to use 162 | # - available: "from-code", "round-robin", "random", "smallest-mailbox", 163 | # "scatter-gather", "broadcast" 164 | # - or: Fully qualified class name of the router class. 165 | # The class must extend akka.routing.CustomRouterConfig and 166 | # have a public constructor with com.typesafe.config.Config 167 | # and optional akka.actor.DynamicAccess parameter. 168 | # - default is "from-code"; 169 | # Whether or not an actor is transformed to a Router is decided in code 170 | # only (Props.withRouter). The type of router can be overridden in the 171 | # configuration; specifying "from-code" means that the values specified 172 | # in the code shall be used. 173 | # In case of routing, the actors to be routed to can be specified 174 | # in several ways: 175 | # - nr-of-instances: will create that many children 176 | # - routees.paths: will route messages to these paths using ActorSelection, 177 | # i.e.
will not create children 178 | # - resizer: dynamically resizable number of routees as specified in 179 | # resizer below 180 | router = "from-code" 181 | 182 | # number of children to create in case of a router; 183 | # this setting is ignored if routees.paths is given 184 | nr-of-instances = 1 185 | 186 | # within is the timeout used for routers containing future calls 187 | within = 5 seconds 188 | 189 | # number of virtual nodes per node for consistent-hashing router 190 | virtual-nodes-factor = 10 191 | 192 | tail-chopping-router { 193 | # interval is duration between sending message to next routee 194 | interval = 10 milliseconds 195 | } 196 | 197 | routees { 198 | # Alternatively to giving nr-of-instances you can specify the full 199 | # paths of those actors which should be routed to. This setting takes 200 | # precedence over nr-of-instances 201 | paths = [] 202 | } 203 | 204 | # To use a dedicated dispatcher for the routees of the pool you can 205 | # define the dispatcher configuration inline with the property name 206 | # 'pool-dispatcher' in the deployment section of the router. 207 | # For example: 208 | # pool-dispatcher { 209 | # fork-join-executor.parallelism-min = 5 210 | # fork-join-executor.parallelism-max = 5 211 | # } 212 | 213 | # Routers with dynamically resizable number of routees; this feature is 214 | # enabled by including (parts of) this section in the deployment 215 | resizer { 216 | 217 | enabled = off 218 | 219 | # The fewest number of routees the router should ever have. 220 | lower-bound = 1 221 | 222 | # The maximum number of routees the router should ever have. 223 | # Must be greater than or equal to lower-bound. 224 | upper-bound = 10 225 | 226 | # Threshold used to evaluate if a routee is considered to be busy 227 | # (under pressure). Implementation depends on this value (default is 1). 228 | # 0: number of routees currently processing a message. 229 | # 1: number of routees currently processing a message or with 230 | # some messages in their mailbox. 231 | # > 1: number of routees with at least the configured pressure-threshold 232 | # messages in their mailbox. Note that estimating mailbox size of 233 | # the default UnboundedMailbox is an O(N) operation. 234 | pressure-threshold = 1 235 | 236 | # Percentage to increase capacity whenever all routees are busy. 237 | # For example, 0.2 would increase 20% (rounded up), i.e. if current 238 | # capacity is 6 it will request an increase of 2 more routees. 239 | rampup-rate = 0.2 240 | 241 | # Minimum fraction of busy routees before backing off. 242 | # For example, if this is 0.3, then we'll remove some routees only when 243 | # less than 30% of routees are busy, i.e. if current capacity is 10 and 244 | # 3 are busy then the capacity is unchanged, but if 2 or fewer are busy 245 | # the capacity is decreased. 246 | # Use 0.0 or negative to avoid removal of routees. 247 | backoff-threshold = 0.3 248 | 249 | # Fraction of routees to be removed when the resizer reaches the 250 | # backoffThreshold. 251 | # For example, 0.1 would decrease 10% (rounded up), i.e. if current 252 | # capacity is 9 it will request a decrease of 1 routee. 253 | backoff-rate = 0.1 254 | 255 | # Number of messages between resize operations. 256 | # Use 1 to resize before each message. 257 | messages-per-resize = 10 258 | } 259 | 260 | # Routers with dynamically resizable number of routees based on 261 | # performance metrics.
262 | # This feature is enabled by including (parts of) this section in 263 | # the deployment; it cannot be enabled together with the default resizer. 264 | optimal-size-exploring-resizer { 265 | 266 | enabled = off 267 | 268 | # The fewest number of routees the router should ever have. 269 | lower-bound = 1 270 | 271 | # The maximum number of routees the router should ever have. 272 | # Must be greater than or equal to lower-bound. 273 | upper-bound = 10 274 | 275 | # probability of doing a ramping down when all routees are busy 276 | # during exploration. 277 | chance-of-ramping-down-when-full = 0.2 278 | 279 | # Interval between each resize attempt 280 | action-interval = 5s 281 | 282 | # If the routees have not been fully utilized (i.e. all routees busy) 283 | # for this length of time, the resizer will downsize the pool. 284 | downsize-after-underutilized-for = 72h 285 | 286 | # During exploration, the ratio between the largest step size and 287 | # current pool size. E.g. if the current pool size is 50, and the 288 | # explore-step-size is 0.1, the maximum pool size change during 289 | # exploration will be +- 5 290 | explore-step-size = 0.1 291 | 292 | # Probability of doing an exploration vs. optimization. 293 | chance-of-exploration = 0.4 294 | 295 | # When downsizing after a long streak of underutilization, the resizer 296 | # will downsize the pool to the highest utilization multiplied by 297 | # a downsize ratio. This downsize ratio determines the new pool's size 298 | # in comparison to the highest utilization. 299 | # E.g. if the highest utilization is 10, and the downsize ratio 300 | # is 0.8, the pool will be downsized to 8 301 | downsize-ratio = 0.8 302 | 303 | # When optimizing, the resizer only considers the sizes adjacent to the 304 | # current size. This number indicates how many adjacent sizes to consider. 305 | optimization-range = 16 306 | 307 | # The weight of the latest metric over old metrics when collecting 308 | # performance metrics. 309 | # E.g. if the last processing speed is 10 millis per message at pool 310 | # size 5, and if the new processing speed collected is 6 millis per 311 | # message at pool size 5, then given a weight of 0.3, the metrics 312 | # representing pool size 5 will be 6 * 0.3 + 10 * 0.7, i.e. 8.8 millis. 313 | # Obviously, this number should be between 0 and 1. 314 | weight-of-latest-metric = 0.5 315 | } 316 | } 317 | 318 | #/IO-DNS/inet-address { 319 | # mailbox = "unbounded" 320 | # router = "consistent-hashing-pool" 321 | # nr-of-instances = 4 322 | #} 323 | } 324 | 325 | default-dispatcher { 326 | # Must be one of the following 327 | # Dispatcher, PinnedDispatcher, or a FQCN of a class inheriting 328 | # MessageDispatcherConfigurator with a public constructor with 329 | # both com.typesafe.config.Config and 330 | # akka.dispatch.DispatcherPrerequisites parameters. 331 | # PinnedDispatcher must be used together with executor=thread-pool-executor. 332 | type = "Dispatcher" 333 | 334 | # Which kind of ExecutorService to use for this dispatcher 335 | # Valid options: 336 | # - "default-executor" requires a "default-executor" section 337 | # - "fork-join-executor" requires a "fork-join-executor" section 338 | # - "thread-pool-executor" requires a "thread-pool-executor" section 339 | # - A FQCN of a class extending ExecutorServiceConfigurator 340 | executor = "default-executor" 341 | 342 | # This will be used if you have set "executor = "default-executor"".
343 | # If an ActorSystem is created with a given ExecutionContext, this 344 | # ExecutionContext will be used as the default executor for all 345 | # dispatchers in the ActorSystem configured with 346 | # executor = "default-executor". Note that "default-executor" 347 | # is the default value for executor, and therefore used if not 348 | # specified otherwise. If no ExecutionContext is given, 349 | # the executor configured in "fallback" will be used. 350 | default-executor { 351 | fallback = "fork-join-executor" 352 | } 353 | 354 | # This will be used if you have set "executor = "fork-join-executor"" 355 | # Underlying thread pool implementation is scala.concurrent.forkjoin.ForkJoinPool 356 | fork-join-executor { 357 | # Min number of threads to cap factor-based parallelism number to 358 | parallelism-min = 8 359 | 360 | # The parallelism factor is used to determine thread pool size using the 361 | # following formula: ceil(available processors * factor). Resulting size 362 | # is then bounded by the parallelism-min and parallelism-max values. 363 | parallelism-factor = 3.0 364 | 365 | # Max number of threads to cap factor-based parallelism number to 366 | parallelism-max = 64 367 | 368 | # Set to "FIFO" to use queue-like peeking mode ("poll"), or "LIFO" to use 369 | # stack-like peeking mode ("pop"). 370 | task-peeking-mode = "FIFO" 371 | } 372 | 373 | # This will be used if you have set "executor = "thread-pool-executor"" 374 | # Underlying thread pool implementation is java.util.concurrent.ThreadPoolExecutor 375 | thread-pool-executor { 376 | # Keep alive time for threads 377 | keep-alive-time = 60s 378 | 379 | # Define a fixed thread pool size with this property. The corePoolSize 380 | # and the maximumPoolSize of the ThreadPoolExecutor will be set to this 381 | # value, if it is defined. Then the other pool-size properties will not 382 | # be used. 383 | # 384 | # Valid values are: `off` or a positive integer. 385 | fixed-pool-size = off 386 | 387 | # Min number of threads to cap factor-based corePoolSize number to 388 | core-pool-size-min = 8 389 | 390 | # The core-pool-size-factor is used to determine corePoolSize of the 391 | # ThreadPoolExecutor using the following formula: 392 | # ceil(available processors * factor). 393 | # Resulting size is then bounded by the core-pool-size-min and 394 | # core-pool-size-max values. 395 | core-pool-size-factor = 3.0 396 | 397 | # Max number of threads to cap factor-based corePoolSize number to 398 | core-pool-size-max = 64 399 | 400 | # Minimum number of threads to cap factor-based maximumPoolSize number to 401 | max-pool-size-min = 8 402 | 403 | # The max-pool-size-factor is used to determine maximumPoolSize of the 404 | # ThreadPoolExecutor using the following formula: 405 | # ceil(available processors * factor) 406 | # The maximumPoolSize will not be less than corePoolSize. 407 | # It is only used if using a bounded task queue.
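# (i.e. only when task-queue-size below is set to a positive value)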
408 | max-pool-size-factor = 3.0 409 | 410 | # Max number of threads to cap factor-based maximumPoolSize number to 411 | max-pool-size-max = 64 412 | 413 | # Specifies the bounded capacity of the task queue (< 1 == unbounded) 414 | task-queue-size = -1 415 | 416 | # Specifies which type of task queue will be used, can be "array" or 417 | # "linked" (default) 418 | task-queue-type = "linked" 419 | 420 | # Allow core threads to time out 421 | allow-core-timeout = on 422 | } 423 | 424 | # How long the dispatcher will wait for new actors before it shuts down 425 | shutdown-timeout = 1s 426 | 427 | # Throughput defines the number of messages that are processed in a batch 428 | # before the thread is returned to the pool. Set to 1 to be as fair as possible. 429 | throughput = 5 430 | 431 | # Throughput deadline for Dispatcher, set to 0 or negative for no deadline 432 | throughput-deadline-time = 0ms 433 | 434 | # For BalancingDispatcher: If the balancing dispatcher should attempt to 435 | # schedule idle actors using the same dispatcher when a message comes in, 436 | # and the dispatcher's ExecutorService is not fully busy already. 437 | attempt-teamwork = on 438 | 439 | # If this dispatcher requires a specific type of mailbox, specify the 440 | # fully-qualified class name here; the actually created mailbox will 441 | # be a subtype of this type. The empty string signifies no requirement. 442 | mailbox-requirement = "" 443 | } 444 | 445 | default-mailbox { 446 | # FQCN of the MailboxType. The Class of the FQCN must have a public 447 | # constructor with 448 | # (akka.actor.ActorSystem.Settings, com.typesafe.config.Config) parameters. 449 | mailbox-type = "akka.dispatch.UnboundedMailbox" 450 | 451 | # If the mailbox is bounded then it uses this setting to determine its 452 | # capacity. The provided value must be positive. 453 | # NOTICE: 454 | # Up to version 2.1 the mailbox type was determined based on this setting; 455 | # this is no longer the case; the type must explicitly be a bounded mailbox. 456 | mailbox-capacity = 1000 457 | 458 | # If the mailbox is bounded then this is the timeout for enqueueing 459 | # in case the mailbox is full. Negative values signify infinite 460 | # timeout, which should be avoided as it bears the risk of deadlock. 461 | mailbox-push-timeout-time = 10s 462 | 463 | # For Actor with Stash: The default capacity of the stash. 464 | # If negative (or zero) then an unbounded stash is used (default) 465 | # If positive then a bounded stash is used and the capacity is set using 466 | # the property 467 | stash-capacity = -1 468 | } 469 | 470 | mailbox { 471 | # Mapping between message queue semantics and mailbox configurations. 472 | # Used by akka.dispatch.RequiresMessageQueue[T] to enforce different 473 | # mailbox types on actors.
474 | # If your Actor implements RequiresMessageQueue[T], then when you create 475 | # an instance of that actor its mailbox type will be decided by looking 476 | # up a mailbox configuration via T in this mapping 477 | requirements { 478 | "akka.dispatch.UnboundedMessageQueueSemantics" = 479 | akka.actor.mailbox.unbounded-queue-based 480 | "akka.dispatch.BoundedMessageQueueSemantics" = 481 | akka.actor.mailbox.bounded-queue-based 482 | "akka.dispatch.DequeBasedMessageQueueSemantics" = 483 | akka.actor.mailbox.unbounded-deque-based 484 | "akka.dispatch.UnboundedDequeBasedMessageQueueSemantics" = 485 | akka.actor.mailbox.unbounded-deque-based 486 | "akka.dispatch.BoundedDequeBasedMessageQueueSemantics" = 487 | akka.actor.mailbox.bounded-deque-based 488 | "akka.dispatch.MultipleConsumerSemantics" = 489 | akka.actor.mailbox.unbounded-queue-based 490 | "akka.dispatch.ControlAwareMessageQueueSemantics" = 491 | akka.actor.mailbox.unbounded-control-aware-queue-based 492 | "akka.dispatch.UnboundedControlAwareMessageQueueSemantics" = 493 | akka.actor.mailbox.unbounded-control-aware-queue-based 494 | "akka.dispatch.BoundedControlAwareMessageQueueSemantics" = 495 | akka.actor.mailbox.bounded-control-aware-queue-based 496 | "akka.event.LoggerMessageQueueSemantics" = 497 | akka.actor.mailbox.logger-queue 498 | } 499 | 500 | unbounded-queue-based { 501 | # FQCN of the MailboxType. The Class of the FQCN must have a public 502 | # constructor with (akka.actor.ActorSystem.Settings, 503 | # com.typesafe.config.Config) parameters. 504 | mailbox-type = "akka.dispatch.UnboundedMailbox" 505 | } 506 | 507 | bounded-queue-based { 508 | # FQCN of the MailboxType. The Class of the FQCN must have a public 509 | # constructor with (akka.actor.ActorSystem.Settings, 510 | # com.typesafe.config.Config) parameters. 511 | mailbox-type = "akka.dispatch.BoundedMailbox" 512 | } 513 | 514 | unbounded-deque-based { 515 | # FQCN of the MailboxType. The Class of the FQCN must have a public 516 | # constructor with (akka.actor.ActorSystem.Settings, 517 | # com.typesafe.config.Config) parameters. 518 | mailbox-type = "akka.dispatch.UnboundedDequeBasedMailbox" 519 | } 520 | 521 | bounded-deque-based { 522 | # FQCN of the MailboxType. The Class of the FQCN must have a public 523 | # constructor with (akka.actor.ActorSystem.Settings, 524 | # com.typesafe.config.Config) parameters. 525 | mailbox-type = "akka.dispatch.BoundedDequeBasedMailbox" 526 | } 527 | 528 | unbounded-control-aware-queue-based { 529 | # FQCN of the MailboxType. The Class of the FQCN must have a public 530 | # constructor with (akka.actor.ActorSystem.Settings, 531 | # com.typesafe.config.Config) parameters. 532 | mailbox-type = "akka.dispatch.UnboundedControlAwareMailbox" 533 | } 534 | 535 | bounded-control-aware-queue-based { 536 | # FQCN of the MailboxType. The Class of the FQCN must have a public 537 | # constructor with (akka.actor.ActorSystem.Settings, 538 | # com.typesafe.config.Config) parameters. 539 | mailbox-type = "akka.dispatch.BoundedControlAwareMailbox" 540 | } 541 | 542 | # The LoggerMailbox will drain all messages in the mailbox 543 | # when the system is shut down and deliver them to the StandardOutLogger. 544 | # Do not change this unless you know what you are doing.
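# (the requirements mapping above binds akka.event.LoggerMessageQueueSemantics to this logger-queue section)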
545 | logger-queue { 546 | mailbox-type = "akka.event.LoggerMailboxType" 547 | } 548 | } 549 | 550 | debug { 551 | # enable the Actor.loggable() function, which logs any received message 552 | # at DEBUG level; see the “Testing Actor Systems” section of the Akka 553 | # Documentation at http://akka.io/docs 554 | receive = off 555 | 556 | # enable DEBUG logging of all AutoReceiveMessages (Kill, PoisonPill etc.) 557 | autoreceive = off 558 | 559 | # enable DEBUG logging of actor lifecycle changes 560 | lifecycle = off 561 | 562 | # enable DEBUG logging of all LoggingFSMs for events, transitions and timers 563 | fsm = off 564 | 565 | # enable DEBUG logging of subscription changes on the eventStream 566 | event-stream = off 567 | 568 | # enable DEBUG logging of unhandled messages 569 | unhandled = off 570 | 571 | # enable WARN logging of misconfigured routers 572 | router-misconfiguration = off 573 | } 574 | 575 | # Entries for pluggable serializers and their bindings. 576 | serializers { 577 | java = "akka.serialization.JavaSerializer" 578 | bytes = "akka.serialization.ByteArraySerializer" 579 | } 580 | 581 | # Class to Serializer binding. You only need to specify the name of an 582 | # interface or abstract base class of the messages. In case of ambiguity it 583 | # uses the most specific configured class, or gives a warning and 584 | # chooses the “first” one. 585 | # 586 | # To disable one of the default serializers, assign its class to "none", like 587 | # "java.io.Serializable" = none 588 | serialization-bindings { 589 | "[B" = bytes 590 | "java.io.Serializable" = java 591 | } 592 | 593 | # Log warnings when the default Java serialization is used to serialize messages. 594 | # The default serializer uses Java serialization which is not very performant and should not 595 | # be used in production environments unless you don't care about performance. In that case 596 | # you can turn this off. 597 | warn-about-java-serializer-usage = on 598 | 599 | # Configuration namespace of serialization identifiers. 600 | # Each serializer implementation must have an entry in the following format: 601 | # `akka.actor.serialization-identifiers."FQCN" = ID` 602 | # where `FQCN` is the fully qualified class name of the serializer implementation 603 | # and `ID` is a globally unique serializer identifier number. 604 | # Identifier values from 0 to 16 are reserved for Akka internal usage. 605 | serialization-identifiers { 606 | "akka.serialization.JavaSerializer" = 1 607 | "akka.serialization.ByteArraySerializer" = 4 608 | } 609 | 610 | # Configuration items which are used by the akka.actor.ActorDSL._ methods 611 | dsl { 612 | # Maximum queue size of the actor created by newInbox(); this protects 613 | # against faulty programs which use select() and consistently miss messages 614 | inbox-size = 1000 615 | 616 | # Default timeout to assume for operations like Inbox.receive et al 617 | default-timeout = 5s 618 | } 619 | } 620 | 621 | # Used to set the behavior of the scheduler. 622 | # Changing the default values may change the system behavior drastically so make 623 | # sure you know what you're doing! See the Scheduler section of the Akka 624 | # Documentation for more details. 625 | scheduler { 626 | # The LightArrayRevolverScheduler is used as the default scheduler in the 627 | # system. It does not execute the scheduled tasks at the exact time, but on every 628 | # tick, it will run everything that is (over)due.
You can increase or decrease 629 | # the accuracy of the execution timing by specifying smaller or larger tick 630 | # duration. If you are scheduling a lot of tasks you should consider increasing 631 | # the ticks per wheel. 632 | # Note that it might take up to 1 tick to stop the Timer, so setting the 633 | # tick-duration to a high value will make shutting down the actor system 634 | # take longer. 635 | tick-duration = 10ms 636 | 637 | # The timer uses a circular wheel of buckets to store the timer tasks. 638 | # This should be set such that the majority of scheduled timeouts (for high 639 | # scheduling frequency) will be shorter than one rotation of the wheel 640 | # (ticks-per-wheel * ticks-duration) 641 | # THIS MUST BE A POWER OF TWO! 642 | ticks-per-wheel = 512 643 | 644 | # This setting selects the timer implementation which shall be loaded at 645 | # system start-up. 646 | # The class given here must implement the akka.actor.Scheduler interface 647 | # and offer a public constructor which takes three arguments: 648 | # 1) com.typesafe.config.Config 649 | # 2) akka.event.LoggingAdapter 650 | # 3) java.util.concurrent.ThreadFactory 651 | #implementation = akka.actor.LightArrayRevolverScheduler 652 | implementation = akka.actor.EventLoopScheduler 653 | 654 | # When shutting down the scheduler, there will typically be a thread which 655 | # needs to be stopped, and this timeout determines how long to wait for 656 | # that to happen. In case of timeout the shutdown of the actor system will 657 | # proceed without running possibly still enqueued tasks. 658 | shutdown-timeout = 5s 659 | } 660 | 661 | } 662 | """ 663 | 664 | import com.typesafe.config.{Config, ConfigFactory} 665 | 666 | val config: Config = ConfigFactory.parseString(default) 667 | 668 | } 669 | -------------------------------------------------------------------------------- /facade/shared/src/test/scala/SHoconGenericSpec.scala: -------------------------------------------------------------------------------- 1 | package tests 2 | 3 | import java.{util => ju} 4 | 5 | import utest._ 6 | 7 | import scala.util.{Failure, Success} 8 | import com.typesafe.config.{Config, ConfigFactory, ConfigValue} 9 | 10 | import scala.collection.JavaConverters._ 11 | import java.util.concurrent.TimeUnit 12 | 13 | import org.akkajs.shocon 14 | 15 | object SHoconGenericSpec extends TestSuite { 16 | 17 | val tests = Tests { 18 | 19 | 'parseEmptyList - { 20 | val config = ConfigFactory.parseString("""{ "a" : [] }""") 21 | 22 | assert { config != null } 23 | assert { config.hasPath("a") } 24 | 25 | assert { config.getStringList("a").isEmpty } 26 | } 27 | 28 | 'parseBasicValues - { 29 | val config = ConfigFactory.parseString("""{ "a" : "2" }""") 30 | 31 | assert { config != null } 32 | 33 | assert { config.getString("a") == "2" } 34 | assert { config.getInt("a") == 2 } 35 | } 36 | 37 | 'parseStringLiteralsWithSlashes - { 38 | val config = ConfigFactory.parseString("""a = some/path""") 39 | assert { config != null } 40 | assert { config.getString("a") == "some/path" } 41 | } 42 | 43 | 'parseLists - { 44 | val config1 = ConfigFactory.parseString( 45 | """l =[ a 46 | | 47 | | b 48 | | c 49 | | 50 | | d ]""".stripMargin 51 | ) 52 | 53 | val config2 = ConfigFactory.parseString("l = [a,b] \n[c, d]") 54 | 55 | assert { config1 != null && config2 != null } 56 | 57 | assert { config1.getStringList("l") == List("a", "b", "c", "d").asJava } 58 | assert { config2.getStringList("l") == config1.getStringList("l") } 59 | } 60 | 61 | 'parseNestedObjects - { 62 | 
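// a nested object is reachable via getConfig; typed getters then resolve paths relative to it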
val config = ConfigFactory.parseString("a = { b = 1 }") 63 | 64 | assert { config != null } 65 | 66 | assert { config.getConfig("a").getInt("b") == 1 } 67 | } 68 | 69 | 'pasreNewLinesIsteadOfCommas - { 70 | val config = ConfigFactory.parseString("""{ 71 | foo = 1 72 | 73 | bar = 2 74 | 75 | baz = 3} 76 | """) 77 | 78 | assert { config != null } 79 | 80 | assert { config.getInt("foo") == 1 } 81 | assert { config.getInt("bar") == 2 } 82 | assert { config.getInt("baz") == 3 } 83 | } 84 | 85 | 'parseConcatenatedValues - { 86 | val config1 = ConfigFactory.parseString("x = {a:1, b: 2}\n {c: 3, d: 4}") 87 | 88 | val config2 = ConfigFactory.parseString("x = {a:1, b: 2\nc: 3, d: 4}") 89 | 90 | assert { config1 != null && config2 != null } 91 | 92 | assert { config1 == config2 } 93 | } 94 | 95 | 'parseAndConcatenateStringValues - { 96 | val config = ConfigFactory.parseString(""" 97 | |x = a b c d 98 | |y = 10 99 | |""".stripMargin) 100 | 101 | assert { config != null } 102 | 103 | assert { config.getString("x") == "a b c d" } 104 | } 105 | 106 | 'parseAkkaConfFiles - { 107 | val basic = ConfigFactory.parseString(AkkaConf.basic) 108 | val long = ConfigFactory.parseString(AkkaConf.long) 109 | 110 | assert { basic != null && long != null } 111 | 112 | assert { basic.getString("akka.version") == "2.0-SNAPSHOT" } 113 | assert { long.getString("akka.version") == "2.0-SNAPSHOT" } 114 | } 115 | 116 | 'parseDurations - { 117 | val config = ConfigFactory.parseString( 118 | """ a { 119 | |x = 1 ms 120 | |}""".stripMargin 121 | ) 122 | 123 | assert { config != null } 124 | 125 | assert { config.getDuration("a.x").toMillis.toLong == 1L } 126 | assert { config.getDuration("a.x", TimeUnit.NANOSECONDS).toLong == 1000000L } 127 | } 128 | 129 | 'parseBytes - { 130 | val config = ConfigFactory.parseString( 131 | """ a { 132 | |b = 9 b 133 | |B = 9 b 134 | |byte = 1 byte 135 | |bytes = 9 bytes 136 | |kB = 9 kB 137 | |kilobyte = 1 kilobyte 138 | |kilobytes = 9 kilobyte 139 | |MB = 9 MB 140 | |megabyte = 1 megabyte 141 | |megabytes = 9 megabytes 142 | |GB = 9 GB 143 | |gigabyte = 1 gigabyte 144 | |gigabytes = 9 gigabytes 145 | |TB = 9 TB 146 | |terabyte = 1 terabyte 147 | |terabytes = 9 terabytes 148 | |PB = 9 PB 149 | |petabyte = 1 petabyte 150 | |petabytes = 9 petabytes 151 | |EB = 9 EB 152 | |K = 1 K 153 | |k = 1 k 154 | |Ki = 1 Ki 155 | |KiB = 1 KiB 156 | |m = 1 m 157 | |M = 1 M 158 | |Mi = 1 Mi 159 | |MiB = 1 MiB 160 | |g = 1 g 161 | |G = 1 G 162 | |Gi = 1 Gi 163 | |GiB = 1 GiB 164 | |}""".stripMargin 165 | ) 166 | 167 | assert { config != null } 168 | 169 | assert { config.getBytes("a.b") == 9L } 170 | assert { config.getBytes("a.B") == 9L } 171 | assert { config.getBytes("a.byte") == 1L } 172 | assert { config.getBytes("a.bytes") == 9L } 173 | assert { config.getBytes("a.kB") == 9000L } 174 | assert { config.getBytes("a.kilobyte") == 1000L } 175 | assert { config.getBytes("a.kilobytes") == 9000L } 176 | assert { config.getBytes("a.MB") == 9000000L } 177 | assert { config.getBytes("a.megabyte") == 1000000L } 178 | assert { config.getBytes("a.megabytes") == 9000000L } 179 | assert { config.getBytes("a.GB") == 9000000000L } 180 | assert { config.getBytes("a.gigabyte") == 1000000000L } 181 | assert { config.getBytes("a.gigabytes") == 9000000000L } 182 | assert { config.getBytes("a.TB") == 9000000000000L } 183 | assert { config.getBytes("a.terabyte") == 1000000000000L } 184 | assert { config.getBytes("a.terabytes") == 9000000000000L } 185 | assert { config.getBytes("a.PB") == 9000000000000000L } 186 | assert { 
config.getBytes("a.petabyte") == 1000000000000000L } 187 | assert { config.getBytes("a.petabytes") == 9000000000000000L } 188 | assert { config.getBytes("a.k") == 1024L } 189 | assert { config.getBytes("a.K") == 1024L } 190 | assert { config.getBytes("a.Ki") == 1024L } 191 | assert { config.getBytes("a.KiB") == 1024L } 192 | assert { config.getBytes("a.m") == 1024L * 1024L } 193 | assert { config.getBytes("a.M") == 1024L * 1024L } 194 | assert { config.getBytes("a.Mi") == 1024L * 1024L } 195 | assert { config.getBytes("a.MiB") == 1024L * 1024L } 196 | assert { config.getBytes("a.g") == 1024L * 1024L * 1024L } 197 | assert { config.getBytes("a.G") == 1024L * 1024L * 1024L } 198 | assert { config.getBytes("a.Gi") == 1024L * 1024L * 1024L } 199 | assert { config.getBytes("a.GiB") == 1024L * 1024L * 1024L } 200 | 201 | } 202 | 203 | 'parseBooleans - { 204 | val config = ConfigFactory.parseString( 205 | """ a { 206 | |x1 = true 207 | |x2 = on 208 | |x3 = yes 209 | |x4 = false 210 | |x5 = off 211 | |x6 = no 212 | |}""".stripMargin 213 | ) 214 | 215 | assert { config != null } 216 | 217 | assert { config.getBoolean("a.x1") == true } 218 | assert { config.getBoolean("a.x2") == true } 219 | assert { config.getBoolean("a.x3") == true } 220 | assert { config.getBoolean("a.x4") == false } 221 | assert { config.getBoolean("a.x5") == false } 222 | assert { config.getBoolean("a.x6") == false } 223 | } 224 | 225 | 'parseAkkaConfiguration - { 226 | val config = AkkaConfig.config 227 | 228 | assert { config != null } 229 | 230 | config.getConfig("akka").entrySet() 231 | 232 | () 233 | } 234 | 235 | 'reloadConfigWithFallback - { 236 | val config1 = ConfigFactory.parseString("""{ "a" : [] }""") 237 | val config2 = ConfigFactory.parseString("""{ "b" : [] }""") 238 | 239 | assert { config1 != null && config2 != null } 240 | 241 | val config = config1.withFallback(config2).withFallback(config1) 242 | 243 | assert { config.hasPath("a") == true } 244 | assert { config.hasPath("b") == true } 245 | 246 | } 247 | 248 | 'dottedConfigKey - { 249 | val configAkka = 250 | ConfigFactory.parseString("akka.actor.messages = on") 251 | 252 | assert { configAkka.hasPath("akka.actor.messages") == true } 253 | } 254 | 255 | 'dottedConfigKeyWithFallback - { 256 | val configAkka = 257 | ConfigFactory.parseString("akka.actor.debug.event-stream = on").withFallback( 258 | ConfigFactory.parseString(""" 259 | akka.actor.debug.event-stream = off 260 | akka.actor.messages = on 261 | """)) 262 | 263 | assert { configAkka.getBoolean("akka.actor.messages") == true } 264 | } 265 | 266 | 'loadDefaultConfig - { 267 | val config = ConfigFactory.load() 268 | 269 | assert { config != null } 270 | 271 | assert { config.getString("loaded") == "DONE" } 272 | } 273 | 274 | 'unwrappedToStringInMap - { 275 | val config = ConfigFactory.parseString(""" a="b" """) 276 | val map = configToMap(config) 277 | assert { "b" == map("a") } 278 | } 279 | 280 | 'unwrappedNumber - { 281 | val map = ConfigFactory.parseString(""" a=2 """).root.unwrapped 282 | assert { 2 == map.get("a") } 283 | } 284 | 285 | 'unwrappedDuration - { 286 | val map = ConfigFactory.parseString(""" a=2ns """).root.unwrapped 287 | assert { "2ns" == map.get("a") } // Duration is not automatically unwrapped. 
288 | } 289 | 290 | 'unwrappedBoolean - { 291 | val map = ConfigFactory.parseString(""" a=true """).root.unwrapped 292 | assert { true == map.get("a") } 293 | } 294 | 295 | 'reparseKey - { 296 | val key = "foo.bar.baz" 297 | val value = shocon.Config.StringLiteral("quux") 298 | 299 | val reparsed = shocon.Config.Object.reparseKey(key, value) 300 | val expected = shocon.Config.Object( 301 | Map( 302 | "foo" -> shocon.Config.Object(Map("bar" -> shocon.Config.Object( 303 | Map("baz" -> shocon.Config.StringLiteral("quux"))))))) 304 | 305 | assert { expected == reparsed } 306 | } 307 | 308 | 'mergeConfigValues - { 309 | val key1 = "foo.bar.baz" 310 | val value1 = shocon.Config.StringLiteral("quux") 311 | val key2 = "foo.bar.bazz" 312 | val value2 = shocon.Config.StringLiteral("quuxxx") 313 | 314 | val reparsed1 = shocon.Config.Object.reparseKey(key1, value1) 315 | val reparsed2 = shocon.Config.Object.reparseKey(key2, value2) 316 | 317 | import shocon.Config.StringLiteral 318 | val merged = shocon.Config.Object.mergeConfigs(reparsed1, reparsed2) 319 | val expected = shocon.Config.Object( 320 | Map( 321 | "foo" -> shocon.Config.Object( 322 | Map( 323 | "bar" -> shocon.Config.Object(Map( 324 | "baz" -> StringLiteral("quux"), 325 | "bazz" -> StringLiteral("quuxxx") 326 | )))))) 327 | 328 | assert { expected == merged } 329 | 330 | } 331 | 332 | 'concatValues - { 333 | val x = ConfigFactory.parseString( 334 | """x="foo" 335 | |y= z "bar" """.stripMargin) 336 | assert { "foo" == x.getString("x") } 337 | assert { "z bar" == x.getString("y") } 338 | } 339 | 340 | 'properlyFallback - { 341 | val conf1 = ConfigFactory.parseString("""x = "1"""") 342 | val conf2 = ConfigFactory.parseString("""x = "2"""") 343 | 344 | val conf = conf1.withFallback(conf2) 345 | assert { "1" == conf.getString("x") } 346 | } 347 | 348 | 'nestedFallback - { 349 | val conf1 = ConfigFactory.parseString( 350 | """ 351 | |lib { 352 | | message = a 353 | | state = true 354 | | value = 10 355 | |} 356 | |""".stripMargin 357 | ) 358 | val conf2 = ConfigFactory.parseString( 359 | """ 360 | |lib { 361 | | message = b 362 | | state = false 363 | |} 364 | |""".stripMargin 365 | ) 366 | val conf3 = ConfigFactory.parseString( 367 | """ 368 | |lib { 369 | | message = c 370 | |} 371 | |""".stripMargin 372 | ) 373 | 374 | val conf21 = conf2.withFallback(conf1) 375 | assert { conf21.getString("lib.message") == "b" } 376 | assert { conf21.getBoolean("lib.state") == false } 377 | assert { conf21.getInt("lib.value") == 10 } 378 | 379 | val conf321 = conf3.withFallback(conf21) 380 | assert { conf321.getString("lib.message") == "c" } 381 | assert { conf321.getBoolean("lib.state") == false } 382 | assert { conf321.getInt("lib.value") == 10 } 383 | } 384 | 385 | 'parseComments - { 386 | val conf = ConfigFactory.parseString( 387 | """ 388 | // ignored 389 | x = "1" 390 | # ignored 391 | y = "foo" 392 | """) 393 | assert { "1" == conf.getString("x") } 394 | assert { "foo" == conf.getString("y") } 395 | } 396 | 397 | 'parseListOfObjects - { 398 | // protect against having this parsed at compile time 399 | var x = "" 400 | val conf = ConfigFactory.parseString( 401 | s""" 402 | $x 403 | x = [{ 404 | foo = 1 405 | }, { 406 | foo = 2 407 | }] 408 | """) 409 | val res = conf.getConfigList("x") 410 | assert(res.size == 2) 411 | assert(res.asScala(0).getInt("foo") == 1) 412 | assert(res.asScala(1).getInt("foo") == 2) 413 | } 414 | 415 | 'parseListOfObjectsStartingNextLine - { 416 | // protect against having this parsed at compile time 417 | var x = "" 418 | val 
conf = ConfigFactory.parseString( 419 | s""" 420 | $x 421 | x = [ 422 | { 423 | foo = 1 424 | }, { 425 | foo = 2 426 | }] 427 | """) 428 | val res = conf.getConfigList("x") 429 | assert(res.size == 2) 430 | assert(res.asScala(0).getInt("foo") == 1) 431 | assert(res.asScala(1).getInt("foo") == 2) 432 | } 433 | 434 | 'parseListOfObjectsWithNewLineSeparatedObjects - { 435 | // protect against having this parsed at compile time 436 | var x = "" 437 | val conf = ConfigFactory.parseString( 438 | s""" 439 | $x 440 | x = [ 441 | { 442 | foo = 1 443 | }, 444 | { 445 | foo = 2 446 | }] 447 | """) 448 | val res = conf.getConfigList("x") 449 | assert(res.size == 2) 450 | assert(res.asScala(0).getInt("foo") == 1) 451 | assert(res.asScala(1).getInt("foo") == 2) 452 | } 453 | 454 | 'parseListOfObjectsWithTrailingCommas - { 455 | // protect against having this parsed at compile time 456 | var x = "" 457 | val conf = ConfigFactory.parseString( 458 | s""" 459 | $x 460 | x = [ 461 | { 462 | foo = 1 463 | }, 464 | { 465 | foo = 2 466 | }, 467 | ] 468 | """) 469 | val res = conf.getConfigList("x") 470 | assert(res.size == 2) 471 | assert(res.asScala(0).getInt("foo") == 1) 472 | assert(res.asScala(1).getInt("foo") == 2) 473 | } 474 | 475 | 'mergeConfigObjects - { 476 | val conf1 = ConfigFactory.load(ConfigFactory.parseString(""" 477 | akka.stream.materializer.initial-input-buffer-size = 2 478 | akka.stream.materializer.max-input-buffer-size = 2 479 | """)) 480 | val conf2 = ConfigFactory.load(ConfigFactory.parseString(s""" 481 | akka { 482 | stream { 483 | materializer { 484 | creation-timeout = 20s 485 | initial-input-buffer-size = 4 486 | max-input-buffer-size = 16 487 | blocking-io-dispatcher = "akka.stream.default-blocking-io-dispatcher" 488 | dispatcher = "" 489 | subscription-timeout { 490 | mode = cancel 491 | timeout = 5s 492 | } 493 | } 494 | } 495 | } 496 | """)) 497 | 498 | val conf = ConfigFactory.load(conf1.withFallback(conf2)) 499 | 500 | assert(conf.getInt("akka.stream.materializer.initial-input-buffer-size") == 2) 501 | assert(conf.getConfig("akka.stream.materializer").getInt("initial-input-buffer-size") == 2) 502 | assert(conf.getConfig("akka.stream.materializer").getString("subscription-timeout.mode") == "cancel") 503 | } 504 | } 505 | 506 | private final def configToMap(config: Config): Map[String, String] = { 507 | import scala.collection.JavaConverters._ 508 | config.root.unwrapped.asScala.toMap map { case (k, v) ⇒ (k → v.toString) } 509 | } 510 | } 511 | -------------------------------------------------------------------------------- /plugin/build.sbt: -------------------------------------------------------------------------------- 1 | import xerial.sbt.Sonatype._ 2 | 3 | lazy val plugin = project.in(file(".")) 4 | .settings(sonatypeSettings) 5 | .enablePlugins(ScriptedPlugin) 6 | .settings( 7 | name := "sbt-shocon", 8 | description := "sbt plugin for shocon", 9 | sbtPlugin := true, 10 | scalaVersion := "2.12.10", 11 | crossSbtVersions := Vector("1.3.8"), 12 | addSbtPlugin("org.scala-js" % "sbt-scalajs" % scalaJSVersion), 13 | scalacOptions ++= Seq("-feature", 14 | "-unchecked", 15 | "-language:implicitConversions"), 16 | // configuration for testing with sbt-scripted 17 | scriptedLaunchOpts ++= Seq("-Xmx1024M", 18 | "-Dplugin.version=" + version.value), 19 | scriptedBufferLog := false 20 | ) 21 | -------------------------------------------------------------------------------- /plugin/project/Common.scala: -------------------------------------------------------------------------------- 1 | 
-------------------------------------------------------------------------------- /plugin/build.sbt: -------------------------------------------------------------------------------- 1 | import xerial.sbt.Sonatype._ 2 | 3 | lazy val plugin = project.in(file(".")) 4 | .settings(sonatypeSettings) 5 | .enablePlugins(ScriptedPlugin) 6 | .settings( 7 | name := "sbt-shocon", 8 | description := "sbt plugin for shocon", 9 | sbtPlugin := true, 10 | scalaVersion := "2.12.10", 11 | crossSbtVersions := Vector("1.3.8"), 12 | addSbtPlugin("org.scala-js" % "sbt-scalajs" % scalaJSVersion), 13 | scalacOptions ++= Seq("-feature", 14 | "-unchecked", 15 | "-language:implicitConversions"), 16 | // configuration for testing with sbt-scripted 17 | scriptedLaunchOpts ++= Seq("-Xmx1024M", 18 | "-Dplugin.version=" + version.value), 19 | scriptedBufferLog := false 20 | ) 21 | -------------------------------------------------------------------------------- /plugin/project/Common.scala: -------------------------------------------------------------------------------- 1 | ../../project/Common.scala -------------------------------------------------------------------------------- /plugin/project/build.properties: -------------------------------------------------------------------------------- 1 | sbt.version=1.3.8 2 | -------------------------------------------------------------------------------- /plugin/project/plugins.sbt: -------------------------------------------------------------------------------- 1 | addSbtPlugin("org.scala-js" % "sbt-scalajs" % "1.0.1") 2 | 3 | addSbtPlugin("org.xerial.sbt" % "sbt-sonatype" % "2.3") 4 | 5 | addSbtPlugin("com.jsuereth" % "sbt-pgp" % "1.1.0") 6 | 7 | // addSbtPlugin("com.lucidchart" % "sbt-scalafmt" % "1.15") 8 | 9 | resolvers += Resolver.typesafeIvyRepo("releases") 10 | 11 | resolvers += "Typesafe repository" at "http://repo.typesafe.com/typesafe/releases/" 12 | 13 | resolvers += "sonatype-releases" at "https://oss.sonatype.org/content/repositories/releases/" 14 | 15 | // library for plugin testing 16 | libraryDependencies += { "org.scala-sbt" %% "scripted-plugin" % sbtVersion.value } 17 | -------------------------------------------------------------------------------- /plugin/project/project/build.properties: -------------------------------------------------------------------------------- 1 | sbt.version=1.3.8 2 | -------------------------------------------------------------------------------- /plugin/src/main/scala/org/akkajs/shocon/sbtplugin/ShoconPlugin.scala: -------------------------------------------------------------------------------- 1 | package org.akkajs.shocon.sbtplugin 2 | 3 | import java.io.{BufferedInputStream, FileInputStream, FileNotFoundException, InputStream} 4 | import java.net.JarURLConnection 5 | 6 | import sbt.Keys._ 7 | import sbt.{AutoPlugin, Def, _} 8 | 9 | object ShoconPlugin extends AutoPlugin { 10 | 11 | type ShoconFilter = Function1[(String,InputStream),Boolean] 12 | 13 | object autoImport { 14 | val shoconAddLib: SettingKey[Boolean] = 15 | settingKey[Boolean]("If true, add shocon library to project") 16 | 17 | val shoconLoadFromJars: SettingKey[Boolean] = 18 | settingKey[Boolean]("If true, load reference.conf files from dependency JARs") 19 | 20 | val shoconFilter: SettingKey[ShoconFilter] = 21 | settingKey[ShoconFilter]("Filter function applied to each found SHOCON config file") 22 | 23 | val shoconConcatFile: SettingKey[File] = 24 | settingKey[File]("File to which all detected configuration files are concatenated") 25 | 26 | val shoconFiles: TaskKey[Seq[(String,InputStream)]] = 27 | taskKey[Seq[(String,InputStream)]]("List of HOCON configuration files to be included statically at compile time") 28 | 29 | val shoconConcat: TaskKey[File] = 30 | taskKey[File]("Contains all detected configuration files concatenated") 31 | 32 | } 33 | 34 | import autoImport._ 35 | 36 | override def projectSettings: Seq[Def.Setting[_]] = Seq( 37 | shoconAddLib := true, 38 | 39 | shoconLoadFromJars := true, 40 | 41 | shoconFilter := {_:(String,InputStream) => true}, 42 | 43 | shoconConcatFile := (crossTarget in Compile).value / "shocon.conf", 44 | 45 | scalacOptions += "-Xmacro-settings:shocon.files=" + shoconConcatFile.value, 46 | 47 | shoconFiles := loadConfigs( 48 | shoconLoadFromJars.value, 49 | (dependencyClasspath in Compile).value, 50 | (unmanagedResources in Compile).value, 51 | shoconFilter.value, 52 | streams.value.log), 53 | 54 | shoconConcat := { 55 | val log = streams.value.log 56 | val file = shoconConcatFile.value 57 | 58 | log.debug(s"Assembling SHOCON files for project 
'${name.value}'") 59 | val config = shoconFiles.value.map( f => s"# SOURCE ${f._1}\n" + IO.readStream(f._2) ).mkString("\n\n") 60 | 61 | log.debug(s"SHOCON statically compiled into current project:\n$config\n\n") 62 | IO.write( file, config ) 63 | file 64 | } 65 | 66 | // Note: adding the shoconConcat task as a dependency of compile does not work under Scala.js 67 | // if the ScalaJSPlugin is not declared as a requiredPlugin; however, doing so would preclude 68 | // using this plugin for both JVM and JS projects. Hence, shoconConcat must either be 69 | // called manually or be defined as a dependency of compile in each project, as sketched below. 70 | // compile in Compile := (compile in Compile).dependsOn(shoconConcat).value 71 | ) 72 | 73 | 74 | private def loadConfigs(loadFromJars: Boolean, 75 | dependencyClassPath: Classpath, 76 | unmanagedResources: Seq[File], 77 | fileFilter: ShoconFilter, 78 | log: Logger): Seq[(String,InputStream)] = 79 | ((if(loadFromJars) loadDepReferenceConfigs(dependencyClassPath,log) 80 | else Nil) ++ loadProjectConfigs(unmanagedResources,log)) 81 | .filter(fileFilter) 82 | 83 | private def loadProjectConfigs(unmanagedResources: Seq[File], log: Logger): Seq[(String,InputStream)] = { 84 | val files = unmanagedResources 85 | .filter( f => f.getName == "reference.conf" || f.getName == "application.conf") 86 | .sorted 87 | .reverse 88 | .map( f => (f.getAbsolutePath,fin(f)) ) 89 | log.debug("SHOCON config files found in current project:\n" + files.map( " "+_._1).mkString("","\n","\n\n")) 90 | files 91 | } 92 | 93 | private def loadDepReferenceConfigs(cp: Classpath, log: Logger): Seq[(String,InputStream)] = { 94 | val (dirs,jars) = cp.files.partition(_.isDirectory) 95 | loadJarReferenceConfigs(jars,log) ++ loadDirReferenceConfigs(dirs,log) 96 | } 97 | 98 | private def loadDirReferenceConfigs(dirs: Seq[File], log: Logger): Seq[(String,InputStream)] = { 99 | val files = dirs 100 | .map( _ / "reference.conf" ) 101 | .filter( _.isFile ) 102 | .map( f => (f.getAbsolutePath, fin(f)) ) 103 | log.debug("SHOCON config files found in project dependencies:\n" + files.map( " "+_._1).mkString("","\n","\n\n")) 104 | files 105 | } 106 | 107 | private def loadJarReferenceConfigs(jars: Seq[File], log: Logger): Seq[(String,InputStream)] = { 108 | val files = jars 109 | .map( f => new URL("jar:" + f.toURI + "!/reference.conf").openConnection() ) 110 | .map { 111 | case c: JarURLConnection => try{ 112 | Some((c.toString,c.getInputStream)) 113 | } catch { 114 | case _: FileNotFoundException => None 115 | } 116 | } 117 | .collect{ 118 | case Some(in) => in 119 | } 120 | 121 | log.debug("SHOCON config files found in JAR dependencies:\n" + files.map( " "+_._1).mkString("","\n","\n\n")) 122 | files 123 | } 124 | 125 | private def fin(file: File): BufferedInputStream = new BufferedInputStream(new FileInputStream(file)) 126 | 127 | } 128 |
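As the note at the end of projectSettings explains, the plugin cannot safely wire shoconConcat into compile for both JVM and JS projects, so each build declares the dependency itself (the scripted test below does exactly that). A sketch of the wiring in a user's build.sbt; the project name is hypothetical:

// build.sbt of a project that uses sbt-shocon
lazy val myApp = project
  .enablePlugins(ShoconPlugin)
  .settings(
    // regenerate shocon.conf before every compilation
    compile in Compile := (compile in Compile).dependsOn(shoconConcat).value
  )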
-------------------------------------------------------------------------------- /plugin/src/sbt-test/shocon/basic/build.sbt: -------------------------------------------------------------------------------- 1 | 2 | lazy val root = crossProject.in(file(".")) 3 | .dependsOn(lib) 4 | .enablePlugins(ShoconPlugin) 5 | .settings( 6 | scalaVersion := "2.12.4", 7 | name := "basic", 8 | version := "0.1.0-SNAPSHOT", 9 | description := "Basic test for the shocon sbt plugin", 10 | libraryDependencies += "org.akka-js" %%% "shocon" % sys.props.getOrElse("plugin.version", sys.error("'plugin.version' system property is not set")), 11 | compile in Compile := (compile in Compile).dependsOn(shoconConcat).value 12 | ) 13 | .jsSettings( 14 | scalaJSUseMainModuleInitializer := true 15 | ) 16 | 17 | 18 | lazy val rootJVM = root.jvm 19 | lazy val rootJS = root.js 20 | 21 | lazy val lib = crossProject.in(file("lib")) 22 | .enablePlugins(ShoconPlugin) 23 | .settings( 24 | scalaVersion := "2.12.2", 25 | name := "lib" 26 | ) 27 | 28 | lazy val libJVM = lib.jvm 29 | lazy val libJS = lib.js 30 | -------------------------------------------------------------------------------- /plugin/src/sbt-test/shocon/basic/js/src/main/resources/application.conf: -------------------------------------------------------------------------------- 1 | 2 | # override reference.conf 3 | basic.overriden = true 4 | 5 | app { 6 | 7 | name = "basic" 8 | 9 | } 10 | -------------------------------------------------------------------------------- /plugin/src/sbt-test/shocon/basic/js/src/main/resources/reference.conf: -------------------------------------------------------------------------------- 1 | # reference.conf 2 | 3 | basic { 4 | 5 | id = 42 6 | 7 | overriden = false 8 | 9 | } 10 | -------------------------------------------------------------------------------- /plugin/src/sbt-test/shocon/basic/jvm/src/main/resources/application.conf: -------------------------------------------------------------------------------- 1 | 2 | # override reference.conf 3 | basic.overriden = true 4 | 5 | app { 6 | 7 | name = "basic" 8 | 9 | } 10 | -------------------------------------------------------------------------------- /plugin/src/sbt-test/shocon/basic/jvm/src/main/resources/reference.conf: -------------------------------------------------------------------------------- 1 | # reference.conf 2 | 3 | basic { 4 | 5 | id = 42 6 | 7 | overriden = false 8 | 9 | } 10 | -------------------------------------------------------------------------------- /plugin/src/sbt-test/shocon/basic/lib/js/src/main/resources/reference.conf: -------------------------------------------------------------------------------- 1 | 2 | lib { 3 | loaded = true 4 | 5 | id = 0 6 | } 7 | -------------------------------------------------------------------------------- /plugin/src/sbt-test/shocon/basic/lib/jvm/src/main/resources/reference.conf: -------------------------------------------------------------------------------- 1 | 2 | lib { 3 | loaded = true 4 | 5 | id = 0 6 | } 7 | -------------------------------------------------------------------------------- /plugin/src/sbt-test/shocon/basic/project/plugins.sbt: -------------------------------------------------------------------------------- 1 | addSbtPlugin("org.portable-scala" % "sbt-scalajs-crossproject" % "1.0.0") 2 | addSbtPlugin("org.scala-js" % "sbt-scalajs" % "1.0.1") 3 | 4 | addSbtPlugin("org.akka-js" % "sbt-shocon" % sys.props.getOrElse("plugin.version", sys.error("'plugin.version' system property is not set"))) 5 | -------------------------------------------------------------------------------- /plugin/src/sbt-test/shocon/basic/shared/src/main/scala/Main.scala: -------------------------------------------------------------------------------- 1 | import com.typesafe.config.ConfigFactory 2 | 3 | object Main { 4 | def main(args: Array[String]): Unit = { 5 | val config = ConfigFactory.load() 6 | 7 | // check that reference.conf in lib was loaded 8 | assert( config.getBoolean("lib.loaded") == true ) 9 | 10 | // check that reference.conf in this project was loaded after reference.conf in lib 11 | assert( config.getInt("basic.id") == 42 ) 12 | 13 | // check that application.conf was 
loaded 14 | assert( config.getBoolean("basic.overriden") == true ) 15 | assert( config.getString("app.name") == "basic" ) 16 | } 17 | } 18 | -------------------------------------------------------------------------------- /plugin/src/sbt-test/shocon/basic/test: -------------------------------------------------------------------------------- 1 | > rootJS/run 2 | > rootJVM/run 3 | # $ exists js/target/scala-2.12/shocon.conf 4 | -------------------------------------------------------------------------------- /project/Common.scala: -------------------------------------------------------------------------------- 1 | import sbt._ 2 | import Keys._ 3 | 4 | object Common extends AutoPlugin { 5 | override def trigger = allRequirements 6 | override lazy val projectSettings = super.projectSettings ++ Seq( 7 | name := "shocon", 8 | organization := "org.akka-js", 9 | version := "1.0.0", 10 | scalaVersion := "2.13.1", 11 | crossScalaVersions := 12 | Vector("2.12.10", "2.13.1"), 13 | pomExtra := { 14 | <url>https://github.com/akka-js/shocon</url> 15 | <licenses> 16 | <license> 17 | <name>Apache 2</name> 18 | <url>http://www.apache.org/licenses/LICENSE-2.0.txt</url> 19 | </license> 20 | </licenses> 21 | <scm> 22 | <connection>scm:git:github.com/akka-js/shocon</connection> 23 | <developerConnection>scm:git:git@github.com:akka-js/shocon</developerConnection> 24 | <url>github.com/akka-js/shocon</url> 25 | </scm> 26 | <developers> 27 | <developer> 28 | <id>evacchi</id> 29 | <name>Edoardo Vacchi</name> 30 | <url>https://github.com/evacchi/</url> 31 | </developer> 32 | <developer> 33 | <id>andreaTP</id> 34 | <name>Andrea Peruffo</name> 35 | <url>https://github.com/andreaTP/</url> 36 | </developer> 37 | </developers> 38 | }, 39 | publishMavenStyle := true, 40 | pomIncludeRepository := { x => false }, 41 | publishTo := { 42 | val nexus = "https://oss.sonatype.org/" 43 | if (isSnapshot.value) 44 | Some("snapshots" at nexus + "content/repositories/snapshots") 45 | else 46 | Some("releases" at nexus + "service/local/staging/deploy/maven2") 47 | }, 48 | credentials += Credentials(Path.userHome / ".ivy2" / "sonatype.credentials") 49 | ) 50 | } 51 | -------------------------------------------------------------------------------- /project/build.properties: -------------------------------------------------------------------------------- 1 | sbt.version=1.3.8 2 | -------------------------------------------------------------------------------- /project/plugins.sbt: -------------------------------------------------------------------------------- 1 | 2 | addSbtPlugin("org.xerial.sbt" % "sbt-sonatype" % "2.3") 3 | 4 | addSbtPlugin("com.jsuereth" % "sbt-pgp" % "1.1.0") 5 | 6 | addSbtPlugin("org.portable-scala" % "sbt-scalajs-crossproject" % "1.0.0") 7 | addSbtPlugin("org.scala-js" % "sbt-scalajs" % "1.0.1") 8 | 9 | // addSbtPlugin("com.lucidchart" % "sbt-scalafmt" % "1.15") 10 | 11 | resolvers += "sonatype-releases" at "https://oss.sonatype.org/content/repositories/releases/" 12 | -------------------------------------------------------------------------------- /publish.sh: -------------------------------------------------------------------------------- 1 | #! /bin/sh 2 | 3 | sbt clean 4 | 5 | sbt ";++2.12.10;facadeJVM/publishLocal;parserJVM/publishLocal;facadeJS/publishSigned;facadeJVM/publishSigned;parserJS/publishSigned;parserJVM/publishSigned" 6 | 7 | sbt ";++2.13.1;facadeJVM/publishLocal;parserJVM/publishLocal;facadeJS/publishSigned;facadeJVM/publishSigned;parserJS/publishSigned;parserJVM/publishSigned" 8 | 9 | sbt sonatypeReleaseAll 10 | 11 | cd plugin 12 | 13 | sbt ";^^1.3.5;publishSigned" 14 | 15 | sbt sonatypeReleaseAll 16 | 17 | cd .. 
18 | -------------------------------------------------------------------------------- /scalastyle-config.xml: -------------------------------------------------------------------------------- 1 | Scalastyle standard configuration [the XML rule definitions of this file were lost in extraction and are not recoverable] -------------------------------------------------------------------------------- /shared/src/main/scala/org/akkajs/shocon/ConfigMacroLoader.scala: -------------------------------------------------------------------------------- 1 | package org.akkajs 2 | 3 | import scala.language.experimental.macros 4 | import scala.reflect.macros.blackbox.Context 5 | 6 | import fastparse.Parsed 7 | 8 | object ConfigMacroLoader { 9 | 10 | import org.akkajs.shocon.verboseLog 11 | 12 | def setVerboseLogImpl(c: Context)(): c.Expr[Unit] = { 13 | import c.universe._ 14 | 15 | verboseLog = true 16 | 17 | c.Expr[Unit](q"{}") 18 | } 19 | 20 | def parse(c: Context)(input: c.Expr[String]): c.Expr[shocon.Config.Value] = { 21 | import c.universe._ 22 | 23 | // inspiration from: https://github.com/scala/scala/blob/v2.12.6/src/reflect/scala/reflect/api/StandardLiftables.scala 24 | // thanks @blaisorblade 25 | def lift[T: Liftable](value: T): Tree = implicitly[Liftable[T]].apply(value) 26 | 27 | def selectShocon(names: Name*) = names.foldLeft(q"_root_.org.akkajs.shocon": Tree) { Select(_, _) } 28 | def callPackage(names: Name*)(args: List[Tree]) = Apply(selectShocon(names: _*), args) 29 | def callConfig(name: Name)(args: List[Tree]) = callPackage(TermName("Config"), name)(args) 30 | 31 | def callApply(str: String)(args: List[Tree]) = callConfig(TermName(str))(args) 32 | 33 | def callPackageConst(names: Name*) = selectShocon(names: _*) 34 | def callConfigConst(name: Name) = 35 | callPackageConst(TermName("Config"), name) 36 | 37 | def callConst(str: String) = callConfigConst(TermName(str)) 38 | 39 | implicit def liftConfigValue: Liftable[org.akkajs.shocon.Config.Value] = 40 | Liftable { cfg => 41 | cfg match { 42 | case v: org.akkajs.shocon.Config.SimpleValue => 43 | v match { 44 | case nl: org.akkajs.shocon.Config.NumberLiteral => 45 | callApply("NumberLiteral")(lift(nl.value) :: Nil) 46 | case sl: org.akkajs.shocon.Config.StringLiteral => 47 | callApply("StringLiteral")(lift(sl.value) :: Nil) 48 | case bl: org.akkajs.shocon.Config.BooleanLiteral => 49 | callApply("BooleanLiteral")(lift(bl.value) :: Nil) 50 | case _ => 51 | callConst("NullLiteral") 52 | } 53 | case arr: org.akkajs.shocon.Config.Array => 54 | callApply("Array"){ 55 | val arrayBody = arr.elements.map(lift(_)).toList 56 | 57 | q"Seq( ..$arrayBody )" :: Nil 58 | } 59 | case obj: org.akkajs.shocon.Config.Object => 60 | callApply("Object"){ 61 | val mapBody = obj.fields.map{ 62 | case (k, v) => q"($k, $v)" 63 | } 64 | 65 | q"Map( ..$mapBody )" :: Nil 66 | } 67 | } 68 | } 69 | 70 | def fallback() = { 71 | if (verboseLog) 72 | c.warning(c.enclosingPosition, "[shocon-parser] fallback to runtime parser") 73 | 74 | c.Expr[shocon.Config.Value](q"""{ 75 | org.akkajs.shocon.ConfigParser.parseString($input) match { 76 | case fastparse.Parsed.Success(v, _) => v 77 | case f: fastparse.Parsed.Failure => throw new Error(f.msg) 78 | } 79 | }""") 80 | } 81 | 82 | input.tree match { 83 | case q"""$strLit""" => 84 | strLit match { 85 | case Literal(Constant(str)) => 86 | 87 | val config = 88 | org.akkajs.shocon.Config(str.toString) 89 | 90 | val ast = liftConfigValue(config) 91 | 92 | try { 93 | c.typecheck(ast) 94 | 95 | if (verboseLog) 96 | c.info(c.enclosingPosition, "[shocon-parser] optimized at compile time", false) 97 | 98 | c.Expr[shocon.Config.Value](ast) 99 | } catch { 100 | case err: Throwable => 101 | fallback() 102 | } 103 | case _ => 104 | fallback() 105 | } 106 | } 107 | } 108 | 109 | }
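The parse macro above lifts a constant HOCON literal into an inlined Config.Value tree and emits a call to the runtime parser otherwise. A sketch of the two paths from user code; the object and values are illustrative only:

import org.akkajs.shocon

object GenDemo {
  // constant literal: parsed at compile time and inlined via liftConfigValue
  val static = shocon.Config.gen("key = value")

  // non-constant argument: the macro expands to the runtime fallback parser
  def dynamic(input: String) = shocon.Config.gen(input)
}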
-------------------------------------------------------------------------------- /shared/src/main/scala/org/akkajs/shocon/ConfigParser.scala: -------------------------------------------------------------------------------- 1 | package org.akkajs.shocon 2 | 3 | import fastparse._ 4 | import NoWhitespace._ // or we can refactor to use an upstream whitespace handler 5 | 6 | object ConfigParser { 7 | case class NamedFunction[T, V](f: T => V, name: String) extends (T => V){ 8 | def apply(t: T) = f(t) 9 | override def toString() = name 10 | } 11 | 12 | val isWhitespace = (c: Char) => 13 | c match { 14 | // try to hit the most common ASCII ones first, then the nonbreaking 15 | // spaces that Java brokenly leaves out of isWhitespace. 16 | case ' '|'\n'|'\u00A0'|'\u2007'|'\u202F'|'\uFEFF' /* BOM */ => true; 17 | case _ => Character.isWhitespace(c); 18 | } 19 | 20 | val isWhitespaceNoNl = (c: Char) => c != '\n' && isWhitespace(c) 21 | 22 | // *** Lexing *** 23 | // val Whitespace = NamedFunction(isWhitespace, "Whitespace") 24 | def letter[_ : P] = P( lowercase | uppercase ) 25 | def lowercase[_ : P] = P( CharIn("a-z") ) 26 | def uppercase[_ : P] = P( CharIn("A-Z") ) 27 | def digit[_ : P] = P( CharIn("0-9") ) 28 | 29 | val Digits = NamedFunction('0' to '9' contains (_: Char), "Digits") 30 | val StringChars = NamedFunction(!"\"\\".contains(_: Char), "StringChars") 31 | val UnquotedStringChars = NamedFunction(!isWhitespaceNoNl(_: Char), "UnquotedStringChars") 32 | 33 | def keyValueSeparator[_ : P] = P( CharIn(":=")) 34 | 35 | // whitespace 36 | def comment[_ : P] = P( ("//" | "#") ~ CharsWhile(_ != '\n', 0) ) 37 | def nlspace[_ : P] = P( (CharsWhile(isWhitespace, 1) | comment ).rep ) 38 | def space[_ : P] = P( ( CharsWhile(isWhitespaceNoNl, 1) | comment ).rep ) 39 | 40 | def hexDigit[_ : P] = P( CharIn("0-9", "a-f", "A-F") ) 41 | def unicodeEscape[_ : P] = P( "u" ~ hexDigit ~ hexDigit ~ hexDigit ~ hexDigit ) 42 | def escape[_ : P] = P( "\\" ~ (CharIn("\"/\\bfnrt") | unicodeEscape) ) 43 | 44 | // strings 45 | def strChars[_ : P] = P( CharsWhile(StringChars) ) 46 | def quotedString[_ : P] = P( "\"" ~/ (strChars | escape).rep.! ~ "\"") 47 | def unquotedString[_ : P] = P ( ( (letter | digit | "_" | "-" | "." | "/").rep(1).! ).rep(1,CharsWhile(_.isSpaceChar)).! ) 48 | def string[_ : P] = P(nlspace) ~ P(quotedString|unquotedString|CharsWhile(_.isSpaceChar).!) // a bit of a hack: this parses whitespace up to the end of the line 49 | .rep(1).map(_.mkString.trim) // so we trim the remaining right side 50 | .map(Config.StringLiteral) 51 | 52 | // *** Parsing *** 53 | def array[_: P]: P[Seq[Config.Value]] = P( "[" ~ nlspace ~/ jsonExpr.rep(sep=itemSeparator) ~ nlspace ~ ",".? 
~ nlspace ~ "]") 54 | 55 | def repeatedArray[_ :P]: P[Config.Array] = 56 | array.rep(min = 1, sep=nlspace).map( ( arrays: Seq[Seq[Config.Value]] ) => Config.Array ( arrays.flatten ) ) 57 | 58 | def pair[_: P]: P[(String, Config.Value)] = P( string.map(_.value) ~/ space ~ 59 | ((keyValueSeparator ~/ jsonExpr ) 60 | |(repeatedObj ~ space)) ) 61 | 62 | def obj[_: P]: P[Seq[(String, Config.Value)]] = P( "{" ~/ objBody ~ "}") 63 | 64 | def repeatedObj[_: P]: P[Config.Object] = 65 | obj.rep(min = 1, sep=nlspace).map(fields => Config.Object(Map( fields.flatten :_*) )) 66 | 67 | def itemSeparator[_: P] = P(("\n" ~ nlspace ~ ",".?)|(("," ~ nlspace).?)) 68 | 69 | def objBody[_: P] = P( pair.rep(sep=itemSeparator) ~ nlspace ) // .log() 70 | 71 | def jsonExpr[_: P] = P( space ~ (repeatedObj | repeatedArray | string) ~ space ) // .log() 72 | 73 | def root[_: P] = P( (&(space ~ "{") ~/ obj )|(objBody) ~ End ).map( x => Config.Object.fromPairs(x) ) // .log() 74 | 75 | def parseString(str: String) = parse(str, root(_)) 76 | 77 | } 78 | -------------------------------------------------------------------------------- /shared/src/main/scala/org/akkajs/shocon/Extractors.scala: -------------------------------------------------------------------------------- 1 | package org.akkajs.shocon 2 | 3 | import java.{util => ju} 4 | import scala.collection.compat._ 5 | import scala.jdk.CollectionConverters._ 6 | 7 | case class Extractor[T](pf: PartialFunction[Config.Value, T], val serial: Int) { 8 | def apply(c: Config.Value) = pf.apply(c) 9 | def applyOrElse(c: Config.Value, fallback: PartialFunction[Config.Value, T]) = 10 | pf.applyOrElse(c, fallback) 11 | } 12 | 13 | trait Extractors { 14 | 15 | implicit val BooleanExtractor: Extractor[Boolean] = Extractor({ 16 | case Config.StringLiteral(v) => v.trim match { 17 | case "true" | "on" | "yes" => true 18 | case "false" | "off" | "no" => false 19 | case _ => throw new IllegalArgumentException(s"Cannot convert '$v' to boolean") 20 | } 21 | }, 1) 22 | implicit val StringExtractor: Extractor[String] = Extractor({ 23 | case Config.StringLiteral(v) => v 24 | }, 2) 25 | implicit val DoubleExtractor: Extractor[Double] = Extractor({ 26 | case Config.StringLiteral(v) => v.toDouble 27 | }, 3) 28 | implicit val LongExtractor: Extractor[Long] = Extractor({ 29 | case Config.StringLiteral(v) => v.toLong 30 | }, 4) 31 | implicit val IntExtractor: Extractor[Int] = Extractor({ 32 | case Config.StringLiteral(v) => v.toInt 33 | }, 5) 34 | implicit def SeqExtractor[T](implicit ex: Extractor[T]): Extractor[Seq[T]] = Extractor({ 35 | case Config.Array(seq) => seq.map(ex.apply(_)) 36 | }, 6) 37 | implicit def juListExtractor[T](implicit ex: Extractor[T]): Extractor[ju.List[T]] = Extractor({ 38 | case Config.Array(seq) => seq.map(ex.apply(_)).asJava 39 | }, 7) 40 | implicit def MapExtractor[T](implicit ex: Extractor[T]): Extractor[Map[String, T]] = Extractor({ 41 | case Config.Object(keyValues) => keyValues.map{ case (k,v) => (k, ex.apply(v)) } 42 | }, 8) 43 | implicit val GenericExtractor: Extractor[Config.Value] = Extractor({ 44 | case x => x 45 | }, 9) 46 | implicit val ObjectExtractor: Extractor[Config.Object] = Extractor({ 47 | case x : Config.Object => x 48 | }, 10) 49 | 50 | } 51 | 52 | object Extractors extends Extractors 53 |
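Every scalar comes out of the parser as a Config.StringLiteral, so the numeric extractors above do the string-to-number conversion, and the collection extractors compose with an element extractor. A small sketch of direct Extractor use; the values are illustrative only (the more convenient ConfigOps.as syntax is defined in SHocon.scala below):

import org.akkajs.shocon._

object ExtractorDemo {
  // IntExtractor converts the string form of a scalar
  val three: Int = Extractors.IntExtractor(Config.StringLiteral("3"))

  // SeqExtractor[Int] resolves IntExtractor implicitly for the elements
  val ports: Seq[Int] = Extractors.SeqExtractor[Int].apply(
    Config.Array(Seq(Config.StringLiteral("80"), Config.StringLiteral("443"))))
}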
-------------------------------------------------------------------------------- /shared/src/main/scala/org/akkajs/shocon/SHocon.scala: -------------------------------------------------------------------------------- 1 | package org.akkajs 2 | 3 | import scala.util.Try 4 | 5 | import scala.language.experimental.macros 6 | import fastparse.Parsed 7 | import scala.collection.compat._ 8 | 9 | package object shocon extends Extractors { 10 | 11 | var verboseLog = false 12 | 13 | def setVerboseLog(): Unit = macro ConfigMacroLoader.setVerboseLogImpl 14 | 15 | object Config { 16 | type Key = String 17 | 18 | sealed trait Value { 19 | def unwrapped: Any 20 | } 21 | 22 | case class Array(elements: Seq[Value]) extends Value { 23 | lazy val unwrapped = elements.map(_.unwrapped) 24 | } 25 | case class Object(fields: Map[Key, Value]) extends Value { 26 | lazy val unwrapped = fields.view.mapValues(_.unwrapped).to(Seq) 27 | } 28 | 29 | trait SimpleValue extends Value 30 | 31 | private def unwrapStringAsNumber(value: String): Try[Any] = 32 | Try { 33 | value.toInt 34 | }.recover { 35 | case _ => value.toLong 36 | }.recover { 37 | case _ => value.toDouble 38 | } 39 | 40 | case class NumberLiteral(value: String) extends SimpleValue { 41 | lazy val unwrapped = unwrapStringAsNumber(value).get 42 | } 43 | case class StringLiteral(value: String) extends SimpleValue { 44 | lazy val unwrapped = 45 | Try(this.as[Boolean].get) 46 | .orElse(unwrapStringAsNumber(value)) 47 | .getOrElse(value) 48 | } 49 | case class BooleanLiteral(value: Boolean) extends SimpleValue { 50 | lazy val unwrapped = value 51 | } 52 | case object NullLiteral extends SimpleValue { 53 | def unwrapped = null 54 | } 55 | 56 | def gen(input: String): Config.Value = macro ConfigMacroLoader.parse 57 | 58 | /* these methods are kept only for backwards compatibility and as fallbacks */ 59 | def parse(input: String) = ConfigParser.parseString(input) 60 | def apply(input: String): Config.Value = parse(input) match{ 61 | case Parsed.Success(v,_) => v 62 | case f: Parsed.Failure => throw new Error(f.msg) 63 | } 64 | def fromFile(path: String) = apply(io.Source.fromFile(path).mkString) 65 | 66 | object Object { 67 | def fromPairs(pairs: Seq[(Key, Value)]): Object = { 68 | val os = pairs.map{ case (k,v) => reparseKey(k,v) } 69 | os.foldLeft(shocon.Config.Object(Map()))(mergeConfigs) 70 | } 71 | def reparseKey(key: Key, value: Value): Object = { 72 | val pos = key.indexOf('.') 73 | if (pos < 0) shocon.Config.Object(Map(key -> value)) 74 | else { 75 | val splitted = key.split('.').reverse 76 | 77 | splitted.tail.foldLeft(shocon.Config.Object(Map(splitted.head -> value))){ 78 | case (acc, elem) => 79 | shocon.Config.Object(Map(elem -> acc)) 80 | } 81 | } 82 | } 83 | 84 | def mergeValues(base: Value, mergeable: Value): Value = { 85 | if (base == mergeable) base 86 | else 87 | (base, mergeable) match { 88 | case (m1: Object, m2: Object) => 89 | mergeConfigs(m1, m2) 90 | case (Array(seq1), Array(seq2)) => 91 | Array(seq1 ++ seq2) 92 | case (v1, v2) => v2 // always the second wins 93 | } 94 | } 95 | 96 | def mergeConfigs(base: Object, mergeable: Object): Object = { 97 | if (base == mergeable) base 98 | else { 99 | val baseKeys = base.fields.keys.toSet 100 | // all keys of mergeable that are not present in base 101 | val diff = mergeable.fields.keys.filterNot(baseKeys.contains).toSet 102 | // m contains the keys of both base and mergeable; 103 | // when a key is present in both, the two values are merged 104 | 105 | val m = base.fields.map { 106 | case (k, v) => 107 | mergeable.fields.get(k) match { 108 | case Some(v2) => 109 | k -> mergeValues(v, v2) 110 | case _ => 111 | k -> v 112 | } 113 | } ++ mergeable.fields.view.filter(e => diff.contains(e._1)) 114 | Object(m) 115 | } 116 | } 117 | } 118 | } 119 | 120 | 121 | implicit class ConfigOps(val 
tree: Config.Value) { 122 | def as[T](implicit ev: Extractor[T]): Option[T] = Option( ev.applyOrElse(tree, null) ) 123 | def apply(key: String): Config.Value = get(key).get 124 | def get(key: String): Option[Config.Value] = { 125 | val keys = key.split('.') 126 | def visit(v: Config.Value, keys: Seq[String]): Option[Config.Value] = v match { 127 | case _ if (keys.isEmpty) => Some(v) 128 | case Config.Object(fields) => 129 | if (fields.contains(keys.head)) 130 | visit(fields(keys.head), keys.tail) 131 | else { 132 | None 133 | } 134 | case _ => None // a non-object value cannot be traversed further 135 | } 136 | visit(tree, keys.toIndexedSeq) 137 | } 138 | 139 | // def getOrElse[T](fallback: => Config.Value)(implicit ev: Extractor[T]): T = 140 | // apply(key)(ev).getOrElse(fallback.get(key)(ev)) 141 | } 142 | 143 | } 144 | -------------------------------------------------------------------------------- /travis/setNodeVersion.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | if [ ! -z "$TRAVIS_NODE_VERSION" ]; then 4 | rm -rf ~/.nvm 5 | git clone https://github.com/creationix/nvm.git ~/.nvm 6 | (cd ~/.nvm && git checkout `git describe --abbrev=0 --tags`) 7 | source ~/.nvm/nvm.sh 8 | nvm install $TRAVIS_NODE_VERSION 9 | npm install 10 | fi; 11 | -------------------------------------------------------------------------------- /travis/testSbtPlugin.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | if [ ! -z "$SBT_VERSION" ]; then 4 | sbt ++$TRAVIS_SCALA_VERSION publishLocal 5 | cd plugin 6 | sbt ^^$SBT_VERSION 'scripted shocon/basic' 7 | fi; 8 | --------------------------------------------------------------------------------
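To round off the shared sources, a short sketch of the runtime entry points defined above: Config.apply parses at runtime, while ConfigOps.get walks dotted paths and returns None for missing keys (apply is the unsafe variant). Object name and keys are illustrative:

import org.akkajs.shocon._

object PathDemo {
  def main(args: Array[String]): Unit = {
    val cfg = Config("akka { loglevel = INFO }")

    // dotted-path lookup plus extractor-based conversion
    assert(cfg.get("akka.loglevel").flatMap(_.as[String]) == Some("INFO"))

    // absent paths yield None instead of throwing
    assert(cfg.get("akka.missing").isEmpty)
  }
}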