├── .gitattributes
├── .gitignore
├── .scalafmt.conf
├── CNAME
├── LICENSE
├── README.md
├── build.sbt
├── docker-compose.yml
├── docs
├── ETL功能.docx
├── changelogs
│ └── 2018.md
├── doc
│ ├── broker
│ │ ├── core.html
│ │ ├── index.html
│ │ └── leader.html
│ ├── connector
│ │ ├── connector.html
│ │ ├── core.html
│ │ └── index.html
│ ├── console
│ │ ├── console
│ │ │ └── console.html
│ │ ├── index.html
│ │ └── orchestration
│ │ │ └── orchestration.html
│ ├── css
│ │ ├── banner.css
│ │ ├── fonts
│ │ │ ├── icons.eot
│ │ │ ├── icons.svg
│ │ │ ├── icons.ttf
│ │ │ ├── icons.woff
│ │ │ ├── icons.woff2
│ │ │ ├── proxima-nova-bold.eot
│ │ │ ├── proxima-nova-bold.ttf
│ │ │ ├── proxima-nova-bold.woff
│ │ │ ├── proxima-nova-regular.eot
│ │ │ ├── proxima-nova-regular.ttf
│ │ │ ├── proxima-nova-regular.woff
│ │ │ ├── source-code-pro-regular.eot
│ │ │ ├── source-code-pro-regular.ttf
│ │ │ └── source-code-pro-regular.woff
│ │ ├── icons.css
│ │ └── page-6.css
│ ├── design
│ │ ├── code-level.html
│ │ ├── index.html
│ │ ├── mass-design.html
│ │ └── thinking.html
│ ├── images
│ │ ├── akka-icon-reverse.svg
│ │ ├── akka-icon.svg
│ │ ├── akka-logo-reverse.svg
│ │ ├── android-chrome-192x192.png
│ │ ├── android-chrome-512x512.png
│ │ ├── apple-touch-icon.png
│ │ ├── favicon-16x16.png
│ │ ├── favicon-32x32.png
│ │ ├── favicon.ico
│ │ ├── footer-background.jpg
│ │ ├── header-background.jpg
│ │ ├── manifest.json
│ │ ├── mstile-150x150.png
│ │ └── safari-pinned-tab.svg
│ ├── index.html
│ ├── intro
│ │ └── index.html
│ ├── job
│ │ ├── configuration.html
│ │ ├── developer.html
│ │ ├── feature.html
│ │ ├── get_start.html
│ │ ├── index.html
│ │ ├── install.html
│ │ └── intro.html
│ ├── js
│ │ ├── groups.js
│ │ ├── magellan.js
│ │ ├── metadata-toggle.js
│ │ ├── page.js
│ │ └── scrollsneak.js
│ ├── lib
│ │ ├── foundation
│ │ │ └── dist
│ │ │ │ ├── css
│ │ │ │ └── foundation.min.css
│ │ │ │ └── js
│ │ │ │ └── foundation.min.js
│ │ ├── jquery
│ │ │ └── jquery.min.js
│ │ ├── normalize.css
│ │ │ └── normalize.css
│ │ └── prettify
│ │ │ ├── lang-scala.js
│ │ │ ├── prettify.css
│ │ │ └── prettify.js
│ ├── paradox.json
│ ├── rdi
│ │ ├── code
│ │ │ └── workflow
│ │ │ │ └── pg2mysql-graph.xml
│ │ ├── core.html
│ │ ├── extension.html
│ │ ├── img
│ │ │ ├── Reactive-Flow.png
│ │ │ └── Reactive-Flow.svg
│ │ ├── index.html
│ │ └── workflow.html
│ ├── spec
│ │ ├── business_spec
│ │ │ ├── business_spec.html
│ │ │ ├── job.json
│ │ │ ├── job.xml
│ │ │ └── rowFieldConfigure.json
│ │ ├── component_spec
│ │ │ └── component_spec.html
│ │ ├── ic_spec
│ │ │ └── ic_spec.html
│ │ ├── imgs
│ │ │ ├── 白皮书-一体化全流程的数据资产管理体系.svg
│ │ │ ├── 白皮书-服务于全行业和全客户.svg
│ │ │ └── 白皮书-海量数据资产平台(MassData).svg
│ │ ├── index.html
│ │ └── white_paper.html
│ └── static
│ │ ├── BrokerAppSystem.svg
│ │ ├── DataProcessVisual.svg
│ │ ├── JobTaskStageFlow.png
│ │ ├── RDPTechArchitecture.png
│ │ ├── RDPTechArchitecture.svg
│ │ ├── SchedulerPlatformArchitecture.svg
│ │ └── SeaBrokerArchitecture.png
└── index.html
├── example
└── src
│ ├── main
│ └── scala
│ │ └── example
│ │ ├── BalancingPoolDemo.scala
│ │ └── akkastream
│ │ ├── KillSwitch.scala
│ │ ├── basic
│ │ ├── ActorMaterializerLifecycleDemo.scala
│ │ ├── BidiShapeDemo.scala
│ │ ├── CombiningMaterializedValues.scala
│ │ ├── Graph1.scala
│ │ ├── Graph2.scala
│ │ ├── Graph3.scala
│ │ ├── GraphComponent.scala
│ │ ├── PartialGraph.scala
│ │ ├── PartialGraph2.scala
│ │ └── SimplifiedAPI.scala
│ │ ├── buffer
│ │ ├── BufferExample.scala
│ │ ├── BufferProblem.scala
│ │ └── ExtrapolateExpand.scala
│ │ ├── dynamichub
│ │ ├── MergeHubDemo.scala
│ │ └── SimplePublishSubscribe.scala
│ │ ├── graph
│ │ ├── Deadlocks.scala
│ │ ├── MaterializeValue.scala
│ │ └── PartialGraph.scala
│ │ └── streamio
│ │ └── EchoDemo.scala
│ └── multi-jvm
│ └── scala
│ └── sample
│ ├── MultiNodeSampleSpec.scala
│ ├── Sample.scala
│ ├── SampleMultiJvmNode1.opts
│ ├── SampleMultiJvmNode2.opts
│ ├── SampleMultiJvmNode3.opts
│ └── Sepc.scala
├── licenses
└── jsch.txt
├── mass-api-service
├── docs
│ └── api-service-design.md
└── src
│ └── main
│ └── scala
│ └── mass
│ └── apiservice
│ └── boot
│ └── ApiServiceMain.scala
├── mass-common
└── src
│ └── main
│ └── scala
│ └── mass
│ └── common
│ ├── page
│ └── Page.scala
│ └── util
│ └── FileUtils.scala
├── mass-connector
└── src
│ ├── main
│ ├── resources
│ │ └── reference.conf
│ └── scala
│ │ └── mass
│ │ └── connector
│ │ ├── Connector.scala
│ │ ├── ConnectorParser.scala
│ │ ├── ConnectorSetting.scala
│ │ ├── ConnectorSystem.scala
│ │ ├── DataElement.scala
│ │ ├── boot
│ │ └── ConnectorMain.scala
│ │ └── sql
│ │ ├── Databases.scala
│ │ ├── EventDataSql.scala
│ │ ├── JdbcFlow.scala
│ │ ├── JdbcSink.scala
│ │ ├── JdbcSinkResult.scala
│ │ ├── JdbcSinkStage.scala
│ │ ├── JdbcSource.scala
│ │ ├── JdbcSourceStage.scala
│ │ ├── SQLConnector.scala
│ │ ├── SQLConnectorParser.scala
│ │ ├── package.scala
│ │ └── schema
│ │ ├── PostgresSchema.scala
│ │ ├── Schemas.scala
│ │ └── infos.scala
│ └── test
│ ├── resources
│ ├── application.conf
│ └── logback-test.xml
│ └── scala
│ ├── kafkasample
│ └── demo
│ │ ├── Consumer.scala
│ │ ├── Customer.scala
│ │ └── Producer.scala
│ └── mass
│ └── connector
│ └── sql
│ ├── PostgresMySQLTest.scala
│ ├── SQLConnectorTest.scala
│ ├── TestSchema.scala
│ ├── Ys2LocalTest.scala
│ └── schema
│ └── SQLSchemaTest.scala
├── mass-console
└── src
│ ├── main
│ ├── resources
│ │ ├── logback.xml
│ │ └── reference.conf
│ └── scala
│ │ └── mass
│ │ └── console
│ │ ├── ConsoleMain.scala
│ │ └── web
│ │ └── api
│ │ └── Routes.scala
│ ├── test
│ ├── resources
│ │ ├── application.conf
│ │ └── logback-test.xml
│ └── scala
│ │ └── mass
│ │ └── console
│ │ └── AkkaHttpTest.scala
│ └── universal
│ └── conf
│ ├── application.ini
│ ├── dev.conf
│ ├── jvmopts
│ ├── logback.xml
│ ├── prod.conf
│ └── test.conf
├── mass-core-ext
└── src
│ ├── main
│ ├── resources
│ │ └── reference.conf
│ └── scala
│ │ ├── com
│ │ └── github
│ │ │ └── tminglei
│ │ │ └── slickpg
│ │ │ └── ArraySupport.scala
│ │ └── mass
│ │ ├── core
│ │ ├── broker
│ │ │ └── Broker.scala
│ │ ├── ext
│ │ │ └── MassApplication.scala
│ │ └── module
│ │ │ └── MassCoreModule.scala
│ │ ├── db
│ │ └── slick
│ │ │ ├── AggAggregateLibrary.scala
│ │ │ ├── AggFuncSupport.scala
│ │ │ ├── PgJacksonJsonSupport.scala
│ │ │ ├── PgProfile.scala
│ │ │ └── SqlComponent.scala
│ │ ├── job
│ │ ├── JobClassJob.scala
│ │ ├── JobRunner.scala
│ │ ├── JobScheduler.scala
│ │ ├── JobSettings.scala
│ │ ├── db
│ │ │ └── model
│ │ │ │ ├── QrtzBlobTriggersModelTable.scala
│ │ │ │ ├── QrtzCalendarsModelTable.scala
│ │ │ │ ├── QrtzCronTriggersModelTable.scala
│ │ │ │ ├── QrtzFiredTriggersModelTable.scala
│ │ │ │ ├── QrtzJobDetailsModelTable.scala
│ │ │ │ ├── QrtzLocksModelTable.scala
│ │ │ │ ├── QrtzModels.scala
│ │ │ │ ├── QrtzPausedTriggerGrpsModelTable.scala
│ │ │ │ ├── QrtzSchedulerStateModelTable.scala
│ │ │ │ ├── QrtzSimpleTriggersModelTable.scala
│ │ │ │ ├── QrtzSimpropTriggersModelTable.scala
│ │ │ │ ├── QrtzTriggerLogModelTable.scala
│ │ │ │ └── QrtzTriggersModelTable.scala
│ │ ├── repository
│ │ │ └── JobRepo.scala
│ │ └── util
│ │ │ └── JobUtils.scala
│ │ └── workflow
│ │ └── model
│ │ └── WfDetail.scala
│ └── test
│ ├── resources
│ ├── application-test.conf
│ └── logback-test.xml
│ └── scala
│ └── mass
│ ├── STMultiNodeSpec.scala
│ ├── db
│ └── slick
│ │ ├── PgProfileTest.scala
│ │ └── codegen
│ │ └── CodegenMain.scala
│ ├── job
│ ├── JobSchedulerTest.scala
│ └── util
│ │ └── JobUtilsTest.scala
│ └── server
│ └── repository
│ └── JobRepoTest.scala
├── mass-core
└── src
│ ├── main
│ ├── resources
│ │ ├── META-INF
│ │ │ └── services
│ │ │ │ └── com.fasterxml.jackson.databind.Module
│ │ └── reference.conf
│ └── scala
│ │ ├── akka
│ │ └── mass
│ │ │ └── AkkaUtils.scala
│ │ └── mass
│ │ ├── Mass.scala
│ │ ├── MassSettings.scala
│ │ ├── core
│ │ ├── Constants.scala
│ │ ├── MassUtils.scala
│ │ ├── ProgramVersion.scala
│ │ ├── RunMode.scala
│ │ ├── XmlUtils.scala
│ │ ├── actors
│ │ │ ├── AggregateActor.scala
│ │ │ └── MassActor.scala
│ │ ├── component
│ │ │ └── spi
│ │ │ │ ├── BaseComponent.scala
│ │ │ │ ├── CollectResult.scala
│ │ │ │ ├── DataComponent.scala
│ │ │ │ └── FunctionComponent.scala
│ │ ├── event
│ │ │ └── Event.scala
│ │ ├── factory
│ │ │ └── Factory.scala
│ │ ├── job
│ │ │ ├── JobConstants.scala
│ │ │ ├── JobResult.scala
│ │ │ ├── SchedulerContext.scala
│ │ │ └── SchedulerJob.scala
│ │ ├── json
│ │ │ ├── EnumSerializer.scala
│ │ │ ├── JavaTimeSerializers.scala
│ │ │ ├── Json4sFormats.scala
│ │ │ ├── ScalaPBJacksonModule.scala
│ │ │ └── package.scala
│ │ ├── model
│ │ │ └── scheduler
│ │ │ │ └── SchedulerJobResultTrait.scala
│ │ ├── module
│ │ │ └── Module.scala
│ │ ├── script
│ │ │ └── ScriptManager.scala
│ │ └── workflow
│ │ │ └── Workflow.scala
│ │ ├── extension
│ │ └── MassCore.scala
│ │ ├── message
│ │ └── job
│ │ │ ├── JobEvent.scala
│ │ │ └── JobMessage.scala
│ │ ├── model
│ │ ├── ApiResult.java
│ │ ├── CommonStatus.java
│ │ ├── ElementCell.scala
│ │ ├── IApiResult.java
│ │ ├── Provice.scala
│ │ ├── TitleValue.scala
│ │ └── job
│ │ │ ├── JobItem.scala
│ │ │ ├── JobSchedule.scala
│ │ │ ├── JobTrigger.scala
│ │ │ ├── Program.scala
│ │ │ ├── RunStatus.java
│ │ │ ├── RunStatus.scala
│ │ │ └── TriggerType.scala
│ │ └── session
│ │ └── BaseSession.scala
│ └── test
│ ├── resources
│ ├── application-test.conf
│ └── logback-test.xml
│ └── scala
│ └── mass
│ ├── JacksonTest.scala
│ ├── MassSettingsTest.scala
│ └── core
│ └── script
│ └── ScriptManagerTest.scala
├── mass-docs
├── README.md
├── doc
│ └── Sea Data.xmind
└── src
│ └── main
│ ├── drawio
│ ├── Broker.drawio
│ ├── Job.drawio
│ ├── RDP架构.drawio
│ ├── 平台架构.drawio
│ └── 白皮书.drawio
│ ├── paradox
│ ├── broker
│ │ ├── core.md
│ │ ├── index.md
│ │ └── leader.md
│ ├── connector
│ │ ├── connector.md
│ │ ├── core.md
│ │ └── index.md
│ ├── console
│ │ ├── console
│ │ │ └── console.md
│ │ ├── index.md
│ │ └── orchestration
│ │ │ └── orchestration.md
│ ├── design
│ │ ├── code-level.md
│ │ ├── index.md
│ │ ├── mass-design.md
│ │ └── thinking.md
│ ├── index.md
│ ├── intro
│ │ └── index.md
│ ├── job
│ │ ├── configuration.md
│ │ ├── developer.md
│ │ ├── feature.md
│ │ ├── get_start.md
│ │ ├── index.md
│ │ ├── install.md
│ │ └── intro.md
│ ├── rdi
│ │ ├── code
│ │ │ └── workflow
│ │ │ │ └── pg2mysql-graph.xml
│ │ ├── core.md
│ │ ├── extension.md
│ │ ├── img
│ │ │ ├── Reactive-Flow.png
│ │ │ └── Reactive-Flow.svg
│ │ ├── index.md
│ │ └── workflow.md
│ ├── spec
│ │ ├── business_spec
│ │ │ ├── business_spec.md
│ │ │ ├── job.json
│ │ │ ├── job.xml
│ │ │ └── rowFieldConfigure.json
│ │ ├── component_spec
│ │ │ └── component_spec.md
│ │ ├── ic_spec
│ │ │ └── ic_spec.md
│ │ ├── imgs
│ │ │ ├── 白皮书-一体化全流程的数据资产管理体系.svg
│ │ │ ├── 白皮书-服务于全行业和全客户.svg
│ │ │ └── 白皮书-海量数据资产平台(MassData).svg
│ │ ├── index.md
│ │ └── white_paper.md
│ └── static
│ │ ├── BrokerAppSystem.svg
│ │ ├── DataProcessVisual.svg
│ │ ├── JobTaskStageFlow.png
│ │ ├── RDPTechArchitecture.png
│ │ ├── RDPTechArchitecture.svg
│ │ ├── SchedulerPlatformArchitecture.svg
│ │ └── SeaBrokerArchitecture.png
│ └── scala
│ └── ws.sc
├── mass-functest
└── src
│ ├── multi-jvm
│ ├── README.md
│ └── scala
│ │ ├── mass
│ │ └── functest
│ │ │ └── NodesTest.scala
│ │ └── sample
│ │ ├── demo
│ │ └── Demo.scala
│ │ └── multinode
│ │ ├── MultiNodeSampleTest.scala
│ │ ├── MultiNodeSampleTestMultiJvmNode1.opts
│ │ └── MultiNodeSampleTestMultiJvmNode2.opts
│ └── test
│ ├── resources
│ ├── application.conf
│ ├── calculator.conf
│ ├── factorial.conf
│ ├── logback-test.xml
│ ├── remotelookup.conf
│ ├── simple-cluster.conf
│ ├── stats1.conf
│ ├── stats2.conf
│ └── test-common.conf
│ └── scala
│ └── sample
│ ├── cats
│ └── sample
│ │ └── ValidatedDemo.scala
│ ├── cluster
│ ├── factorial
│ │ ├── FactorialApp.scala
│ │ ├── FactorialBackend.scala
│ │ ├── FactorialFrontend.scala
│ │ └── MetricsListener.scala
│ ├── sample
│ │ ├── SimpleClusterApp.scala
│ │ └── SimpleClusterListener.scala
│ ├── stats
│ │ ├── StatsMessages.scala
│ │ ├── StatsSample.scala
│ │ ├── StatsSampleOneMaster.scala
│ │ ├── StatsService.scala
│ │ └── StatsWorker.scala
│ └── transformation
│ │ ├── TransformationApp.scala
│ │ ├── TransformationBackend.scala
│ │ ├── TransformationFrontend.scala
│ │ └── TransformationMessages.scala
│ ├── multinode
│ └── STMultiNodeSpec.scala
│ └── remote
│ └── benchmark
│ ├── Receiver.scala
│ └── Sender.scala
├── mass-job
├── README.md
├── docs
│ └── 作业调度配置参考.png
└── src
│ ├── main
│ ├── resources
│ │ ├── logback.xml
│ │ └── reference.conf
│ └── scala
│ │ └── mass
│ │ └── job
│ │ ├── JobMain.scala
│ │ ├── component
│ │ ├── DefaultSchedulerJob.scala
│ │ └── JobRun.scala
│ │ ├── route
│ │ ├── Routes.scala
│ │ └── api
│ │ │ ├── ApiRoute.scala
│ │ │ ├── MockRoute.scala
│ │ │ └── v1
│ │ │ └── JobRoute.scala
│ │ └── service
│ │ └── job
│ │ ├── JobActor.scala
│ │ ├── JobService.scala
│ │ └── JobServiceComponent.scala
│ ├── test
│ ├── resources
│ │ ├── application-test.conf
│ │ └── logback-test.xml
│ └── scala
│ │ └── mass
│ │ └── job
│ │ ├── SchedulerTestkit.scala
│ │ ├── component
│ │ └── JobRunTest.scala
│ │ ├── route
│ │ └── api
│ │ │ └── v1
│ │ │ └── JobRouteTest.scala
│ │ └── service
│ │ └── job
│ │ └── JobServiceTest.scala
│ └── universal
│ └── examples
│ └── sample-job
│ ├── hello.jar
│ ├── hello.zip
│ ├── sample.conf
│ └── sample2.conf
├── mass-rdi-cli
└── src
│ └── main
│ ├── resources
│ └── reference.conf
│ └── scala
│ └── mass
│ └── rdp
│ └── cli
│ └── boot
│ └── RdpCliMain.scala
├── mass-rdi-core
└── src
│ ├── main
│ ├── resources
│ │ └── reference.conf
│ └── scala
│ │ └── mass
│ │ └── rdp
│ │ ├── RdpSystem.scala
│ │ ├── etl
│ │ ├── EtlJob.scala
│ │ ├── EtlJobResult.scala
│ │ ├── EtlWorkflow.scala
│ │ ├── EtlWorkflowExecution.scala
│ │ └── graph
│ │ │ ├── EtlGraph.scala
│ │ │ ├── EtlGraphException.scala
│ │ │ ├── EtlGraphImpl.scala
│ │ │ ├── EtlGraphParser.scala
│ │ │ └── EtlStreamFactory.scala
│ │ └── module
│ │ ├── RdpJdbcModule.scala
│ │ └── RdpModule.scala
│ └── test
│ └── resources
│ ├── application.conf
│ ├── logback-test.xml
│ └── mass
│ └── core
│ └── workflow
│ └── etl
│ └── EtlWorkflowTest.xml
├── mass-rdi
└── src
│ ├── main
│ ├── resources
│ │ └── reference.conf
│ └── scala
│ │ └── mass
│ │ └── rdp
│ │ └── boot
│ │ └── RdpMain.scala
│ └── test
│ └── scala
│ └── mass
│ └── workflow
│ └── etl
│ ├── EtlSchedulerWorkflowTest.scala
│ ├── EtlWorkflowTest.scala
│ └── TestStub.scala
├── project
├── Commons.scala
├── Dependencies.scala
├── build.properties
├── plugins.sbt
└── project-info.conf
├── sbt
├── sbt-dist
├── bin
│ ├── sbt
│ ├── sbt-launch-lib.bash
│ ├── sbt-launch.jar
│ └── sbt.bat
└── conf
│ ├── sbtconfig.txt
│ └── sbtopts
├── sbt.bat
├── scripts
├── generate-doc.sh
├── publish-doc.sh
└── software
│ ├── dameng
│ ├── Dockerfile
│ ├── Dockerfile-dameng
│ ├── auto_install.xml
│ ├── docker-entrypoint.sh
│ └── init.sql
│ ├── mysql
│ ├── Dockerfile
│ ├── init.sql
│ └── mysql.cnf
│ ├── postgres
│ ├── .pgpass
│ ├── Dockerfile
│ ├── init.sh
│ ├── init.sql
│ ├── job.sql
│ └── workflow.sql
│ └── sqlserver
│ ├── Dockerfile
│ └── init.sql
└── version.sbt
/.gitattributes:
--------------------------------------------------------------------------------
1 | *.scala eol=lf
2 | *.java eol=lf
3 | *.txt eol=lf
4 | *.md eol=lf
5 | *.conf eol=lf
6 | *.json eol=lf
7 | *.js eol=lf
8 | *.css eol=lf
9 | *.jsx eol=lf
10 | *.vue eol=lf
11 | *.xml eol=lf
12 | *.sbt eol=lf
13 | *.less eol=lf
14 | *.yml eol=lf
15 | *.html eol=lf
16 | *.svg eol=lf
17 |
--------------------------------------------------------------------------------
/.scalafmt.conf:
--------------------------------------------------------------------------------
1 | version = 2.2.2
2 | style = defaultWithAlign
3 | lineEndings = unix
4 | encoding = "UTF-8"
5 | project.git = true
6 | docstrings = JavaDoc
7 | maxColumn = 120
8 | indentOperator = spray
9 | unindentTopLevelOperators = true
10 | align.tokens = [{code = "=>", owner = "Case"}]
11 | align.openParenDefnSite = false
12 | align.openParenCallSite = false
13 | optIn.breakChainOnFirstMethodDot = false
14 | optIn.configStyleArguments = false
15 | danglingParentheses = false
16 | spaces.inImportCurlyBraces = true
17 | rewrite.neverInfix.excludeFilters = [
18 | and
19 | min
20 | max
21 | until
22 | to
23 | by
24 | eq
25 | ne
26 | "should.*"
27 | "contain.*"
28 | "must.*"
29 | in
30 | ignore
31 | be
32 | taggedAs
33 | thrownBy
34 | synchronized
35 | have
36 | when
37 | size
38 | only
39 | noneOf
40 | oneElementOf
41 | noElementsOf
42 | atLeastOneElementOf
43 | atMostOneElementOf
44 | allElementsOf
45 | inOrderElementsOf
46 | theSameElementsAs
47 | ]
48 |
--------------------------------------------------------------------------------
/CNAME:
--------------------------------------------------------------------------------
1 | www.yangbajing.me
--------------------------------------------------------------------------------
/docs/ETL功能.docx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yangbajing/fusion-data-archived/eb5e5ef5d1ec667e6087652bcee4368e633a03e3/docs/ETL功能.docx
--------------------------------------------------------------------------------
/docs/changelogs/2018.md:
--------------------------------------------------------------------------------
1 | # Change Logs
2 |
3 | ### 07-09 ~ 13
4 |
5 | - [*] Adapt to the Dameng (DM) database
6 | - [ ] Improve job scheduling and workflow: make configuration data persistent
7 |   - Job scheduling engine (a standalone process that can scan configurations to schedule jobs)
8 |   - A first usable version of rdp-cli: submit jobs to the scheduling engine, run jobs manually
9 | - ? Design a message (log) flow scheme in preparation for monitoring and statistics
10 |
11 | The complete ETL + Workflow + Scheduler functionality can be driven through command-line invocation.
12 |
13 | ### 07-06
14 |
15 | The integration of the ETL workflow with job scheduling now runs end to end.
16 |
17 | ### 07-05
18 |
19 | Implemented job scheduling, supporting two trigger modes: fixed time interval and calendar time.
20 |
--------------------------------------------------------------------------------
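The 07-05 entry above mentions two trigger modes: fixed time interval and calendar time. The Qrtz* table models under mass-core-ext suggest the scheduler is built on Quartz; the sketch below shows, in plain Quartz API, what those two trigger modes look like. The names, schedules, and wiring here are illustrative, not the project's actual code.

```scala
import org.quartz.{ CronScheduleBuilder, Job, JobBuilder, JobExecutionContext, SimpleScheduleBuilder, TriggerBuilder }
import org.quartz.impl.StdSchedulerFactory

// A trivial job body, just to have something to fire.
class HelloJob extends Job {
  override def execute(ctx: JobExecutionContext): Unit = println("job fired")
}

object TriggerModesSketch extends App {
  val scheduler = StdSchedulerFactory.getDefaultScheduler
  scheduler.start()

  val job = JobBuilder.newJob(classOf[HelloJob]).withIdentity("demo-job").build()

  // Trigger mode 1: fixed time interval — fire every 30 seconds, forever.
  val intervalTrigger = TriggerBuilder
    .newTrigger()
    .withIdentity("interval-trigger")
    .withSchedule(SimpleScheduleBuilder.simpleSchedule().withIntervalInSeconds(30).repeatForever())
    .build()

  // Trigger mode 2: calendar time — fire at 03:00 every day (cron expression).
  val calendarTrigger = TriggerBuilder
    .newTrigger()
    .withIdentity("calendar-trigger")
    .forJob(job)
    .withSchedule(CronScheduleBuilder.cronSchedule("0 0 3 * * ?"))
    .build()

  scheduler.scheduleJob(job, intervalTrigger) // register the job with its first trigger
  scheduler.scheduleJob(calendarTrigger)      // attach a second trigger to the same job
}
```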
/docs/doc/css/fonts/icons.eot:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yangbajing/fusion-data-archived/eb5e5ef5d1ec667e6087652bcee4368e633a03e3/docs/doc/css/fonts/icons.eot
--------------------------------------------------------------------------------
/docs/doc/css/fonts/icons.ttf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yangbajing/fusion-data-archived/eb5e5ef5d1ec667e6087652bcee4368e633a03e3/docs/doc/css/fonts/icons.ttf
--------------------------------------------------------------------------------
/docs/doc/css/fonts/icons.woff:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yangbajing/fusion-data-archived/eb5e5ef5d1ec667e6087652bcee4368e633a03e3/docs/doc/css/fonts/icons.woff
--------------------------------------------------------------------------------
/docs/doc/css/fonts/icons.woff2:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yangbajing/fusion-data-archived/eb5e5ef5d1ec667e6087652bcee4368e633a03e3/docs/doc/css/fonts/icons.woff2
--------------------------------------------------------------------------------
/docs/doc/css/fonts/proxima-nova-bold.eot:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yangbajing/fusion-data-archived/eb5e5ef5d1ec667e6087652bcee4368e633a03e3/docs/doc/css/fonts/proxima-nova-bold.eot
--------------------------------------------------------------------------------
/docs/doc/css/fonts/proxima-nova-bold.ttf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yangbajing/fusion-data-archived/eb5e5ef5d1ec667e6087652bcee4368e633a03e3/docs/doc/css/fonts/proxima-nova-bold.ttf
--------------------------------------------------------------------------------
/docs/doc/css/fonts/proxima-nova-bold.woff:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yangbajing/fusion-data-archived/eb5e5ef5d1ec667e6087652bcee4368e633a03e3/docs/doc/css/fonts/proxima-nova-bold.woff
--------------------------------------------------------------------------------
/docs/doc/css/fonts/proxima-nova-regular.eot:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yangbajing/fusion-data-archived/eb5e5ef5d1ec667e6087652bcee4368e633a03e3/docs/doc/css/fonts/proxima-nova-regular.eot
--------------------------------------------------------------------------------
/docs/doc/css/fonts/proxima-nova-regular.ttf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yangbajing/fusion-data-archived/eb5e5ef5d1ec667e6087652bcee4368e633a03e3/docs/doc/css/fonts/proxima-nova-regular.ttf
--------------------------------------------------------------------------------
/docs/doc/css/fonts/proxima-nova-regular.woff:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yangbajing/fusion-data-archived/eb5e5ef5d1ec667e6087652bcee4368e633a03e3/docs/doc/css/fonts/proxima-nova-regular.woff
--------------------------------------------------------------------------------
/docs/doc/css/fonts/source-code-pro-regular.eot:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yangbajing/fusion-data-archived/eb5e5ef5d1ec667e6087652bcee4368e633a03e3/docs/doc/css/fonts/source-code-pro-regular.eot
--------------------------------------------------------------------------------
/docs/doc/css/fonts/source-code-pro-regular.ttf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yangbajing/fusion-data-archived/eb5e5ef5d1ec667e6087652bcee4368e633a03e3/docs/doc/css/fonts/source-code-pro-regular.ttf
--------------------------------------------------------------------------------
/docs/doc/css/fonts/source-code-pro-regular.woff:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yangbajing/fusion-data-archived/eb5e5ef5d1ec667e6087652bcee4368e633a03e3/docs/doc/css/fonts/source-code-pro-regular.woff
--------------------------------------------------------------------------------
/docs/doc/css/icons.css:
--------------------------------------------------------------------------------
1 | @font-face {
2 | font-family: 'icons';
3 | font-weight: normal;
4 | font-style: normal;
5 | src: url('fonts/icons.eot?2');
6 | src: url('fonts/icons.eot?2#iefix') format('embedded-opentype'),
7 | url('fonts/icons.woff2?2') format('woff2'),
8 | url('fonts/icons.woff?2') format('woff'),
9 | url('fonts/icons.ttf?2') format('truetype'),
10 | url('fonts/icons.svg?2#icons') format('svg');
11 | }
12 |
13 | .icon-prev:before,
14 | .icon-next:before,
15 | .icon-up:before,
16 | .icon-down:before,
17 | .icon-link:before {
18 | font-family: "icons";
19 | font-style: normal;
20 | font-weight: normal;
21 | speak: none;
22 | display: inline-block;
23 | text-decoration: inherit;
24 | width: 1em;
25 | margin-right: .2em;
26 | text-align: center;
27 | font-variant: normal;
28 | text-transform: none;
29 | line-height: 1em;
30 | -webkit-font-smoothing: antialiased;
31 | -moz-osx-font-smoothing: grayscale;
32 | }
33 |
34 | .icon-prev:before { content: '\3c'; } /* '<' */
35 | .icon-next:before { content: '\3e'; } /* '>' */
36 | .icon-up:before { content: '\5e'; } /* '^' */
37 | .icon-down:before { content: '\76'; } /* 'v' */
38 | .icon-link:before { content: '\a7'; } /* '§' */
39 |
--------------------------------------------------------------------------------
/docs/doc/images/akka-icon-reverse.svg:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/docs/doc/images/akka-icon.svg:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/docs/doc/images/akka-logo-reverse.svg:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/docs/doc/images/android-chrome-192x192.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yangbajing/fusion-data-archived/eb5e5ef5d1ec667e6087652bcee4368e633a03e3/docs/doc/images/android-chrome-192x192.png
--------------------------------------------------------------------------------
/docs/doc/images/android-chrome-512x512.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yangbajing/fusion-data-archived/eb5e5ef5d1ec667e6087652bcee4368e633a03e3/docs/doc/images/android-chrome-512x512.png
--------------------------------------------------------------------------------
/docs/doc/images/apple-touch-icon.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yangbajing/fusion-data-archived/eb5e5ef5d1ec667e6087652bcee4368e633a03e3/docs/doc/images/apple-touch-icon.png
--------------------------------------------------------------------------------
/docs/doc/images/favicon-16x16.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yangbajing/fusion-data-archived/eb5e5ef5d1ec667e6087652bcee4368e633a03e3/docs/doc/images/favicon-16x16.png
--------------------------------------------------------------------------------
/docs/doc/images/favicon-32x32.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yangbajing/fusion-data-archived/eb5e5ef5d1ec667e6087652bcee4368e633a03e3/docs/doc/images/favicon-32x32.png
--------------------------------------------------------------------------------
/docs/doc/images/favicon.ico:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yangbajing/fusion-data-archived/eb5e5ef5d1ec667e6087652bcee4368e633a03e3/docs/doc/images/favicon.ico
--------------------------------------------------------------------------------
/docs/doc/images/footer-background.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yangbajing/fusion-data-archived/eb5e5ef5d1ec667e6087652bcee4368e633a03e3/docs/doc/images/footer-background.jpg
--------------------------------------------------------------------------------
/docs/doc/images/header-background.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yangbajing/fusion-data-archived/eb5e5ef5d1ec667e6087652bcee4368e633a03e3/docs/doc/images/header-background.jpg
--------------------------------------------------------------------------------
/docs/doc/images/manifest.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "Akka",
3 | "icons": [
4 | {
5 | "src": "android-chrome-192x192.png",
6 | "sizes": "192x192",
7 | "type": "image/png"
8 | },
9 | {
10 | "src": "android-chrome-512x512.png",
11 | "sizes": "512x512",
12 | "type": "image/png"
13 | }
14 | ],
15 | "theme_color": "#15a9ce",
16 | "background_color": "#ffffff",
17 | "display": "standalone"
18 | }
19 |
--------------------------------------------------------------------------------
/docs/doc/images/mstile-150x150.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yangbajing/fusion-data-archived/eb5e5ef5d1ec667e6087652bcee4368e633a03e3/docs/doc/images/mstile-150x150.png
--------------------------------------------------------------------------------
/docs/doc/js/magellan.js:
--------------------------------------------------------------------------------
1 | $(function() {
2 |
3 | // add magellan targets to anchor headers, for h1 and h2
4 | $("a.anchor").each(function() {
5 | var anchor = $(this);
6 | var name = anchor.attr("name");
7 | var header = anchor.parent();
8 | header.attr("id", name);
9 | if (header.is("h1") || header.is("h2")) {
10 | header.attr("data-magellan-target", name);
11 | }
12 | });
13 |
14 | // enable magellan plugin on the active page header links in the navigation
15 | var nav = $(".site-nav a.active.page").parent("li");
16 | if (nav.length > 0) {
17 | // strip navigation links down to just the hash fragment
18 | nav.find("a.active.page, a.header").attr('href', function(_, current){
19 | return this.hash ? this.hash : current;
20 | });
21 | new Foundation.Magellan(nav);
22 | }
23 |
24 | });
25 |
--------------------------------------------------------------------------------
/docs/doc/js/metadata-toggle.js:
--------------------------------------------------------------------------------
1 | $(function() {
2 | var hiddenText = "[+] Show project info",
3 | shownText = "[-] Hide project info",
4 | toggle = $('' + hiddenText + ''),
5 | hidden = true,
6 | infotable = $('table.project-info')
7 |
8 | toggle.insertBefore(infotable)
9 | toggle.on("click", function(event) {
10 | if (hidden) {
11 | infotable.css("display", "block")
12 | toggle.text(shownText)
13 | hidden = false
14 | } else {
15 | infotable.css("display", "none")
16 | toggle.text(hiddenText)
17 | hidden = true
18 | }
19 | })
20 | })
--------------------------------------------------------------------------------
/docs/doc/js/page.js:
--------------------------------------------------------------------------------
1 | $(function() {
2 |
3 | // close the overlay navigation when header links are clicked
4 | $(".overlay-nav .nav-toc a.header, .overlay-nav .nav-toc a.active.page").attr("data-toggle", "underlay overlay");
5 |
6 | // TOCs support three styles:
7 | // - box: wrap in a shadowed box and apply TOC styling
8 | // - blocks: section TOCs in boxes in a block grid with equal heights
9 | // - list: regular list of links
10 | var tocs = $(".page-content .toc");
11 | tocs.each(function() {
12 | var toc = $(this);
13 | // if there's no style already set then add .box for TOCs of depth 1 otherwise .blocks
14 | if (!(toc.hasClass("blocks") || toc.hasClass("box") || toc.hasClass("list"))) {
15 | toc.addClass((toc.children("ul").children("li").has("ul").length == 0) ? "box" : "blocks");
16 | }
17 | if (toc.hasClass("blocks")) {
18 | var list = toc.children("ul");
19 | list.addClass("row medium-up-2 large-up-3 toc-grid");
20 | list.children("li").addClass("column column-block toc-block").attr("data-equalizer-watch", "").wrapInner("");
21 | new Foundation.Equalizer(list, { equalizeByRow: true, equalizeOn: "medium" });
22 | } else if (toc.hasClass("box")) {
23 | toc.wrapInner("");
24 | }
25 | });
26 | });
27 |
--------------------------------------------------------------------------------
/docs/doc/lib/prettify/lang-scala.js:
--------------------------------------------------------------------------------
1 | PR.registerLangHandler(PR.createSimpleLexer([["pln",/^[\t\n\r \xa0]+/,null,"\t\n\r \u00a0"],["str",/^"(?:""(?:""?(?!")|[^"\\]|\\.)*"{0,3}|(?:[^\n\r"\\]|\\.)*"?)/,null,'"'],["lit",/^`(?:[^\n\r\\`]|\\.)*`?/,null,"`"],["pun",/^[!#%&(--:-@[-^{-~]+/,null,"!#%&()*+,-:;<=>?@[\\]^{|}~"]],[["str",/^'(?:[^\n\r'\\]|\\(?:'|[^\n\r']+))'/],["lit",/^'[$A-Z_a-z][\w$]*(?![\w$'])/],["kwd",/^(?:abstract|case|catch|class|def|do|else|extends|final|finally|for|forSome|if|implicit|import|lazy|match|new|object|override|package|private|protected|requires|return|sealed|super|throw|trait|try|type|val|var|while|with|yield)\b/],
2 | ["lit",/^(?:true|false|null|this)\b/],["lit",/^(?:0(?:[0-7]+|x[\da-f]+)l?|(?:0|[1-9]\d*)(?:(?:\.\d+)?(?:e[+-]?\d+)?f?|l?)|\\.\d+(?:e[+-]?\d+)?f?)/i],["typ",/^[$_]*[A-Z][\d$A-Z_]*[a-z][\w$]*/],["pln",/^[$A-Z_a-z][\w$]*/],["com",/^\/(?:\/.*|\*(?:\/|\**[^*/])*(?:\*+\/?)?)/],["pun",/^(?:\.+|\/)/]]),["scala"]);
3 |
--------------------------------------------------------------------------------
/docs/doc/lib/prettify/prettify.css:
--------------------------------------------------------------------------------
1 | .pln{color:#000}@media screen{.str{color:#080}.kwd{color:#008}.com{color:#800}.typ{color:#606}.lit{color:#066}.pun,.opn,.clo{color:#660}.tag{color:#008}.atn{color:#606}.atv{color:#080}.dec,.var{color:#606}.fun{color:red}}@media print,projection{.str{color:#060}.kwd{color:#006;font-weight:bold}.com{color:#600;font-style:italic}.typ{color:#404;font-weight:bold}.lit{color:#044}.pun,.opn,.clo{color:#440}.tag{color:#006;font-weight:bold}.atn{color:#404}.atv{color:#060}}pre.prettyprint{padding:2px;border:1px solid #888}ol.linenums{margin-top:0;margin-bottom:0}li.L0,li.L1,li.L2,li.L3,li.L5,li.L6,li.L7,li.L8{list-style-type:none}li.L1,li.L3,li.L5,li.L7,li.L9{background:#eee}
--------------------------------------------------------------------------------
/docs/doc/paradox.json:
--------------------------------------------------------------------------------
1 | {
2 | "name" : "massData",
3 | "version" : "1.0.0-SNAPSHOT"
4 | }
--------------------------------------------------------------------------------
/docs/doc/rdi/img/Reactive-Flow.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yangbajing/fusion-data-archived/eb5e5ef5d1ec667e6087652bcee4368e633a03e3/docs/doc/rdi/img/Reactive-Flow.png
--------------------------------------------------------------------------------
/docs/doc/spec/business_spec/job.json:
--------------------------------------------------------------------------------
1 | {
2 | "jobName": "",
3 | "jobVersion": "1.0.0",
4 | "jobDescription": "",
5 | "tasks": [
6 | {
7 | "taskId": "task1",
8 | "nextTask": "task2",
9 | "source": {
10 | "sourceId": "数据流ID:custom, jdbc, ftp, sftp",
11 | "properties": [
12 | {
13 | "key": "jdbcUrl",
14 | "value": "jdbc:postgres://localhost/system"
15 | }
16 | ],
17 | "className": "单sourceId为custom时,通过 className 指定数据流实现类全路径"
18 | },
19 | "stages": [
20 | {
21 | "stageId": "组件ID:custom, csv, jdbcRow",
22 | "properties": [
23 | {
24 | "key": "",
25 | "value": ""
26 | }
27 | ],
28 | "className": "单 stageId 为custom时,通过 className 指定组件类全路径",
29 | // 不同的组件有不同的配置
30 | "configure": {}
31 | }
32 | ]
33 | },
34 | {
35 | "taskId": "task2"
36 | }
37 | ]
38 | }
--------------------------------------------------------------------------------
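Read as a whole, job.json describes a job as a chain of tasks, each with an optional source and a pipeline of stages. A hypothetical Scala sketch of the data model this spec implies (the class names are illustrative, not the project's actual types):

```scala
// Hypothetical model mirroring the job.json spec above.
final case class Prop(key: String, value: String)

final case class SourceConf(
    sourceId: String,                  // one of: custom, jdbc, ftp, sftp
    properties: Seq[Prop] = Nil,
    className: Option[String] = None)  // required only when sourceId == "custom"

final case class StageConf(
    stageId: String,                   // one of: custom, csv, jdbcRow
    properties: Seq[Prop] = Nil,
    className: Option[String] = None,  // required only when stageId == "custom"
    configure: Map[String, String] = Map.empty) // component-specific settings

final case class TaskConf(
    taskId: String,
    nextTask: Option[String] = None,   // links tasks into a sequence
    source: Option[SourceConf] = None,
    stages: Seq[StageConf] = Nil)

final case class JobConf(jobName: String, jobVersion: String, jobDescription: String, tasks: Seq[TaskConf])
```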
/docs/doc/spec/business_spec/job.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 | job1
4 | job1 name
5 | 1.0.0
6 | job1 description
7 |
8 |
9 |
10 |
11 |
12 | jdbcUrl
13 | jdbc:postgres://localhost/system
14 |
15 |
16 | When sourceId is custom, className gives the fully qualified class name of the data stream implementation
17 |
18 |
19 |
20 |
21 |
22 | splitter
23 | \t
24 |
25 |
26 | When stageId is custom, className gives the fully qualified class name of the component
27 |
28 |
29 |
30 |
31 |
32 |
33 |
34 |
35 |
36 |
37 |
38 | ....
39 |
40 |
--------------------------------------------------------------------------------
/docs/doc/spec/business_spec/rowFieldConfigure.json:
--------------------------------------------------------------------------------
1 | {
2 | "fields": [{
3 | "field": "字段名",
4 | "dataType": "期望的数据类型",
5 | // 数据类型不匹配时的转换函数
6 | "dataTypeConverter": {},
7 | // 字段值进行转换
8 | "transfer": {}
9 | }]
10 | }
--------------------------------------------------------------------------------
/docs/doc/static/JobTaskStageFlow.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yangbajing/fusion-data-archived/eb5e5ef5d1ec667e6087652bcee4368e633a03e3/docs/doc/static/JobTaskStageFlow.png
--------------------------------------------------------------------------------
/docs/doc/static/RDPTechArchitecture.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yangbajing/fusion-data-archived/eb5e5ef5d1ec667e6087652bcee4368e633a03e3/docs/doc/static/RDPTechArchitecture.png
--------------------------------------------------------------------------------
/docs/doc/static/SeaBrokerArchitecture.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yangbajing/fusion-data-archived/eb5e5ef5d1ec667e6087652bcee4368e633a03e3/docs/doc/static/SeaBrokerArchitecture.png
--------------------------------------------------------------------------------
/docs/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 | Mass Data(海量数据资产管理平台)
6 |
7 |
8 |
11 |
12 |
--------------------------------------------------------------------------------
/example/src/main/scala/example/BalancingPoolDemo.scala:
--------------------------------------------------------------------------------
1 | package example
2 |
3 | import akka.actor.{ Actor, ActorLogging, ActorRef, ActorSystem, Props }
4 | import example.Worker.FibonacciNumber
5 |
6 | import scala.annotation.tailrec
7 | import scala.concurrent.duration._
8 | import scala.io.StdIn
9 |
10 | object Worker {
11 | case class FibonacciNumber(nbr: Int, delay: FiniteDuration)
12 |
13 | case class GetResult(nr: Int, source: ActorRef)
14 |
15 | def props: Props = Props(new Worker)
16 | }
17 |
18 | class Worker extends Actor with ActorLogging {
19 | import Worker._
20 | import context.dispatcher
21 |
22 | override def preStart(): Unit =
23 | log.info(s"$self started")
24 |
25 | override def postStop(): Unit =
26 | log.info(s"$self stopped")
27 |
28 | override def receive: Receive = {
29 | case FibonacciNumber(nr, delay) =>
30 | context.system.scheduler.scheduleOnce(delay, self, GetResult(nr, sender()))
31 |
32 | case GetResult(nr, source) =>
33 | val result = fibonacci(nr)
34 | log.info(s"$nr! = $result")
35 | }
36 |
37 | private def fibonacci(n: Int): Long = {
38 | @tailrec
39 | def fib(n: Int, b: Long, a: Long): Long = n match {
40 | case 0 => a
41 | case _ =>
42 | fib(n - 1, a + b, b)
43 | }
44 | fib(n, 1, 0)
45 | }
46 | }
47 |
48 | object BalancingPoolDemo extends App {
49 | implicit val system = ActorSystem()
50 |
51 | val worker = system.actorOf(Worker.props, "worker")
52 | worker ! FibonacciNumber(50, 50.millis)
53 | worker ! FibonacciNumber(33, 50.millis)
54 | worker ! FibonacciNumber(68, 50.millis)
55 | worker ! FibonacciNumber(53, 50.millis)
56 | worker ! FibonacciNumber(45, 50.millis)
57 |
58 | StdIn.readLine()
59 | system.terminate()
60 | }
61 |
--------------------------------------------------------------------------------
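Despite its name, BalancingPoolDemo sends every message to a single Worker. A minimal sketch of what the name refers to, assuming Akka's standard akka.routing.BalancingPool (the pool size and message values are illustrative): several routees share one mailbox, so whichever worker is idle picks up the next pending message.

```scala
import akka.actor.ActorSystem
import akka.routing.BalancingPool
import example.Worker // the Worker actor defined in BalancingPoolDemo.scala above
import scala.concurrent.duration._

object BalancingPoolSketch extends App {
  implicit val system = ActorSystem()

  // Four Worker routees share a single mailbox; work is pulled by idle routees.
  val pool = system.actorOf(BalancingPool(4).props(Worker.props), "worker-pool")

  (41 to 48).foreach(n => pool ! Worker.FibonacciNumber(n, 50.millis))
}
```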
/example/src/main/scala/example/akkastream/KillSwitch.scala:
--------------------------------------------------------------------------------
1 | package example.akkastream
2 |
3 | import akka.actor.ActorSystem
4 | import akka.stream.scaladsl.{ Keep, Sink, Source }
5 | import akka.stream.{ ActorMaterializer, DelayOverflowStrategy, KillSwitches }
6 | import com.typesafe.scalalogging.StrictLogging
7 |
8 | import scala.concurrent.duration._
9 | import scala.io.StdIn
10 |
11 | object KillSwitch extends App with StrictLogging {
12 | implicit val system = ActorSystem()
13 | implicit val mat = ActorMaterializer()
14 | import system.dispatcher
15 |
16 | val countingSrc =
17 | Source(Stream.from(1)).delay(1.second, DelayOverflowStrategy.backpressure)
18 |
19 | countingSrc.runForeach(i => logger.info(s"run: $i"))
20 |
21 | val lastSnk = Sink.last[Int]
22 |
23 | val (killSwitch, last) = countingSrc.viaMat(KillSwitches.single)(Keep.right).toMat(lastSnk)(Keep.both).run()
24 |
25 | Thread.sleep(7000)
26 |
27 | killSwitch.shutdown()
28 |
29 | last.foreach(println)
30 |
31 | StdIn.readLine()
32 | system.terminate()
33 | }
34 |
--------------------------------------------------------------------------------
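The demo above uses `KillSwitches.single`, which controls exactly one materialization. A minimal sketch of the companion `KillSwitches.shared` API, where one named switch can shut down any number of streams at once (stream contents here are illustrative):

```scala
import akka.actor.ActorSystem
import akka.stream.{ ActorMaterializer, KillSwitches }
import akka.stream.scaladsl.{ Sink, Source }
import scala.concurrent.duration._

object SharedKillSwitchSketch extends App {
  implicit val system = ActorSystem()
  implicit val mat = ActorMaterializer()

  // One named switch, attachable to any number of streams via .flow.
  val switch = KillSwitches.shared("demo-switch")

  Source.tick(0.seconds, 300.millis, "a").via(switch.flow).runWith(Sink.foreach(println))
  Source.tick(0.seconds, 500.millis, "b").via(switch.flow).runWith(Sink.foreach(println))

  Thread.sleep(2000)
  switch.shutdown() // completes both streams
  system.terminate()
}
```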
/example/src/main/scala/example/akkastream/basic/ActorMaterializerLifecycleDemo.scala:
--------------------------------------------------------------------------------
1 | package example.akkastream.basic
2 |
3 | import akka.actor.{ Actor, ActorSystem, Props }
4 | import akka.stream.{ ActorMaterializer, Materializer }
5 | import akka.stream.scaladsl.Sink
6 | import akka.stream.scaladsl.Source
7 |
8 | import scala.io.StdIn
9 | import scala.util.{ Failure, Success }
10 |
11 | /**
12 | * The ActorMaterializer is created from the actor's context; when the actor stops, the streams it materialized stop as well.
13 | */
14 | class RunWithItself extends Actor {
15 | implicit val mat = ActorMaterializer()
16 |
17 | Source.maybe.runWith(Sink.onComplete {
18 | case Success(done) => println(s"$self Completed: $done")
19 | case Failure(e) => println(s"$self Failed: ${e.getMessage}")
20 | })
21 |
22 | override def receive: Receive = {
23 | case "boom" => context.stop(self)
24 | }
25 | }
26 |
27 | class RunForever(implicit val mat: Materializer) extends Actor {
28 | Source.maybe.runWith(Sink.onComplete {
29 | case Success(done) => println(s"$self Completed: $done")
30 | case Failure(e) => println(s"$self Failed: ${e.getMessage}")
31 | })
32 |
33 | override def receive: Receive = {
34 | case "boom" => context.stop(self)
35 | }
36 | }
37 |
38 | object ActorMaterializerLifecycleDemo extends App {
39 | implicit val system = ActorSystem()
40 | implicit val mat = ActorMaterializer()
41 |
42 | system.actorOf(Props[RunWithItself], "with-itself") ! "boom"
43 | val runForever = system.actorOf(Props(new RunForever), "run-forever")
44 | // Thread.sleep(100)
45 | // mat.shutdown()
46 | // Thread.sleep(200)
47 | runForever ! "boom"
48 |
49 | StdIn.readLine()
50 | system.terminate()
51 | }
52 |
--------------------------------------------------------------------------------
/example/src/main/scala/example/akkastream/basic/Graph1.scala:
--------------------------------------------------------------------------------
1 | package example.akkastream.basic
2 |
3 | import akka.NotUsed
4 | import akka.actor.ActorSystem
5 | import akka.stream.{ ActorMaterializer, ClosedShape }
6 | import akka.stream.scaladsl.{ Broadcast, Flow, GraphDSL, Merge, RunnableGraph, Sink, Source }
7 |
8 | import scala.collection.immutable
9 | import scala.io.StdIn
10 |
11 | object Graph1 extends App {
12 | implicit val system = ActorSystem()
13 | implicit val mat = ActorMaterializer()
14 |
15 | val graph = g(1 to 2)
16 |
17 | graph.run()
18 |
19 | StdIn.readLine()
20 | system.terminate()
21 |
22 | def g(data: immutable.Iterable[Int]) =
23 | RunnableGraph.fromGraph(GraphDSL.create() { implicit b: GraphDSL.Builder[NotUsed] =>
24 | import GraphDSL.Implicits._
25 | val in = Source(data)
26 | val out = Sink.foreach(println)
27 |
28 | val bcast = b.add(Broadcast[Int](2))
29 | val merge = b.add(Merge[Int](2))
30 |
31 | val f1, f2, f3, f4 = Flow[Int].map(_ + 10)
32 |
33 | in ~> f1 ~> bcast ~> f2 ~> merge ~> f3 ~> out
34 | bcast ~> f4 ~> merge
35 |
36 | ClosedShape
37 | })
38 | }
39 |
--------------------------------------------------------------------------------
/example/src/main/scala/example/akkastream/basic/Graph2.scala:
--------------------------------------------------------------------------------
1 | package example.akkastream.basic
2 |
3 | import akka.actor.ActorSystem
4 | import akka.stream.{ ActorMaterializer, ClosedShape }
5 | import akka.stream.scaladsl.{ Broadcast, Flow, GraphDSL, RunnableGraph, Sink, Source }
6 |
7 | import scala.io.StdIn
8 |
9 | object Graph2 extends App {
10 | implicit val system = ActorSystem()
11 | implicit val mat = ActorMaterializer()
12 | import system.dispatcher
13 |
14 | val topHeadSink = Sink.head[Int]
15 | val bottomHeadSink = Sink.head[Int]
16 | val sharedDoubler = Flow[Int].map(_ * 2)
17 |
18 | val g = RunnableGraph.fromGraph(GraphDSL.create(topHeadSink, bottomHeadSink)((_, _)) {
19 | implicit builder => (topHS, bottomHS) =>
20 | import GraphDSL.Implicits._
21 |
22 | val broadcast = builder.add(Broadcast[Int](2))
23 | Source.single(1) ~> broadcast.in
24 |
25 | broadcast ~> sharedDoubler ~> topHS.in
26 | broadcast ~> sharedDoubler ~> bottomHS.in
27 |
28 | ClosedShape
29 | })
30 |
31 | val (topF, bottomF) = g.run()
32 | topF.foreach(v => println(s"top is $v"))
33 | bottomF.foreach(v => println(s"bottom is $v"))
34 |
35 | StdIn.readLine()
36 | system.terminate()
37 | }
38 |
--------------------------------------------------------------------------------
/example/src/main/scala/example/akkastream/basic/Graph3.scala:
--------------------------------------------------------------------------------
1 | package example.akkastream.basic
2 |
3 | import akka.actor.ActorSystem
4 | import akka.stream.{ ActorMaterializer, ClosedShape }
5 | import akka.stream.scaladsl.{ Broadcast, Flow, GraphDSL, Keep, RunnableGraph, Sink, Source }
6 |
7 | import scala.collection.immutable
8 | import scala.concurrent.Future
9 | import scala.io.StdIn
10 |
11 | object Graph3 extends App {
12 | implicit val system = ActorSystem()
13 | implicit val mat = ActorMaterializer()
14 | import system.dispatcher
15 |
16 | val sinks: immutable.Seq[Sink[String, Future[String]]] =
17 | List("a", "b", "c").map(prefix =>
18 | Flow[String].filter(str => str.startsWith(prefix)).toMat(Sink.head[String])(Keep.right))
19 |
20 | val g = RunnableGraph.fromGraph(GraphDSL.create(sinks) { implicit b => sinkList =>
21 | import GraphDSL.Implicits._
22 |
23 | val broadcast = b.add(Broadcast[String](sinkList.size))
24 |
25 | Source(List("ax", "bx", "cx")) ~> broadcast
26 | sinkList.foreach(sink => broadcast ~> sink)
27 |
28 | ClosedShape
29 | })
30 |
31 | val matList: immutable.Seq[Future[String]] = g.run()
32 |
33 | Future.sequence(matList).foreach(println)
34 |
35 | StdIn.readLine()
36 | system.terminate()
37 | }
38 |
--------------------------------------------------------------------------------
/example/src/main/scala/example/akkastream/basic/PartialGraph.scala:
--------------------------------------------------------------------------------
1 | package example.akkastream.basic
2 |
3 | import akka.actor.ActorSystem
4 | import akka.stream.scaladsl.{ GraphDSL, RunnableGraph, Sink, Source, ZipWith }
5 | import akka.stream.{ ActorMaterializer, ClosedShape, UniformFanInShape }
6 |
7 | import scala.concurrent.Await
8 | import scala.concurrent.duration._
9 | import scala.io.StdIn
10 |
11 | object PartialGraph extends App {
12 | implicit val system = ActorSystem()
13 | implicit val mat = ActorMaterializer()
14 |
15 | val pickMaxOfThree = GraphDSL.create() { implicit b =>
16 | import GraphDSL.Implicits._
17 |
18 | // The last type parameter of ZipWith is the output type.
19 | val zip1 = b.add(ZipWith[Int, Int, Int](math.max))
20 | val zip2 = b.add(ZipWith[Int, Int, Int](math.max))
21 | zip1.out ~> zip2.in0
22 | UniformFanInShape(zip2.out, zip1.in0, zip1.in1, zip2.in1)
23 | }
24 |
25 | val resultSink = Sink.head[Int]
26 |
27 | val g = RunnableGraph.fromGraph(GraphDSL.create(resultSink) { implicit b => sink =>
28 | import GraphDSL.Implicits._
29 |
30 | val pm3 = b.add(pickMaxOfThree)
31 |
32 | Source.single(4) ~> pm3.in(0)
33 | Source.single(2) ~> pm3.in(1)
34 | Source.single(3) ~> pm3.in(2)
35 | pm3.out ~> sink.in
36 |
37 | ClosedShape
38 | })
39 |
40 | val result = Await.result(g.run, 300.millis)
41 | println(s"result: $result")
42 |
43 | StdIn.readLine()
44 | system.terminate()
45 | }
46 |
--------------------------------------------------------------------------------
/example/src/main/scala/example/akkastream/basic/PartialGraph2.scala:
--------------------------------------------------------------------------------
1 | package example.akkastream.basic
2 |
3 | import akka.NotUsed
4 | import akka.actor.ActorSystem
5 | import akka.stream.scaladsl.{ Broadcast, Flow, GraphDSL, Sink, Source, Zip }
6 | import akka.stream.{ ActorMaterializer, FlowShape, SourceShape }
7 |
8 | import scala.io.StdIn
9 |
10 | object PartialGraph2 extends App {
11 | implicit val system = ActorSystem()
12 | implicit val mat = ActorMaterializer()
13 | import system.dispatcher
14 |
15 | val pairs: Source[(Int, Int), NotUsed] = Source.fromGraph(GraphDSL.create() { implicit b =>
16 | import GraphDSL.Implicits._
17 |
18 | // prepare graph elements
19 | val zip = b.add(Zip[Int, Int]())
20 |
21 | def ints = Source.fromIterator(() => Iterator.from(1))
22 |
23 | // connect the graph
24 | ints.filter(_ % 2 != 0) ~> zip.in0
25 | ints.filter(_ % 2 == 0) ~> zip.in1
26 |
27 | // expose port
28 | SourceShape(zip.out)
29 | })
30 |
31 | val firstPair = pairs.runWith(Sink.head)
32 | firstPair.foreach(println)
33 |
34 | val pairUpWithToString = Flow.fromGraph(GraphDSL.create() { implicit b =>
35 | import GraphDSL.Implicits._
36 | val broadcast = b.add(Broadcast[Int](2))
37 | val zip = b.add(Zip[Int, String]())
38 |
39 | broadcast.out(0) /*.map(identity)*/ ~> zip.in0
40 | broadcast.out(1).map(_.toString) ~> zip.in1
41 |
42 | FlowShape(broadcast.in, zip.out)
43 | })
44 |
45 | Source(List(1)).via(pairUpWithToString).runWith(Sink.head).foreach(println)
46 |
47 | StdIn.readLine()
48 | system.terminate()
49 | }
50 |
--------------------------------------------------------------------------------
/example/src/main/scala/example/akkastream/basic/SimplifiedAPI.scala:
--------------------------------------------------------------------------------
1 | package example.akkastream.basic
2 |
3 | import akka.actor.{ Actor, ActorSystem, Props }
4 | import akka.stream.ActorMaterializer
5 | import akka.stream.scaladsl.{ Broadcast, Merge, Sink, Source }
6 |
7 | import scala.io.StdIn
8 |
9 | class Remotely extends Actor {
10 | override def receive: Receive = {
11 | case value => println(s"receive: $value")
12 | }
13 | }
14 |
15 | object SimplifiedAPI extends App {
16 | implicit val system = ActorSystem()
17 | implicit val mat = ActorMaterializer()
18 | import system.dispatcher
19 |
20 | val merged = Source.combine(Source(List(1)), Source(List(2)))(Merge(_))
21 | val mergedResult = merged.runWith(Sink.fold(0)(_ + _))
22 | mergedResult.foreach(println)
23 |
24 | val sendRemotely =
25 | Sink.actorRef(system.actorOf(Props[Remotely], "remotely"), "Done")
26 | val localProcessing = Sink.foreach[Int](v => println(s"foreach($v)"))
27 | Source(List(0, 1, 1)).runWith(Sink.combine(sendRemotely, localProcessing)(strategy => Broadcast[Int](strategy)))
28 |
29 | StdIn.readLine()
30 | system.terminate()
31 | }
32 |
--------------------------------------------------------------------------------
/example/src/main/scala/example/akkastream/buffer/BufferExample.scala:
--------------------------------------------------------------------------------
1 | package example.akkastream.buffer
2 |
3 | import akka.actor.ActorSystem
4 | import akka.stream.ActorMaterializer
5 | import akka.stream.scaladsl.{ Sink, Source }
6 |
7 | import scala.io.StdIn
8 |
9 | object BufferExample extends App {
10 | implicit val system = ActorSystem()
11 | implicit val mat = ActorMaterializer()
12 |
13 | Source(1 to 3)
14 | .map { i =>
15 | println(s"A: $i"); i
16 | }
17 | .async
18 | .map { i =>
19 | println(s"B: $i"); i
20 | }
21 | .async
22 | .map { i =>
23 | println(s"C: $i"); i
24 | }
25 | .async
26 | .runWith(Sink.ignore)
27 |
28 | Thread.sleep(1000)
29 | println("------------------------------------")
30 | Source(1 to 3)
31 | .map { i =>
32 | println(s"A: $i"); i
33 | }
34 | .map { i =>
35 | println(s"B: $i"); i
36 | }
37 | .map { i =>
38 | println(s"C: $i"); i
39 | }
40 | .runWith(Sink.ignore)
41 |
42 | StdIn.readLine()
43 | system.terminate()
44 | }
45 |
--------------------------------------------------------------------------------
/example/src/main/scala/example/akkastream/buffer/BufferProblem.scala:
--------------------------------------------------------------------------------
1 | package example.akkastream.buffer
2 | import akka.actor.ActorSystem
3 | import akka.stream.scaladsl.{ GraphDSL, RunnableGraph, Sink, Source, ZipWith }
4 | import akka.stream.{ ActorMaterializer, Attributes, ClosedShape }
5 |
6 | import scala.concurrent.duration._
7 | import scala.io.StdIn
8 |
9 | object BufferProblem extends App {
10 | implicit val system = ActorSystem()
11 | implicit val mat = ActorMaterializer()
12 |
13 | case class Tick()
14 |
15 | val g = RunnableGraph.fromGraph(GraphDSL.create() { implicit b =>
16 | import akka.stream.scaladsl.GraphDSL.Implicits._
17 |
18 | // this is the asynchronous stage in this graph
19 | val zipper =
20 | b.add(ZipWith[Tick, Int, Int]((tick, count) => count).async.addAttributes(Attributes.inputBuffer(1, 1)))
21 | // With the default internal buffer settings, only 1 would be printed.
22 | // val zipper = b.add(ZipWith[Tick, Int, Int]((tick, count) => count).async)
23 |
24 | Source.tick(initialDelay = 3.second, interval = 3.second, Tick()) ~> zipper.in0
25 |
26 | Source
27 | .tick(initialDelay = 1.second, interval = 1.second, "message!")
28 | .conflateWithSeed(seed = (_) => 1)((count, _) => count + 1) ~> zipper.in1
29 |
30 | zipper.out ~> Sink.foreach(println)
31 | ClosedShape
32 | })
33 |
34 | g.run()
35 |
36 | StdIn.readLine()
37 | system.terminate()
38 | }
39 |
--------------------------------------------------------------------------------
/example/src/main/scala/example/akkastream/buffer/ExtrapolateExpand.scala:
--------------------------------------------------------------------------------
1 | package example.akkastream.buffer
2 |
3 | import akka.actor.ActorSystem
4 | import akka.stream.ActorMaterializer
5 | import akka.stream.scaladsl.{ Flow, Source }
6 |
7 | import scala.io.StdIn
8 |
9 | object ExtrapolateExpand extends App {
10 | implicit val system = ActorSystem()
11 | implicit val mat = ActorMaterializer()
12 |
13 | // val lastFlow = Flow[Double].extrapolate(Iterator.continually(_))
14 | // Source((1 to 10).map(_.toDouble)).via(lastFlow).runWith(Sink.foreach(println))
15 |
16 | // val initial = 2.0
17 | // val seedFlow = Flow[Double].extrapolate(Iterator.continually(_), Some(initial))
18 | // Source((1 to 10).map(_.toDouble)).via(seedFlow).runWith(Sink.foreach(println))
19 |
20 | // val driftFlow = Flow[Double].map(_ -> 0).extrapolate[(Double, Int)] { case (i, _) => Iterator.from(1).map(i -> _) }
21 | // Source((1 to 10).map(_.toDouble)).via(driftFlow).runForeach(println)
22 |
23 | val driftFlow = Flow[Double].expand(i => Iterator.from(0).map(i -> _))
24 | Source((1 to 10).map(_.toDouble)).via(driftFlow).runForeach(println)
25 |
26 | StdIn.readLine()
27 | system.terminate()
28 | }
29 |
--------------------------------------------------------------------------------
/example/src/main/scala/example/akkastream/dynamichub/MergeHubDemo.scala:
--------------------------------------------------------------------------------
1 | package example.akkastream.dynamichub
2 |
3 | import akka.NotUsed
4 | import akka.actor.ActorSystem
5 | import akka.stream.ActorMaterializer
6 | import akka.stream.scaladsl.{ MergeHub, RunnableGraph, Sink, Source }
7 | import com.typesafe.scalalogging.StrictLogging
8 |
9 | import scala.io.StdIn
10 |
11 | object MergeHubDemo extends App with StrictLogging {
12 | implicit val system = ActorSystem()
13 | implicit val mat = ActorMaterializer()
14 |
15 | // A simple consumer that will print to the console for now
16 | val consumer = Sink.foreach[String](v => logger.info(s"consumer: $v"))
17 |
18 | // Attach a MergeHub Source to the consumer. This will materialize to a
19 | // corresponding Sink.
20 | val runnableGraph: RunnableGraph[Sink[String, NotUsed]] =
21 | MergeHub.source[String](perProducerBufferSize = 16).to(consumer)
22 |
23 | // By running/materializing the consumer we get back a Sink, and hence
24 | // now have access to feed elements into it. This Sink can be materialized
25 | // any number of times, and every element that enters the Sink will
26 | // be consumed by our consumer.
27 | val toConsumer: Sink[String, NotUsed] = runnableGraph.run()
28 |
29 | // Feeding two independent sources into the hub.
30 | Source.single("Hello!").runWith(toConsumer)
31 | Source.single("Hub!").runWith(toConsumer)
32 |
33 | StdIn.readLine()
34 | system.terminate()
35 | }
36 |
--------------------------------------------------------------------------------
/example/src/main/scala/example/akkastream/dynamichub/SimplePublishSubscribe.scala:
--------------------------------------------------------------------------------
1 | package example.akkastream.dynamichub
2 |
3 | import akka.actor.ActorSystem
4 | import akka.stream.{ ActorMaterializer, KillSwitches, UniqueKillSwitch }
5 | import akka.stream.scaladsl.{ BroadcastHub, Flow, Keep, MergeHub, Sink, Source }
6 | import com.typesafe.scalalogging.StrictLogging
7 |
8 | import scala.io.StdIn
9 | import scala.concurrent.duration._
10 |
11 | object SimplePublishSubscribe extends App with StrictLogging {
12 | implicit val system = ActorSystem()
13 | implicit val mat = ActorMaterializer()
14 | import system.dispatcher
15 |
16 | val (sink, source) =
17 | MergeHub.source[String](perProducerBufferSize = 16).toMat(BroadcastHub.sink(bufferSize = 256))(Keep.both).run()
18 |
19 | source.runWith(Sink.ignore)
20 |
21 | val busFlow: Flow[String, String, UniqueKillSwitch] = Flow
22 | .fromSinkAndSource(sink, source)
23 | .joinMat(KillSwitches.singleBidi[String, String])(Keep.right)
24 | .backpressureTimeout(3.seconds)
25 |
26 | val switch: UniqueKillSwitch =
27 | Source.repeat("Hello world!").viaMat(busFlow)(Keep.right).to(Sink.foreach(v => logger.info(s"switch: $v"))).run()
28 |
29 | Thread.sleep(200)
30 | switch.shutdown()
31 |
32 | StdIn.readLine()
33 | system.terminate()
34 | }
35 |
--------------------------------------------------------------------------------
/example/src/main/scala/example/akkastream/graph/PartialGraph.scala:
--------------------------------------------------------------------------------
1 | package example.akkastream.graph
2 |
3 | import akka.actor.ActorSystem
4 | import akka.stream.scaladsl.{ Balance, Broadcast, Flow, GraphDSL, Keep, Merge, RunnableGraph, Sink, Source }
5 | import akka.stream.{ ActorMaterializer, FlowShape, SourceShape }
6 |
7 | import scala.concurrent.Future
8 | import scala.io.StdIn
9 |
10 | object PartialGraph extends App {
11 | implicit val system = ActorSystem()
12 | implicit val mat = ActorMaterializer()
13 | import system.dispatcher
14 |
15 | def partial =
16 | GraphDSL
17 | .create() { implicit b =>
18 | import GraphDSL.Implicits._
19 |
20 | val B = b.add(Broadcast[Int](2))
21 | val C = b.add(Merge[Int](2))
22 | val D = Flow[Int].map(_ + 1)
23 | val E = b.add(Balance[Int](2))
24 | val F = b.add(Merge[Int](2))
25 |
26 | C <~ F
27 | B ~> C ~> F
28 | B ~> D ~> E ~> F
29 |
30 | FlowShape(B.in, E.out(1))
31 | }
32 | .named("partial")
33 |
34 | // Convert partial from a FlowShape into a Flow, which gives access to the fluent stream DSL (e.g. the .filter() method).
35 | val flow = Flow.fromGraph(partial)
36 |
37 | val source = Source.fromGraph(GraphDSL.create() { implicit b =>
38 | import GraphDSL.Implicits._
39 | val merge = b.add(Merge[Int](2))
40 | Source.single(0) ~> merge
41 | Source(List(2, 3, 4)) ~> merge
42 | SourceShape(merge.out)
43 | })
44 |
45 | val sink: Sink[Int, Future[Int]] = Flow[Int].map(_ * 2).drop(10).named("nestedFlow").toMat(Sink.head)(Keep.right)
46 |
47 | val closed: RunnableGraph[Future[Int]] =
48 | source.via(flow.filter(_ > 1)).toMat(sink)(Keep.right)
49 |
50 | closed.run().foreach(println)
51 |
52 | StdIn.readLine()
53 | system.terminate()
54 | }
55 |
--------------------------------------------------------------------------------
/example/src/multi-jvm/scala/sample/MultiNodeSampleSpec.scala:
--------------------------------------------------------------------------------
1 | package sample
2 |
3 | import akka.actor.{Actor, Props}
4 | import akka.remote.testconductor.RoleName
5 | import akka.remote.testkit.{MultiNodeConfig, MultiNodeSpec}
6 | import akka.testkit.ImplicitSender
7 | import mass.STMultiNodeSpec
8 |
9 | object MultiNodeSampleConfig extends MultiNodeConfig {
10 | val node1: RoleName = role("node1")
11 | val node2: RoleName = role("node2")
12 | }
13 |
14 | object MultiNodeSampleSpec {
15 |
16 | class Ponger extends Actor {
17 | def receive: Receive = {
18 | case "ping" => sender() ! "pong"
19 | }
20 | }
21 |
22 | }
23 |
24 | class MultiNodeSampleSpec extends MultiNodeSpec(MultiNodeSampleConfig) with STMultiNodeSpec with ImplicitSender {
25 |
26 | import MultiNodeSampleSpec._
27 | import MultiNodeSampleConfig._
28 |
29 | // Number of participating nodes; each subsequent barrier (enterBarrier) waits until this many nodes have reached it before the code after it runs.
30 | def initialParticipants: Int = roles.size
31 |
32 | "A MultiNodeSampleSpec" must {
33 |
34 | "wait for all nodes to enter a barrier" in {
35 | enterBarrier("startup")
36 | }
37 |
38 | "send to and receive from a remote node" in {
39 | runOn(node1) {
40 | // Enter the "deployed" barrier, waiting for the other node to finish creating its actor.
41 | enterBarrier("deployed")
42 | val ponger = system.actorSelection(node(node2) / "user" / "ponger")
43 | ponger ! "ping"
44 | import scala.concurrent.duration._
45 | expectMsg(10.seconds, "pong") // block until the reply arrives and assert it, with a 10-second timeout
46 | }
47 |
48 | runOn(node2) {
49 | system.actorOf(Props[Ponger], "ponger")
50 | // Create the actor first, then enter the "deployed" barrier.
51 | enterBarrier("deployed")
52 | }
53 |
54 | enterBarrier("finished")
55 | }
56 | }
57 | }
58 |
59 | class MultiNodeSampleSpecMultiJvmNode1 extends MultiNodeSampleSpec
60 | class MultiNodeSampleSpecMultiJvmNode2 extends MultiNodeSampleSpec
61 |
--------------------------------------------------------------------------------
/example/src/multi-jvm/scala/sample/Sample.scala:
--------------------------------------------------------------------------------
1 | /**
2 | * multi-jvm:run sample.Sample // {TestName}MultiJvm{NodeName}
3 | */
4 | package sample
5 |
6 | object SampleMultiJvmNode1 {
7 | def main(args: Array[String]): Unit = {
8 | println("Hello from node 1")
9 | }
10 | }
11 |
12 | object SampleMultiJvmNode2 {
13 | def main(args: Array[String]): Unit = {
14 | println("Hello from node 2")
15 | }
16 | }
17 |
18 | object SampleMultiJvmNode3 {
19 | def main(args: Array[String]): Unit = {
20 | println("Hello from node 3")
21 | }
22 | }
23 |
--------------------------------------------------------------------------------
/example/src/multi-jvm/scala/sample/SampleMultiJvmNode1.opts:
--------------------------------------------------------------------------------
1 | -Dakka.remote.port=9991 -Xmx256m
2 |
--------------------------------------------------------------------------------
/example/src/multi-jvm/scala/sample/SampleMultiJvmNode2.opts:
--------------------------------------------------------------------------------
1 | -Dakka.remote.port=9992 -Xmx256m
2 |
--------------------------------------------------------------------------------
/example/src/multi-jvm/scala/sample/SampleMultiJvmNode3.opts:
--------------------------------------------------------------------------------
1 | -Dakka.remote.port=9993 -Xmx256m
2 |
--------------------------------------------------------------------------------
/example/src/multi-jvm/scala/sample/Sepc.scala:
--------------------------------------------------------------------------------
1 | package sample
2 |
3 | import org.scalatest.{MustMatchers, WordSpec}
4 |
5 | class SpecMultiJvmNode1 extends WordSpec with MustMatchers {
6 | "A node" should {
7 | "be able to say hello" in {
8 | val message = "Hello from node 1"
9 | message must be("Hello from node 1")
10 | }
11 | }
12 | }
13 |
14 | class SpecMultiJvmNode2 extends WordSpec with MustMatchers {
15 | "A node" should {
16 | "be able to say hello" in {
17 | val message = "Hello from node 2"
18 | message must be("Hello from node 2")
19 | }
20 | }
21 | }
22 |
--------------------------------------------------------------------------------
/licenses/jsch.txt:
--------------------------------------------------------------------------------
1 | JSch 0.0.* was released under the GNU LGPL license. Later, we have switched
2 | over to a BSD-style license.
3 |
4 | ------------------------------------------------------------------------------
5 | Copyright (c) 2002-2015 Atsuhiko Yamanaka, JCraft,Inc.
6 | All rights reserved.
7 |
8 | Redistribution and use in source and binary forms, with or without
9 | modification, are permitted provided that the following conditions are met:
10 |
11 | 1. Redistributions of source code must retain the above copyright notice,
12 | this list of conditions and the following disclaimer.
13 |
14 | 2. Redistributions in binary form must reproduce the above copyright
15 | notice, this list of conditions and the following disclaimer in
16 | the documentation and/or other materials provided with the distribution.
17 |
18 | 3. The names of the authors may not be used to endorse or promote products
19 | derived from this software without specific prior written permission.
20 |
21 | THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESSED OR IMPLIED WARRANTIES,
22 | INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
23 | FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL JCRAFT,
24 | INC. OR ANY CONTRIBUTORS TO THIS SOFTWARE BE LIABLE FOR ANY DIRECT, INDIRECT,
25 | INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
26 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
27 | OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
28 | LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
29 | NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
30 | EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 |
--------------------------------------------------------------------------------
/mass-api-service/docs/api-service-design.md:
--------------------------------------------------------------------------------
1 | # API Service Design
2 |
3 | - Category: used to classify resources; categories may be nested into a hierarchy.
4 | - Resource: uniquely identifies a piece of resource data; every resource must belong to some category level.
5 |
6 | The API service exposes a RESTful interface over HTTP and serializes data as JSON.
7 |
8 | **Resource retrieval URI:**
9 |
10 | ```
11 | GET /category/[<category1>/<category2>/....]/resource/<resourceId>
12 | ```
13 |
14 | **Paged query:**
15 |
16 | ```
17 | GET /category/[<category1>/<category2>/....]?page=&size=&pagerId=
18 | ```
19 |
20 | or
21 |
22 | ```
23 | PUT /category/[<category1>/<category2>/....]/_search
24 |
25 | {
26 | "page": 1,
27 | "size": 30,
28 | "pagerId":
29 | }
30 | ```
31 |
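32 | **Sketch of a paginated response (illustrative only; field names other than page/size/pagerId are assumptions, not fixed by this document):**
33 |
34 | ```json
35 | {
36 |   "page": 1,
37 |   "size": 30,
38 |   "pagerId": "opaque-cursor",
39 |   "content": [
40 |     { "resourceId": "r-001" },
41 |     { "resourceId": "r-002" }
42 |   ]
43 | }
44 | ```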
--------------------------------------------------------------------------------
/mass-api-service/src/main/scala/mass/apiservice/boot/ApiServiceMain.scala:
--------------------------------------------------------------------------------
1 | package mass.apiservice.boot
2 |
3 | object ApiServiceMain extends App {}
4 |
--------------------------------------------------------------------------------
/mass-connector/src/main/resources/reference.conf:
--------------------------------------------------------------------------------
1 | fusion.http.default {
2 | server {
3 | host = "127.0.0.1"
4 | port = 0
5 | }
6 | }
7 |
8 | mass.connector {
9 | parsers = ["mass.connector.sql.SQLConnectorParser"]
10 | }
--------------------------------------------------------------------------------
/mass-connector/src/main/scala/mass/connector/Connector.scala:
--------------------------------------------------------------------------------
1 | package mass.connector
2 |
3 | import helloscala.common.Configuration
4 | import mass.connector.ConnectorType.ConnectorType
5 |
6 | /**
7 | * Connector types.
8 | */
9 | object ConnectorType extends Enumeration {
10 | type ConnectorType = Value
11 | val JDBC = Value(1, "jdbc")
12 | val HDFS = Value("hdfs")
13 | val HIVE = Value("hive")
14 | val HBase = Value("hbase")
15 | val CSV = Value("csv")
16 | val Xlsx = Value("xlsx")
17 | val Xls = Value("xls")
18 | val FTP = Value("ftp")
19 | val Elasticsearch = Value("elasticsearch")
20 | val MongoDB = Value("mongodb")
21 | val Cassandra = Value("cassandra")
22 | }
23 |
24 | // #Connector
25 | /**
26 | * Connector
27 | * -> SQL, CSV, Excel ……
28 | * Connector2(Source) ->
29 | * <-> Flow1, Flow2, .... <-> the elements exchanged count as DataElements
30 | * -> Connector2(Sink)
31 | * Data connector.
32 | */
33 | trait Connector extends AutoCloseable {
34 | /**
35 | * The connector name, set by the user. It should remain unique for the entire lifetime of the application.
36 | */
37 | def name: String
38 |
39 | /**
40 | * The connector type. Different types carry different configuration options and data access methods.
41 | */
42 | def `type`: ConnectorType
43 |
44 | def setting: ConnectorSetting
45 |
46 | def configuration: Configuration = setting.parameters
47 | }
48 | // #Connector
49 |
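50 | // Minimal sketch of a concrete implementation (hypothetical, for illustration;
51 | // see SQLConnector in this module for the real JDBC-backed one):
52 | //
53 | //   final case class CsvConnector(name: String, setting: ConnectorSetting) extends Connector {
54 | //     override def `type`: ConnectorType = ConnectorType.CSV
55 | //     override def close(): Unit = () // a plain file connector holds no pooled resources
56 | //   }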
--------------------------------------------------------------------------------
/mass-connector/src/main/scala/mass/connector/ConnectorParser.scala:
--------------------------------------------------------------------------------
1 | package mass.connector
2 |
3 | trait ConnectorParser {
4 | def `type`: String
5 |
6 | def parseFromXML(node: scala.xml.Node): Connector
7 |
8 | override def toString = s"SQLConnectorParser(${`type`})"
9 | }
10 |
--------------------------------------------------------------------------------
/mass-connector/src/main/scala/mass/connector/ConnectorSetting.scala:
--------------------------------------------------------------------------------
1 | package mass.connector
2 |
3 | import helloscala.common.Configuration
4 |
5 | /**
6 | * Connector settings, including but not limited to:
7 | * Source: JDBC
8 | */
9 | case class ConnectorSetting(parameters: Configuration)
10 |
--------------------------------------------------------------------------------
/mass-connector/src/main/scala/mass/connector/DataElement.scala:
--------------------------------------------------------------------------------
1 | package mass.connector
2 |
3 | /**
4 | * A data element: SQL (ResultSet), CQL (ResultSet), MongoDB (Document), CSV (line), Excel (row)
5 | */
6 | trait DataElement {}
7 |
--------------------------------------------------------------------------------
/mass-connector/src/main/scala/mass/connector/boot/ConnectorMain.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) Yangbajing 2018
3 | *
4 | * This is the custom License of Yangbajing
5 | */
6 |
7 | package mass.connector.boot
8 |
9 | import com.typesafe.config.ConfigFactory
10 | import mass.Mass
11 | import mass.connector.ConnectorSystem
12 |
13 | object ConnectorMain extends App {
14 | ConnectorSystem(Mass.fromConfig(ConfigFactory.load()).classicSystem)
15 | }
16 |
--------------------------------------------------------------------------------
/mass-connector/src/main/scala/mass/connector/sql/Databases.scala:
--------------------------------------------------------------------------------
1 | package mass.connector.sql
2 |
3 | import java.util.concurrent.atomic.AtomicInteger
4 |
5 | case class Database(id: Int, name: String)
6 |
7 | object Databases {
8 | val idSeq = new AtomicInteger()
9 | var databases = Vector.empty[Database]
10 |
11 | def register(name: String): Vector[Database] = {
12 | databases :+= Database(idSeq.getAndIncrement(), name)
13 | databases
14 | }
15 | }
16 |
--------------------------------------------------------------------------------
/mass-connector/src/main/scala/mass/connector/sql/EventDataSql.scala:
--------------------------------------------------------------------------------
1 | package mass.connector.sql
2 |
3 | import mass.core.event.{ EventData, Events }
4 |
5 | case class EventDataSql(data: JdbcResultSet) extends EventData {
6 | override def `type`: String = EventDataSql.TYPE
7 | }
8 |
9 | object EventDataSql {
10 | val TYPE = "data/sql"
11 |
12 | Events.registerType(TYPE)
13 | }
14 |
--------------------------------------------------------------------------------
/mass-connector/src/main/scala/mass/connector/sql/JdbcSink.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) Yangbajing 2018
3 | *
4 | * This is the custom License of Yangbajing
5 | */
6 |
7 | package mass.connector.sql
8 |
9 | import java.sql.PreparedStatement
10 |
11 | import akka.stream.scaladsl.Sink
12 | import fusion.jdbc.ConnectionPreparedStatementCreator
13 | import fusion.jdbc.util.JdbcUtils
14 | import javax.sql.DataSource
15 |
16 | import scala.concurrent.Future
17 |
18 | object JdbcSink {
19 | def apply(creator: ConnectionPreparedStatementCreator, batchSize: Int = 100)(
20 | implicit dataSource: DataSource): Sink[Iterable[Any], Future[JdbcSinkResult]] =
21 | apply(creator, (args: Iterable[Any], stmt) => JdbcUtils.setStatementParameters(stmt, args), batchSize)
22 |
23 | def apply[T](creator: ConnectionPreparedStatementCreator, action: (T, PreparedStatement) => Unit, batchSize: Int)(
24 | implicit dataSource: DataSource): Sink[T, Future[JdbcSinkResult]] =
25 | Sink.fromGraph(new JdbcSinkStage[T](dataSource, creator, action, batchSize))
26 | }
27 |
--------------------------------------------------------------------------------
/mass-connector/src/main/scala/mass/connector/sql/JdbcSinkResult.scala:
--------------------------------------------------------------------------------
1 | package mass.connector.sql
2 |
3 | import scala.collection.immutable
4 |
5 | case class JdbcSinkResult(count: Long, results: immutable.Seq[immutable.Seq[Int]])
6 |
--------------------------------------------------------------------------------
/mass-connector/src/main/scala/mass/connector/sql/JdbcSource.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) Yangbajing 2018
3 | *
4 | * This is the custom License of Yangbajing
5 | */
6 |
7 | package mass.connector.sql
8 |
9 | import java.sql.ResultSet
10 |
11 | import akka.NotUsed
12 | import akka.stream.scaladsl.Source
13 | import fusion.jdbc.ConnectionPreparedStatementCreator
14 | import fusion.jdbc.util.JdbcUtils
15 | import javax.sql.DataSource
16 |
17 | object JdbcSource {
18 | def apply(sql: String, args: Iterable[Any], fetchRowSize: Int)(
19 | implicit dataSource: DataSource): Source[ResultSet, NotUsed] =
20 | Source.fromGraph(new JdbcSourceStage(dataSource, conn => {
21 | val stmt = conn.prepareStatement(sql, ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY)
22 | JdbcUtils.setStatementParameters(stmt, args)
23 | }, fetchRowSize))
24 |
25 | def apply(creator: ConnectionPreparedStatementCreator, fetchRowSize: Int)(
26 | implicit dataSource: DataSource): Source[ResultSet, NotUsed] =
27 | Source.fromGraph(new JdbcSourceStage(dataSource, creator, fetchRowSize))
28 | }
29 |
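30 | // Pipeline sketch (hypothetical; assumes an implicit DataSource, a running ActorSystem
31 | // with a materializer in scope, and t_user/t_user_copy tables with id and name columns):
32 | //
33 | //   implicit val dataSource: DataSource = TestSchema.postgres
34 | //   JdbcSource("select id, name from t_user where age > ?", List(18), fetchRowSize = 200)
35 | //     .map(rs => List(rs.getInt("id"), rs.getString("name")))
36 | //     .runWith(JdbcSink(conn => conn.prepareStatement("insert into t_user_copy(id, name) values (?, ?)"), batchSize = 100))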
--------------------------------------------------------------------------------
/mass-connector/src/main/scala/mass/connector/sql/SQLConnector.scala:
--------------------------------------------------------------------------------
1 | package mass.connector.sql
2 |
3 | import com.zaxxer.hikari.HikariDataSource
4 | import fusion.jdbc.JdbcTemplate
5 | import fusion.jdbc.util.JdbcUtils
6 | import mass.connector.ConnectorType.ConnectorType
7 | import mass.connector.{ Connector, ConnectorSetting, ConnectorType }
8 |
9 | /**
10 | *
11 | */
12 | final case class SQLConnector(name: String, setting: ConnectorSetting) extends Connector {
13 | override def `type`: ConnectorType = ConnectorType.JDBC
14 |
15 | lazy val dataSource: HikariDataSource = JdbcUtils.createHikariDataSource(configuration)
16 | lazy val jdbcTemplate = JdbcTemplate(
17 | dataSource,
18 | configuration.getOrElse("use-transaction", true),
19 | configuration.getOrElse("ignore-warnings", true),
20 | configuration.getOrElse("allow-print-log", false))
21 |
22 | override def close(): Unit = dataSource.close()
23 | }
24 |
--------------------------------------------------------------------------------
/mass-connector/src/main/scala/mass/connector/sql/SQLConnectorParser.scala:
--------------------------------------------------------------------------------
1 | package mass.connector.sql
2 |
3 | import java.util.Properties
4 |
5 | import helloscala.common.Configuration
6 | import mass.connector.{ ConnectorParser, ConnectorSetting }
7 | import mass.core.XmlUtils
8 |
9 | import scala.xml.Node
10 |
11 | class SQLConnectorParser extends ConnectorParser {
12 | import mass.core.XmlUtils.XmlRich
13 |
14 | override val `type` = "jdbc"
15 |
16 | def parseSettingFromXML(node: Node): ConnectorSetting = {
17 | val props = new Properties()
18 |
19 | val id = node.attr("name")
20 | props.put("poolName", id)
21 | (node \\ "props" \\ "prop").foreach { prop =>
22 | val key = (prop \\ "@key").text
23 | val value = getText(prop)
24 | props.put(key, value)
25 | }
26 | ConnectorSetting(Configuration.load(props))
27 | }
28 |
29 | override def parseFromXML(node: Node): SQLConnector = {
30 | val setting = parseSettingFromXML(node)
31 | SQLConnector(node.attr("name"), setting)
32 | }
33 |
34 | @inline private def getText(prop: Node): String =
35 | prop.getAttr("value").getOrElse(XmlUtils.text(prop \ "value"))
36 | }
37 |
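38 | // Shape of the XML this parser accepts (illustrative sketch inferred from the code above):
39 | //
40 | //   <connector name="pg-primary" type="jdbc">
41 | //     <props>
42 | //       <prop key="jdbcUrl" value="jdbc:postgresql://localhost:5432/massdata"/>
43 | //       <prop key="username"><value>massdata</value></prop>
44 | //     </props>
45 | //   </connector>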
--------------------------------------------------------------------------------
/mass-connector/src/main/scala/mass/connector/sql/package.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) Yangbajing 2018
3 | *
4 | * This is the custom License of Yangbajing
5 | */
6 |
7 | package mass.connector
8 |
9 | package object sql {
10 | // type JdbcSinkResult = immutable.Seq[immutable.Seq[Int]]
11 | }
12 |
--------------------------------------------------------------------------------
/mass-connector/src/main/scala/mass/connector/sql/schema/Schemas.scala:
--------------------------------------------------------------------------------
1 | package mass.connector.sql.schema
2 |
3 | object SqlSchemaType extends Enumeration {
4 | type SqlSchemaType = Value
5 | val Postgres = Value(1)
6 | val MySQL = Value
7 | val Oracle = Value
8 | val DM = Value
9 | }
10 |
11 | object Schemas {}
12 |
--------------------------------------------------------------------------------
/mass-connector/src/test/resources/application.conf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yangbajing/fusion-data-archived/eb5e5ef5d1ec667e6087652bcee4368e633a03e3/mass-connector/src/test/resources/application.conf
--------------------------------------------------------------------------------
/mass-connector/src/test/resources/logback-test.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="UTF-8"?>
2 | <configuration>
3 |   <appender name="FILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
4 |     <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
5 |       <fileNamePattern>./logs/application_%d{yyyy-MM-dd}.log</fileNamePattern>
6 |     </rollingPolicy>
7 |     <encoder>
8 |       <pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{50} - %msg%n</pattern>
9 |     </encoder>
10 |   </appender>
11 |
12 |   <appender name="CONSOLE" class="ch.qos.logback.core.ConsoleAppender">
13 |     <encoder>
14 |       <pattern>%date{HH:mm:ss.SSS} %coloredLevel %logger{50} - %message%n%xException{10}</pattern>
15 |     </encoder>
16 |   </appender>
17 |
18 |   <root level="INFO">
19 |     <appender-ref ref="CONSOLE"/>
20 |   </root>
21 | </configuration>
--------------------------------------------------------------------------------
/mass-connector/src/test/scala/kafkasample/demo/Consumer.scala:
--------------------------------------------------------------------------------
1 | package kafkasample.demo
2 |
3 | import java.util.{ Collections, Properties }
4 | import java.util.concurrent.TimeUnit
5 |
6 | import org.apache.kafka.clients.consumer.KafkaConsumer
7 |
8 | object Consumer {
9 | @volatile private var isStop = false
10 |
11 | def main(args: Array[String]): Unit = {
12 | val props = new Properties()
13 | props.put("bootstrap.servers", "localhost:9092")
14 | props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer")
15 | props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer")
16 | props.put("group.id", "CountryCounter")
17 | val consumer = new KafkaConsumer[String, String](props)
18 | val thread = new Thread() {
19 | override def run(): Unit = Consumer.run(consumer)
20 | }
21 | try {
22 | thread.start()
23 | } finally {
24 | TimeUnit.SECONDS.sleep(50)
25 | isStop = true
26 | thread.join()
27 | consumer.close()
28 | }
29 | }
30 |
31 | private def run(consumer: KafkaConsumer[String, String]): Unit = {
32 | consumer.subscribe(Collections.singleton("customerCountries"))
33 | while (!isStop && !Thread.currentThread().isInterrupted) {
34 | val records = consumer.poll(java.time.Duration.ofMillis(100))
35 | records.forEach { record =>
36 | println(s"topic = ${record.topic()}, partition = ${record.partition()}, offset = ${record
37 | .offset()}, key: ${record.key()}, value = ${record.value()}")
38 | }
39 | consumer.commitAsync()
40 | }
41 | }
42 | }
43 |
--------------------------------------------------------------------------------
/mass-connector/src/test/scala/kafkasample/demo/Customer.scala:
--------------------------------------------------------------------------------
1 | package kafkasample.demo
2 |
3 | import java.nio.ByteBuffer
4 | import java.nio.charset.StandardCharsets
5 | import java.util
6 |
7 | import helloscala.common.util.StringUtils
8 | import org.apache.kafka.common.serialization.Serializer
9 |
10 | case class Customer(customerId: Int, customerName: String) {}
11 |
12 | class CustomerSerializer extends Serializer[Customer] {
13 | private val EMPTY_NAME = Array[Byte]()
14 |
15 | override def configure(configs: util.Map[String, _], isKey: Boolean): Unit = {}
16 |
17 | override def serialize(topic: String, data: Customer): Array[Byte] =
18 | if (data eq null) {
19 | null
20 | } else {
21 | var nameLen = 0
22 | var nameBytes = EMPTY_NAME
23 | if (StringUtils.isNoneBlank(data.customerName)) {
24 | nameBytes = data.customerName.getBytes(StandardCharsets.UTF_8)
25 | nameLen = nameBytes.length // byte length, not char count: UTF-8 may use several bytes per char
26 | }
27 |
28 | val buf = ByteBuffer.allocate(4 + 4 + nameLen)
29 | buf.putInt(data.customerId)
30 | buf.putInt(nameLen)
31 | buf.put(nameBytes)
32 |
33 | buf.array()
34 | }
35 |
36 | override def close(): Unit = {} // nothing to release; ??? would throw when the producer closes
37 | }
38 |
--------------------------------------------------------------------------------
/mass-connector/src/test/scala/kafkasample/demo/Producer.scala:
--------------------------------------------------------------------------------
1 | package kafkasample.demo
2 |
3 | import java.util.Properties
4 | import java.util.concurrent.TimeUnit
5 |
6 | import org.apache.kafka.clients.producer.{ KafkaProducer, ProducerRecord, RecordMetadata }
7 |
8 | object Producer {
9 | def main(args: Array[String]): Unit = {
10 | val props = new Properties()
11 | props.put("bootstrap.servers", "localhost:9092")
12 | props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer")
13 | props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer")
14 |
15 | val producer = new KafkaProducer[String, String](props)
16 | try {
17 | run(producer)
18 | } finally {
19 | TimeUnit.SECONDS.sleep(5)
20 | producer.close()
21 | }
22 | }
23 |
24 | private def run(producer: KafkaProducer[String, String]): Unit = {
25 | val record =
26 | new ProducerRecord[String, String]("customerCountries", "羊八井222")
27 | producer.send(record, (metadata: RecordMetadata, e: Exception) => {
28 | if (e ne null) {
29 | e.printStackTrace()
30 | }
31 | println(s"metadata: $metadata")
32 | })
33 | }
34 | }
35 |
--------------------------------------------------------------------------------
/mass-connector/src/test/scala/mass/connector/sql/TestSchema.scala:
--------------------------------------------------------------------------------
1 | package mass.connector.sql
2 |
3 | import com.zaxxer.hikari.HikariDataSource
4 | import fusion.jdbc.util.JdbcUtils
5 |
6 | object TestSchema {
7 | lazy val postgres: HikariDataSource = JdbcUtils.createHikariDataSource(
8 | "poolName" -> "postgres",
9 | "maximumPoolSize" -> "2",
10 | "dataSourceClassName" -> "org.postgresql.ds.PGSimpleDataSource",
11 | "dataSource.serverName" -> "localhost",
12 | "dataSource.portNumber" -> "5432",
13 | "dataSource.databaseName" -> "massdata",
14 | "dataSource.user" -> "massdata",
15 | "dataSource.password" -> "massdata")
16 |
17 | lazy val mysql: HikariDataSource = JdbcUtils.createHikariDataSource(
18 | "poolName" -> "mysql",
19 | "maximumPoolSize" -> "2",
20 | "jdbcUrl" -> "jdbc:mysql://localhost:3306/massdata?useSSL=false&characterEncoding=utf8",
21 | "username" -> "massdata",
22 | "password" -> "Massdata.2018",
23 | "dataSource.cachePrepStmts" -> "true",
24 | "dataSource.prepStmtCacheSize" -> "250",
25 | "dataSource.prepStmtCacheSqlLimit" -> "2048")
26 | }
27 |
--------------------------------------------------------------------------------
/mass-connector/src/test/scala/mass/connector/sql/schema/SQLSchemaTest.scala:
--------------------------------------------------------------------------------
1 | package mass.connector.sql.schema
2 |
3 | import fusion.testkit.FusionWordSpecLike
4 | import mass.connector.sql.TestSchema
5 | import org.scalatest.BeforeAndAfterAll
6 |
7 | import scala.collection.immutable
8 |
9 | class SQLSchemaTest extends FusionWordSpecLike with BeforeAndAfterAll {
10 | override protected def afterAll(): Unit = {
11 | TestSchema.postgres.close()
12 | super.afterAll()
13 | }
14 |
15 | "schema-postgres" should {
16 | val schema = PostgresSchema(TestSchema.postgres)
17 | var tables = immutable.Seq.empty[TableInfo]
18 |
19 | "listTable" in {
20 | tables = schema.listTable("public")
21 | tables should not be empty
22 | val table = tables.head
23 | table.schemaName shouldBe "public"
24 | tables.foreach(println)
25 | }
26 |
27 | "listColumn" in {
28 | val columns =
29 | schema.listColumn(tables.head.tableName, tables.head.schemaName)
30 | columns should not be empty
31 | columns.foreach(println)
32 | }
33 | }
34 | }
35 |
--------------------------------------------------------------------------------
/mass-console/src/main/resources/logback.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="UTF-8"?>
2 | <configuration>
3 |   <appender name="FILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
4 |     <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
5 |       <fileNamePattern>./logs/application_%d{yyyy-MM-dd}.log</fileNamePattern>
6 |     </rollingPolicy>
7 |     <encoder>
8 |       <pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{50} - %msg%n</pattern>
9 |     </encoder>
10 |   </appender>
11 |
12 |   <appender name="CONSOLE" class="ch.qos.logback.core.ConsoleAppender">
13 |     <encoder>
14 |       <pattern>%date{HH:mm:ss.SSS} %coloredLevel %logger{50} - %message%n%xException{10}</pattern>
15 |     </encoder>
16 |   </appender>
17 |
18 |   <root level="INFO">
19 |     <appender-ref ref="CONSOLE"/>
20 |   </root>
21 | </configuration>
--------------------------------------------------------------------------------
/mass-console/src/main/resources/reference.conf:
--------------------------------------------------------------------------------
1 |
2 | akka {
3 | remote {
4 | artery {
5 | enabled = on
6 | canonical.hostname = "127.0.0.1"
7 | canonical.port = 30001
8 | }
9 | }
10 |
11 | cluster {
12 | roles = ["console"]
13 | }
14 | }
15 |
16 | fusion.http.default.server {
17 | host = "127.0.0.1"
18 | port = 30000
19 | }
20 |
21 | mass {
22 | cluster {
23 | seeds = ["127.0.0.1:30011"]
24 | }
25 | }
26 |
--------------------------------------------------------------------------------
/mass-console/src/main/scala/mass/console/ConsoleMain.scala:
--------------------------------------------------------------------------------
1 | package mass.console
2 |
3 | object ConsoleMain {
4 | def main(args: Array[String]): Unit = {}
5 | }
6 |
--------------------------------------------------------------------------------
/mass-console/src/main/scala/mass/console/web/api/Routes.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) Yangbajing 2018
3 | *
4 | * This is the custom License of Yangbajing
5 | */
6 |
7 | package mass.console.web.api
8 |
9 | class Routes {}
10 |
--------------------------------------------------------------------------------
/mass-console/src/test/resources/application.conf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yangbajing/fusion-data-archived/eb5e5ef5d1ec667e6087652bcee4368e633a03e3/mass-console/src/test/resources/application.conf
--------------------------------------------------------------------------------
/mass-console/src/test/resources/logback-test.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="UTF-8"?>
2 | <configuration>
3 |   <appender name="FILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
4 |     <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
5 |       <fileNamePattern>./logs/application_%d{yyyy-MM-dd}.log</fileNamePattern>
6 |     </rollingPolicy>
7 |     <encoder>
8 |       <pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{50} - %msg%n</pattern>
9 |     </encoder>
10 |   </appender>
11 |
12 |   <appender name="CONSOLE" class="ch.qos.logback.core.ConsoleAppender">
13 |     <encoder>
14 |       <pattern>%date{HH:mm:ss.SSS} %coloredLevel %logger{50} - %message%n%xException{10}</pattern>
15 |     </encoder>
16 |   </appender>
17 |
18 |   <root level="INFO">
19 |     <appender-ref ref="CONSOLE"/>
20 |   </root>
21 | </configuration>
--------------------------------------------------------------------------------
/mass-console/src/test/scala/mass/console/AkkaHttpTest.scala:
--------------------------------------------------------------------------------
1 | package mass.console
2 |
3 | import akka.actor.ActorSystem
4 | import akka.http.scaladsl.Http
5 | import akka.http.scaladsl.model._
6 | import akka.http.scaladsl.unmarshalling.Unmarshal
7 | import akka.stream.ActorMaterializer
8 |
9 | import scala.concurrent.Future
10 |
11 | case class SpidFacebookData(
12 | AppId: String,
13 | Type: String,
14 | Application: String,
15 | ExpiresAt: Long,
16 | IsValid: Boolean,
17 | IssuedAt: Long,
18 | Scopes: Array[String],
19 | UserId: String)
20 |
21 | class AkkaHttpTest {
22 | implicit val system = ActorSystem()
23 | implicit val mat = ActorMaterializer()
24 |
25 | import system.dispatcher
26 |
27 | // singleRequest requires an absolute URI (host/port assumed from reference.conf);
28 | // Content-Type must be carried by the entity, not the raw header list.
29 | val resultFuture: Future[HttpResponse] = Http().singleRequest(
30 | HttpRequest(
31 | HttpMethods.POST, "http://127.0.0.1:30000/api/signin",
32 | entity = HttpEntity(ContentTypes.`application/json`, """{"account":"yangbajing@gmail.com", "password": "yangbajing"}""")))
33 |
34 | resultFuture.flatMap(httpResponse => Unmarshal(httpResponse.entity).to[String]).foreach { str =>
35 | println(str)
36 | }
37 | }
38 |
--------------------------------------------------------------------------------
/mass-console/src/universal/conf/application.ini:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yangbajing/fusion-data-archived/eb5e5ef5d1ec667e6087652bcee4368e633a03e3/mass-console/src/universal/conf/application.ini
--------------------------------------------------------------------------------
/mass-console/src/universal/conf/dev.conf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yangbajing/fusion-data-archived/eb5e5ef5d1ec667e6087652bcee4368e633a03e3/mass-console/src/universal/conf/dev.conf
--------------------------------------------------------------------------------
/mass-console/src/universal/conf/jvmopts:
--------------------------------------------------------------------------------
1 | -Xms512M
2 | -Xmx2G
3 |
--------------------------------------------------------------------------------
/mass-console/src/universal/conf/logback.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="UTF-8"?>
2 | <configuration>
3 |   <appender name="FILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
4 |     <file>${application.home:-.}/logs/application.log</file>
5 |     <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
6 |       <fileNamePattern>${application.home:-.}/logs/application_%d{yyyy-MM-dd}.gz</fileNamePattern>
7 |       <maxHistory>30</maxHistory>
8 |     </rollingPolicy>
9 |     <encoder>
10 |       <pattern>%date{yyyy-MM-dd HH:mm:ss.SSS} [%level] from %logger in %thread - %message%n%xException</pattern>
11 |     </encoder>
12 |   </appender>
13 |
14 |   <root level="INFO">
15 |     <appender-ref ref="FILE"/>
16 |   </root>
17 | </configuration>
--------------------------------------------------------------------------------
/mass-console/src/universal/conf/prod.conf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yangbajing/fusion-data-archived/eb5e5ef5d1ec667e6087652bcee4368e633a03e3/mass-console/src/universal/conf/prod.conf
--------------------------------------------------------------------------------
/mass-console/src/universal/conf/test.conf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yangbajing/fusion-data-archived/eb5e5ef5d1ec667e6087652bcee4368e633a03e3/mass-console/src/universal/conf/test.conf
--------------------------------------------------------------------------------
/mass-core-ext/src/main/resources/reference.conf:
--------------------------------------------------------------------------------
1 | akka {
2 | actor {
3 | provider = cluster
4 | }
5 |
6 | remote {
7 | artery {
8 | enabled = on
9 | //canonical.hostname = "127.0.0.1"
10 | //canonical.port = 30011
11 | }
12 | }
13 |
14 | //extensions += "akka.cluster.metrics.ClusterMetricsExtension"
15 | coordinated-shutdown.terminate-actor-system = on
16 |
17 | cluster {
18 | //use-dispatcher = cluster-dispatcher
19 | shutdown-after-unsuccessful-join-seed-nodes = 20s
20 | //roles = ["broker"]
21 | //seeds = ["127.0.0.1:30011"]
22 | //use-dispatcher = cluster-dispatcher
23 | //metrics.native-library-extract-folder = ${user.dir}/target/native
24 | }
25 | }
26 |
27 | cluster-dispatcher {
28 | type = "Dispatcher"
29 | executor = "fork-join-executor"
30 | fork-join-executor {
31 | parallelism-min = 2
32 | parallelism-max = 4
33 | }
34 | }
35 |
36 | fusion.jdbc.default {
37 | poolName = "pg-primary"
38 | jdbcUrl = "jdbc:postgresql://127.0.0.1:5432/massdata?reWriteBatchedInserts=true&targetServerType=master"
39 | username = "massdata"
40 | password = "Massdata.2018"
41 | connectionTestQuery = "select 1;"
42 | maximumPoolSize = 2
43 | allowPrintLog = true
44 | # Slick configuration parameters below
45 | numThreads = 2
46 | }
47 | fusion.inject {
48 | modules.enabled += mass.core.module.MassCoreModule
49 | }
50 |
51 | mass {
52 | core.persistence {
53 | }
54 |
55 | job {
56 | job-saved-dir = "/tmp/mass/job/job-saved"
57 | }
58 |
59 | cluster {
60 | protocol = "akka"
61 | seeds = []
62 | }
63 |
64 | }
65 |
--------------------------------------------------------------------------------
/mass-core-ext/src/main/scala/mass/core/ext/MassApplication.scala:
--------------------------------------------------------------------------------
1 | package mass.core.ext
2 |
3 | import akka.actor.ActorSystem
4 | import com.typesafe.config.Config
5 | import fusion.common.config.FusionConfigFactory
6 | import fusion.common.constant.FusionConstants
7 | import mass.Mass
8 |
9 | class MassApplication(val classicSystem: ActorSystem) {
10 | def this(config: Config) =
11 | this(MassApplication.createActorSystem(FusionConfigFactory.arrangeConfig(config, FusionConstants.FUSION)))
12 |
13 | val mass: Mass = Mass.fromActorSystem(classicSystem)
14 | }
15 |
16 | object MassApplication {
17 | def createActorSystem(config: Config): ActorSystem = {
18 | val name = config.getString("fusion.akka-name")
19 | ActorSystem(name, config)
20 | }
21 | }
22 |
--------------------------------------------------------------------------------
/mass-core-ext/src/main/scala/mass/core/module/MassCoreModule.scala:
--------------------------------------------------------------------------------
1 | package mass.core.module
2 |
3 | import akka.actor.ActorSystem
4 | import com.google.inject.AbstractModule
5 | import javax.inject.{ Inject, Provider, Singleton }
6 | import mass.db.slick.{ PgProfile, PgProfileExtension }
7 |
8 | @Singleton
9 | class PgProfileProvider @Inject() (system: ActorSystem) extends Provider[PgProfile] {
10 | override def get(): PgProfile = PgProfileExtension(system)
11 | }
12 |
13 | class MassCoreModule extends AbstractModule {
14 | override def configure(): Unit = {
15 | bind(classOf[PgProfile]).toProvider(classOf[PgProfileProvider])
16 | }
17 | }
18 |
--------------------------------------------------------------------------------
/mass-core-ext/src/main/scala/mass/db/slick/AggAggregateLibrary.scala:
--------------------------------------------------------------------------------
1 | package mass.db.slick
2 |
3 | import slick.ast.Library.SqlAggregateFunction
4 |
5 | object AggAggregateLibrary {
6 | val ArrayAgg = new SqlAggregateFunction("array_agg")
7 | }
8 |
--------------------------------------------------------------------------------
/mass-core-ext/src/main/scala/mass/db/slick/SqlComponent.scala:
--------------------------------------------------------------------------------
1 | package mass.db.slick
2 |
3 | import akka.Done
4 | import akka.actor.ActorSystem
5 | import com.typesafe.scalalogging.StrictLogging
6 | import com.zaxxer.hikari.HikariDataSource
7 | import fusion.core.extension.FusionCore
8 | import fusion.jdbc.{ FusionJdbc, JdbcTemplate }
9 | import javax.inject.{ Inject, Singleton }
10 | import slick.basic.DatabasePublisher
11 |
12 | import scala.concurrent.Future
13 | import scala.util.Failure
14 |
15 | /**
16 | * SQL data access manager for the Mass system.
17 | */
18 | @Singleton
19 | class SqlComponent @Inject() (val profile: PgProfile, classicSystem: ActorSystem) extends StrictLogging {
20 | import profile.api._
21 | val dataSource: HikariDataSource = FusionJdbc(classicSystem).component
22 | val db = databaseForDataSource(dataSource)
23 | val jdbcTemplate: JdbcTemplate = JdbcTemplate(dataSource)
24 | FusionCore(classicSystem).shutdowns.beforeActorSystemTerminate("StopSqlManager") { () =>
25 | Future {
26 | db.close()
27 | Done
28 | }(classicSystem.dispatcher)
29 | }
30 |
31 | def runTransaction[R, E <: Effect.Write](a: DBIOAction[R, NoStream, E]): Future[R] =
32 | wrapperLogging(db.run(a.transactionally))
33 |
34 | def run[R](a: DBIOAction[R, NoStream, Nothing]): Future[R] = wrapperLogging(db.run(a))
35 |
36 | def stream[T](a: DBIOAction[_, Streaming[T], Nothing]): DatabasePublisher[T] = db.stream(a)
37 |
38 | def streamTransaction[T, E <: Effect.Write](a: DBIOAction[_, Streaming[T], E]): DatabasePublisher[T] =
39 | db.stream(a.transactionally)
40 |
41 | @inline private def wrapperLogging[T](f: Future[T]): Future[T] =
42 | f.andThen { case Failure(e) => logger.warn(s"Slick run error [${e.toString}].") }(db.ioExecutionContext)
43 |
44 | override def toString = s"SqlSystem($dataSource, $jdbcTemplate, $db)"
45 | }
46 |
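47 | // Usage sketch (hypothetical; assumes an injected SqlComponent named `sql` and the
48 | // job_schedule table used by the tests in this repo):
49 | //
50 | //   import sql.profile.api._
51 | //   val keys: Future[Seq[String]] = sql.run(sql"select key from job_schedule".as[String])
52 | //   val updated: Future[Int] = sql.runTransaction(sqlu"update job_schedule set status = 1 where key = 'kettle'")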
--------------------------------------------------------------------------------
/mass-core-ext/src/main/scala/mass/job/JobClassJob.scala:
--------------------------------------------------------------------------------
1 | package mass.job
2 |
3 | import com.typesafe.scalalogging.StrictLogging
4 | import fusion.core.FusionApplication
5 | import fusion.inject.guice.GuiceApplication
6 | import helloscala.common.util.StringUtils
7 | import mass.core.job.JobConstants
8 | import org.quartz.{ Job, JobExecutionContext }
9 |
10 | import scala.jdk.CollectionConverters._
11 |
12 | private[job] class JobClassJob extends Job with StrictLogging {
13 | override def execute(context: JobExecutionContext): Unit =
14 | try {
15 | val jobClass = context.getJobDetail.getJobDataMap.getString(JobConstants.JOB_CLASS)
16 | require(StringUtils.isNoneBlank(jobClass), s"Key: ${JobConstants.JOB_CLASS} must not be empty.")
17 | val data =
18 | context.getJobDetail.getJobDataMap.asScala.filterNot(_._1 == JobConstants.JOB_CLASS).mapValues(_.toString).toMap
19 | val application = FusionApplication.application.asInstanceOf[GuiceApplication]
20 | val jobSystem = application.instance[JobScheduler]
21 | val jobRunner = application.instance[JobRunner]
22 | jobRunner.execute(jobSystem, context.getJobDetail.getKey.getName, data, jobClass)
23 | } catch {
24 | case e: Throwable =>
25 | logger.error(s"Job execution failed. ${e.getMessage}", e)
26 | }
27 | }
28 |
--------------------------------------------------------------------------------
/mass-core-ext/src/main/scala/mass/job/JobSettings.scala:
--------------------------------------------------------------------------------
1 | package mass.job
2 |
3 | import java.nio.file.{ Files, Path, Paths }
4 |
5 | import mass.MassSettings
6 | import mass.core.job.JobConstants
7 | import mass.core.{ Constants, MassUtils }
8 |
9 | final class JobSettings private (val settings: MassSettings) {
10 | private val configuration = settings.configuration
11 |
12 | private val conf = configuration.getConfiguration(s"${Constants.BASE_CONF}.job")
13 |
14 | val jobSavedDir: Path =
15 | conf.get[Option[Path]]("job-saved-dir").getOrElse(Paths.get(MassUtils.userDir, "share", "job-saved"))
16 |
17 | val jobRunDir: Path = Paths.get(MassUtils.userDir, "run", "job-run")
18 |
19 | if (!Files.isDirectory(jobSavedDir)) {
20 | Files.createDirectories(jobSavedDir)
21 | }
22 | if (!Files.isDirectory(jobRunDir)) {
23 | Files.createDirectories(jobRunDir)
24 | }
25 |
26 | def getJobRunDist(jobKey: String): Path = jobRunDir.resolve(jobKey).resolve(JobConstants.DIST)
27 |
28 | def schedulerRunJar: String = ""
29 | }
30 |
31 | object JobSettings {
32 | def apply(massSettings: MassSettings): JobSettings = new JobSettings(massSettings)
33 | }
34 |
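35 | // Relevant configuration (see the mass-core-ext reference.conf elsewhere in this repo;
36 | // when unset, the directory defaults to ${user.dir}/share/job-saved):
37 | //
38 | //   mass.job.job-saved-dir = "/tmp/mass/job/job-saved"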
--------------------------------------------------------------------------------
/mass-core-ext/src/main/scala/mass/job/db/model/QrtzModels.scala:
--------------------------------------------------------------------------------
1 | package mass.job.db.model
2 |
3 | import javax.inject.{ Inject, Singleton }
4 | import mass.db.slick.PgProfile
5 |
6 | @Singleton
7 | class QrtzModels @Inject() (val profile: PgProfile)
8 | extends QrtzSchedulerStateModelTable
9 | with QrtzSimpleTriggersModelTable
10 | with QrtzTriggerLogModelTable
11 | with QrtzLocksModelTable
12 | with QrtzFiredTriggersModelTable
13 | with QrtzCronTriggersModelTable
14 | with QrtzCalendarsModelTable
15 | with QrtzPausedTriggerGrpsModelTable
16 | with QrtzBlobTriggersModelTable
17 | with QrtzJobDetailsModelTable
18 | with QrtzSimpropTriggersModelTable
19 | with QrtzTriggersModelTable {
20 | import profile.api._
21 |
22 | /** DDL for all tables. Call .create to execute. */
23 | lazy val schema: profile.SchemaDescription = Array(
24 | QrtzBlobTriggersModel.schema,
25 | QrtzCalendarsModel.schema,
26 | QrtzCronTriggersModel.schema,
27 | QrtzFiredTriggersModel.schema,
28 | QrtzJobDetailsModel.schema,
29 | QrtzLocksModel.schema,
30 | QrtzPausedTriggerGrpsModel.schema,
31 | QrtzSchedulerStateModel.schema,
32 | QrtzSimpleTriggersModel.schema,
33 | QrtzSimpropTriggersModel.schema,
34 | QrtzTriggerLogModel.schema,
35 | QrtzTriggersModel.schema).reduceLeft(_ ++ _)
36 | }
37 |
--------------------------------------------------------------------------------
/mass-core-ext/src/main/scala/mass/workflow/model/WfDetail.scala:
--------------------------------------------------------------------------------
1 | package mass.workflow.model
2 |
3 | import java.time.OffsetDateTime
4 |
5 | case class WfDetail(name: String, content: String, createdAt: OffsetDateTime)
6 |
--------------------------------------------------------------------------------
/mass-core-ext/src/test/resources/application-test.conf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yangbajing/fusion-data-archived/eb5e5ef5d1ec667e6087652bcee4368e633a03e3/mass-core-ext/src/test/resources/application-test.conf
--------------------------------------------------------------------------------
/mass-core-ext/src/test/resources/logback-test.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="UTF-8"?>
2 | <configuration>
3 |   <appender name="CONSOLE" class="ch.qos.logback.core.ConsoleAppender">
4 |     <encoder>
5 |       <pattern>%date{HH:mm:ss.SSS} %coloredLevel %logger{50} - %message%n%xException{10}</pattern>
6 |     </encoder>
7 |   </appender>
8 |
9 |   <root level="INFO">
10 |     <appender-ref ref="CONSOLE"/>
11 |   </root>
12 | </configuration>
--------------------------------------------------------------------------------
/mass-core-ext/src/test/scala/mass/STMultiNodeSpec.scala:
--------------------------------------------------------------------------------
1 | package mass
2 |
3 | import akka.remote.testkit.{ MultiNodeSpec, MultiNodeSpecCallbacks }
4 | import org.scalatest.BeforeAndAfterAll
5 | import org.scalatest.matchers.should.Matchers
6 | import org.scalatest.wordspec.AnyWordSpecLike
7 |
8 | import scala.language.implicitConversions
9 |
10 | /**
11 | * Hooks up MultiNodeSpec with ScalaTest
12 | */
13 | trait STMultiNodeSpec extends MultiNodeSpecCallbacks with AnyWordSpecLike with Matchers with BeforeAndAfterAll {
14 | self: MultiNodeSpec =>
15 |
16 | override def beforeAll(): Unit = multiNodeSpecBeforeAll()
17 |
18 | override def afterAll(): Unit = multiNodeSpecAfterAll()
19 |
20 | // Might not be needed anymore if we find a nice way to tag all logging from a node
21 | implicit override def convertToWordSpecStringWrapper(s: String): WordSpecStringWrapper =
22 | new WordSpecStringWrapper(s"$s (on node '${self.myself.name}', $getClass)")
23 | }
24 |
--------------------------------------------------------------------------------
/mass-core-ext/src/test/scala/mass/db/slick/PgProfileTest.scala:
--------------------------------------------------------------------------------
1 | package mass.db.slick
2 |
3 | import fusion.inject.guice.testkit.GuiceApplicationTestkit
4 | import fusion.jdbc.FusionJdbc
5 | import org.scalatest.wordspec.AnyWordSpecLike
6 |
7 | class PgProfileTest extends GuiceApplicationTestkit with AnyWordSpecLike {
8 | private val profile = injectInstance[PgProfile]
9 | import profile.api._
10 | private val db = databaseForDataSource(FusionJdbc(classicSystem).component)
11 |
12 | "test" in {
13 | val q = sql"select key from job_schedule".as[String]
14 | println("q.head: " + q.head)
15 | val result = db.run(q).futureValue
16 | println(result)
17 | }
18 |
19 | protected override def afterAll(): Unit = {
20 | db.close()
21 | super.afterAll()
22 | }
23 | }
24 |
--------------------------------------------------------------------------------
/mass-core-ext/src/test/scala/mass/job/JobSchedulerTest.scala:
--------------------------------------------------------------------------------
1 | package mass.job
2 |
3 | import fusion.inject.guice.testkit.GuiceApplicationTestkit
4 | import org.scalatest.wordspec.AnyWordSpecLike
5 |
6 | class JobSchedulerTest extends GuiceApplicationTestkit with AnyWordSpecLike {
7 | private val jobScheduler = injectInstance[JobScheduler]
8 |
9 | "JobSystem" should {
10 | "toString" in {
11 | println(jobScheduler)
12 | }
13 | }
14 | }
15 |
--------------------------------------------------------------------------------
/mass-core-ext/src/test/scala/mass/server/repository/JobRepoTest.scala:
--------------------------------------------------------------------------------
1 | package mass.server.repository
2 |
3 | import fusion.inject.guice.testkit.GuiceApplicationTestkit
4 | import fusion.json.jackson.ScalaObjectMapper
5 | import mass.db.slick.{ PgProfile, SqlComponent }
6 | import mass.job.db.model.QrtzModels
7 | import mass.model.job.JobTrigger
8 | import org.scalatest.wordspec.AnyWordSpecLike
9 |
10 | class JobRepoTest extends GuiceApplicationTestkit with AnyWordSpecLike {
11 | private val qrtzModels = injectInstance[QrtzModels]
12 | private val objectMapper = injectInstance[ScalaObjectMapper]
13 | private val sqlSystem = injectInstance[SqlComponent]
14 | private val profile = injectInstance[PgProfile]
15 | import profile.api._
16 |
17 | "JobRepoTest" should {
18 | val db = sqlSystem.db
19 |
20 | "filterWhere" in {
21 | val action = qrtzModels.QrtzJobDetailsModel.sortBy(_.createdAt.desc).take(30).result
22 | action.statements.foreach(println)
23 |
24 | val result = db.run(action).futureValue
25 | println(result)
26 | }
27 |
28 | "log" in {
29 | qrtzModels.QrtzTriggerLogModel.schema.createStatements.foreach(println)
30 | }
31 | }
32 |
33 | "JSON" should {
34 | "trigger" in {
35 | val jstr = """{"key":"kettle","triggerType":"CRON","startTime":"2018-09-12T13:00:11+08","endTime":null}"""
36 | val trigger = objectMapper.readValue[JobTrigger](jstr)
37 | println(trigger)
38 | }
39 | }
40 | }
41 |
--------------------------------------------------------------------------------
/mass-core/src/main/resources/META-INF/services/com.fasterxml.jackson.databind.Module:
--------------------------------------------------------------------------------
1 | # Let's hope this works as expected
2 |
3 | #mass.core.json.MassCoreJacksonModule
4 |
--------------------------------------------------------------------------------
/mass-core/src/main/resources/reference.conf:
--------------------------------------------------------------------------------
1 | akka {
2 | loglevel = INFO
3 | stdout-loglevel = INFO
4 | loggers = ["akka.event.slf4j.Slf4jLogger"]
5 | logging-filter = "akka.event.slf4j.Slf4jLoggingFilter"
6 |
7 | typed.library-extensions += "mass.extension.MassCore"
8 |
9 | actor {
10 | # Set this to on to enable serialization-bindings defined in
11 | # additional-serialization-bindings. Those are by default not included
12 | # for backwards compatibility reasons. They are enabled by default if
13 | # akka.remote.artery.enabled=on.
14 | //enable-additional-serialization-bindings = on
15 |
16 | //allow-java-serialization = off
17 | }
18 | }
19 |
20 | mass {
21 | name = "mass"
22 | core {
23 | run-mode = "dev"
24 | temp-dir = "mass-temp"
25 | compiles {
26 | scala213 = ${?user.home}"/.sdkman/candidates/scala/2.13.1"
27 | scala212 = ${?user.home}"/.sdkman/candidates/scala/2.12.10"
28 | scala211 = ${?user.home}"/.sdkman/candidates/scala/2.11.12"
29 | }
30 | }
31 | }
--------------------------------------------------------------------------------
/mass-core/src/main/scala/akka/mass/AkkaUtils.scala:
--------------------------------------------------------------------------------
1 | package akka.mass
2 |
3 | import java.util.concurrent.TimeoutException
4 |
5 | import akka.actor.{ ActorSystem, ActorSystemImpl }
6 |
7 | import scala.concurrent.Await
8 | import scala.concurrent.duration._
9 |
10 | object AkkaUtils {
11 | /**
12 | * Shut down an actor system and wait for termination.
13 | * On failure debug output will be logged about the remaining actors in the system.
14 | *
15 | * If verifySystemShutdown is true, then an exception will be thrown on failure.
16 | */
17 | def shutdownActorSystem(
18 | actorSystem: ActorSystem,
19 | duration: Duration = 10.seconds,
20 | verifySystemShutdown: Boolean = false): Unit = {
21 | actorSystem.terminate()
22 | try Await.ready(actorSystem.whenTerminated, duration)
23 | catch {
24 | case _: TimeoutException =>
25 | val msg = "Failed to stop [%s] within [%s] \n%s".format(
26 | actorSystem.name,
27 | duration,
28 | actorSystem.asInstanceOf[ActorSystemImpl].printTree)
29 | if (verifySystemShutdown) throw new RuntimeException(msg)
30 | else println(msg)
31 | }
32 | }
33 | }
34 |
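35 | // Usage sketch (hypothetical):
36 | //
37 | //   val system = ActorSystem("demo")
38 | //   AkkaUtils.shutdownActorSystem(system, 5.seconds, verifySystemShutdown = true)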
--------------------------------------------------------------------------------
/mass-core/src/main/scala/mass/Mass.scala:
--------------------------------------------------------------------------------
1 | package mass
2 |
3 | import akka.actor.typed.scaladsl.adapter._
4 | import akka.actor.typed.{ ActorRef, ActorSystem, Behavior, Props }
5 | import akka.{ actor => classic }
6 | import com.typesafe.config.Config
7 | import fusion.common.config.FusionConfigFactory
8 | import fusion.common.{ ReceptionistFactory, SpawnFactory }
9 | import fusion.core.extension.FusionCore
10 | import helloscala.common.Configuration
11 | import mass.core.Constants
12 |
13 | import scala.concurrent.ExecutionContext
14 |
15 | final class Mass private (val classicSystem: classic.ActorSystem) extends SpawnFactory with ReceptionistFactory {
16 | implicit def executionContext: ExecutionContext = classicSystem.dispatcher
17 |
18 | val configuration: Configuration = FusionCore(classicSystem).configuration
19 |
20 | override def typedSystem: ActorSystem[_] = classicSystem.toTyped
21 |
22 | override def spawn[T](behavior: Behavior[T], props: Props): ActorRef[T] =
23 | classicSystem.spawnAnonymous(behavior, props)
24 |
25 | override def spawn[T](behavior: Behavior[T], name: String, props: Props): ActorRef[T] =
26 | classicSystem.spawn(behavior, name, props)
27 | }
28 |
29 | object Mass {
30 | def fromMergedConfig(config: Config): Mass =
31 | fromActorSystem(classic.ActorSystem(Constants.MASS, config))
32 |
33 | private[mass] def fromActorSystem(system: classic.ActorSystem): Mass = new Mass(system)
34 |
35 | def fromConfig(originalConfig: Config): Mass = {
36 | val config = FusionConfigFactory.arrangeConfig(originalConfig, Constants.MASS, Seq("akka"))
37 | fromMergedConfig(config)
38 | }
39 | }
40 |
--------------------------------------------------------------------------------
/mass-core/src/main/scala/mass/MassSettings.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) Yangbajing 2018
3 | *
4 | * This is the custom License of Yangbajing
5 | */
6 |
7 | package mass
8 |
9 | import akka.actor.Address
10 | import akka.actor.typed.ActorSystem
11 | import com.typesafe.config.Config
12 | import com.typesafe.scalalogging.StrictLogging
13 | import helloscala.common.Configuration
14 | import mass.core.Constants._
15 |
16 | final class Compiles(c: Configuration) {
17 | def scala213Home: String = c.getString("scala213")
18 | def scala212Home: String = c.getString("scala212")
19 | def scala211Home: String = c.getString("scala211")
20 | }
21 |
22 | final class MassSettings private (val configuration: Configuration) extends StrictLogging {
23 | val compiles = new Compiles(configuration.getConfiguration(s"$BASE_CONF.core.compiles"))
24 |
25 | def clusterName: String = configuration.getString(BASE_CONF + ".cluster.name")
26 |
27 | def clusterProtocol: String = configuration.getString(BASE_CONF + ".cluster.protocol")
28 |
29 | def clusterSeeds: List[Address] =
30 | configuration
31 | .get[Seq[String]](BASE_CONF + ".cluster.seeds")
32 | .map { seed =>
33 | val Array(host, port) = seed.split(':')
34 | Address(clusterProtocol, clusterName, host, port.toInt)
35 | }
36 | .toList
37 | }
38 |
39 | object MassSettings {
40 | def apply(configuration: Configuration): MassSettings = new MassSettings(configuration)
41 | def apply(config: Config): MassSettings = apply(Configuration(config))
42 | def apply(system: ActorSystem[_]): MassSettings = apply(system.settings.config)
43 | }
44 |
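45 | // Shape of the configuration clusterSeeds parses, seeds given as "host:port" strings
46 | // (values illustrative; compare the mass-console reference.conf in this repo):
47 | //
48 | //   mass.cluster.protocol = "akka"
49 | //   mass.cluster.name = "mass"
50 | //   mass.cluster.seeds = ["127.0.0.1:30011"]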
--------------------------------------------------------------------------------
/mass-core/src/main/scala/mass/core/Constants.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) Yangbajing 2018
3 | *
4 | * This is the custom License of Yangbajing
5 | */
6 |
7 | package mass.core
8 |
9 | object Constants {
10 | val BASE_CONF = "mass"
11 | val MASS = "mass"
12 | val JACKSON_JSON = "jackson-json"
13 | val JACKSON_CBOR = "jackson-cbor"
14 | val SHA256_HEX_LENGTH = 64
15 |
16 | val STATUS_DISABLE = 0
17 | val STATUS_ENABLE = 1
18 |
19 | val OUT_LOG_SUFFIX = "out.log"
20 | val ERR_LOG_SUFFIX = "err.log"
21 |
22 | object Roles {
23 | val BROKER = "broker"
24 | val CONSOLE = "console"
25 | }
26 |
27 | object Nodes {
28 | val BROKER_LEADER = "broker-leader"
29 |
30 | val BROKER_LEADER_PROXY = "broker-leader-proxy"
31 |
32 | val BROKER = "mass-broker"
33 |
34 | val CONSOLE = "mass-console"
35 | }
36 | }
37 |
--------------------------------------------------------------------------------
/mass-core/src/main/scala/mass/core/MassUtils.scala:
--------------------------------------------------------------------------------
1 | package mass.core
2 |
3 | object MassUtils {
4 | def userDir: String = sys.props.getOrElse("user.dir", "")
5 | }
6 |
--------------------------------------------------------------------------------
/mass-core/src/main/scala/mass/core/RunMode.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) Yangbajing 2018
3 | *
4 | * This is the custom License of Yangbajing
5 | */
6 |
7 | package mass.core
8 |
9 | object RunMode extends Enumeration {
10 | val Dev = Value(1)
11 | val Test = Value
12 | val Prod = Value
13 | }
14 |
--------------------------------------------------------------------------------
/mass-core/src/main/scala/mass/core/XmlUtils.scala:
--------------------------------------------------------------------------------
1 | package mass.core
2 |
3 | import helloscala.common.util.StringUtils
4 |
5 | import scala.xml.NodeSeq
6 |
7 | object XmlUtils {
8 | implicit class XmlRich(node: NodeSeq) {
9 | @inline def attr(attr: String): String = (node \ s"@$attr").text.trim
10 |
11 | @inline def getAttr(attr: String): Option[String] =
12 | StringUtils.option(this.attr(attr)).map(_.trim)
13 |
14 | @inline def text: String = node.text.trim
15 |
16 | @inline def getText: Option[String] =
17 | StringUtils.option(node.text).map(_.trim)
18 | }
19 |
20 | def attr(ns: NodeSeq, attr: String): String = ns.attr(attr)
21 |
22 | def getAttr(ns: NodeSeq, attr: String): Option[String] = ns.getAttr(attr)
23 |
24 | def text(ns: NodeSeq): String = ns.text
25 |
26 | def getText(ns: NodeSeq): Option[String] = ns.getText
27 | }
28 |
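A short usage sketch of the `XmlRich` enrichment above, using a Scala XML literal:

```scala
import scala.xml.NodeSeq
import mass.core.XmlUtils.XmlRich

val connector: NodeSeq = <connector name="pg" type="jdbc"> postgres </connector>

connector.attr("name")    // "pg": attribute text, trimmed
connector.getAttr("user") // None: absent attributes come back as an Option
connector.text            // "postgres": element text, trimmed
```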
--------------------------------------------------------------------------------
/mass-core/src/main/scala/mass/core/actors/AggregateActor.scala:
--------------------------------------------------------------------------------
1 | //package mass.core.actors
2 | //
3 | //import akka.actor.{ ActorRef, Props, Status }
4 | //import helloscala.common.exception.{ HSBadRequestException, HSNotFoundException }
5 | //import helloscala.common.util.StringUtils
6 | //
7 | ///**
8 | // * Aggregate-root actor: aggregates several business actors via Iterable[(Props, Symbol)] and exposes a single facade to upper layers.
9 | // */
10 | //trait AggregateActor extends MassActor {
11 | // val propsList: Iterable[(Props, Symbol)]
12 | // protected var actors: Map[Symbol, ActorRef] = Map()
13 | //
14 | // override def preStart(): Unit = {
15 | //    require(propsList.groupBy(_._2).size == propsList.size, "propsList contains duplicate names")
16 | // actors = propsList.map { case (props, symbol) => symbol -> context.actorOf(props, symbol.name) }.toMap
17 | // }
18 | //
19 | // override def postStop(): Unit = {
20 | // actors.valuesIterator.foreach(actor => context.stop(actor))
21 | // actors = Map()
22 | // }
23 | //
24 | // override def receive: Receive = {
25 | // case (name: Symbol, msg) => sendMessage(name, msg)
26 | //     case msg => // parse the first part of the message name and use it as a Symbol
27 | // StringUtils.extractFirstName(msg) match {
28 | // case Some(name) => sendMessage(Symbol(name), msg)
29 | //         case _ => sender() ! Status.Failure(HSBadRequestException(s"No service found to handle this message: $msg"))
30 | // }
31 | // }
32 | //
33 | // private def sendMessage(name: Symbol, msg: Any): Unit =
34 | // actors.get(name) match {
35 | // case Some(actor) => actor forward msg
36 | //       case None => sender() ! Status.Failure(HSNotFoundException(s"Service $name not found; message was: $msg"))
37 | // }
38 | //}
39 |
--------------------------------------------------------------------------------
/mass-core/src/main/scala/mass/core/actors/MassActor.scala:
--------------------------------------------------------------------------------
1 | //package mass.core.actors
2 | //
3 | //import akka.actor.SupervisorStrategy._
4 | //import akka.actor.{ Actor, OneForOneStrategy, SupervisorStrategy }
5 | //import com.typesafe.scalalogging.StrictLogging
6 | //import helloscala.common.exception.HSException
7 | //
8 | ///**
10 | // * Provides default implementations for some of the Actor trait's behaviour
10 | // */
11 | //trait MassActor extends Actor with StrictLogging {
12 | // override def supervisorStrategy: SupervisorStrategy = {
13 | // OneForOneStrategy() {
14 | // case _: HSException => Resume
15 | // case other => defaultDecider(other)
16 | // }
17 | // }
18 | //}
19 |
--------------------------------------------------------------------------------
/mass-core/src/main/scala/mass/core/component/spi/BaseComponent.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) Yangbajing 2018
3 | *
4 | * This is the custom License of Yangbajing
5 | */
6 |
7 | package mass.core.component.spi
8 |
9 | trait BaseComponent {
10 | def preStart(): Unit
11 |
12 | def postStop(): Unit
13 | }
14 |
--------------------------------------------------------------------------------
/mass-core/src/main/scala/mass/core/component/spi/CollectResult.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) Yangbajing 2018
3 | *
4 | * This is the custom License of Yangbajing
5 | */
6 |
7 | package mass.core.component.spi
8 |
9 | case class CollectResult(code: Int, message: String, data: Option[AnyRef])
10 |
--------------------------------------------------------------------------------
/mass-core/src/main/scala/mass/core/component/spi/DataComponent.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) Yangbajing 2018
3 | *
4 | * This is the custom License of Yangbajing
5 | */
6 |
7 | package mass.core.component.spi
8 |
9 | /**
10 |  * Data component: data loading and data storage.
11 | */
12 | trait DataComponent extends BaseComponent {
13 | def preStart(): Unit
14 |
15 | def authorization(): Boolean
16 |
17 |   def authorizationAfter(): Unit
18 |
19 | def run(): CollectResult
20 |
21 | def postStop(): Unit
22 | }
23 |
--------------------------------------------------------------------------------
/mass-core/src/main/scala/mass/core/component/spi/FunctionComponent.scala:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) Yangbajing 2018
3 | *
4 | * This is the custom License of Yangbajing
5 | */
6 |
7 | package mass.core.component.spi
8 |
9 | /**
10 |  * Transformation component.
11 |  * @tparam T the element type processed by this component
12 | */
13 | trait FunctionComponent[T] extends BaseComponent {
14 | def preStart(): Unit
15 |
16 | def process(data: T): T
17 |
18 | def postStop(): Unit
19 | }
20 |
--------------------------------------------------------------------------------
/mass-core/src/main/scala/mass/core/event/Event.scala:
--------------------------------------------------------------------------------
1 | package mass.core.event
2 |
3 | trait Event {
4 | def `type`: String
5 | }
6 |
7 | trait EventData extends Event {
8 | def data: AnyRef
9 | }
10 |
11 | case class EventDataSimple(data: AnyRef) extends EventData {
12 | override def `type`: String = "data/simple"
13 | }
14 |
15 | object Events {
16 | private var _types = Vector.empty[String]
17 |
18 | def registerType(`type`: String): Vector[String] = {
19 | if (!_types.contains(`type`)) {
20 | _types = _types :+ `type`
21 | }
22 | _types
23 | }
24 |
25 | def listTypes: Vector[String] = _types
26 | }
27 |
--------------------------------------------------------------------------------
/mass-core/src/main/scala/mass/core/factory/Factory.scala:
--------------------------------------------------------------------------------
1 | package mass.core.factory
2 |
3 | /**
4 |  * Factory.
5 | */
6 | trait Factory {
7 | /**
8 |    * The factory type.
9 | * @return
10 | */
11 | def `type`: String
12 | }
13 |
--------------------------------------------------------------------------------
/mass-core/src/main/scala/mass/core/job/JobConstants.scala:
--------------------------------------------------------------------------------
1 | package mass.core.job
2 |
3 | import scala.concurrent.duration._
4 |
5 | object JobConstants {
6 | private[mass] val JOB_CLASS = "JOB_CLASS"
7 |
8 | val TRIGGER_REPEAT = 0
9 |   val TRIGGER_INTERVAL: FiniteDuration = 1.minute
10 | val RUN_TIMEOUT: FiniteDuration = 2.hours
11 |
12 | val DIST = "dist"
13 | val ENDS_SUFFIX = ".conf"
14 |
15 | object Resources {
16 | val ZIP_PATH = "ZIP_PATH"
17 | }
18 |
19 | object Roles {
20 | val CONTROL = "job-control"
21 | val AGENT = "job-agent"
22 | }
23 | }
24 |
--------------------------------------------------------------------------------
/mass-core/src/main/scala/mass/core/job/JobResult.scala:
--------------------------------------------------------------------------------
1 | package mass.core.job
2 |
3 | trait JobResult
4 |
--------------------------------------------------------------------------------
/mass-core/src/main/scala/mass/core/job/SchedulerContext.scala:
--------------------------------------------------------------------------------
1 | package mass.core.job
2 |
3 | import akka.actor.typed.ActorSystem
4 | import mass.model.job.JobItem
5 |
6 | case class SchedulerContext(key: String, jobItem: JobItem, data: Map[String, String], system: ActorSystem[_])
7 |
--------------------------------------------------------------------------------
/mass-core/src/main/scala/mass/core/job/SchedulerJob.scala:
--------------------------------------------------------------------------------
1 | package mass.core.job
2 |
3 | import scala.concurrent.Future
4 |
5 | // #SchedulerJob
6 | trait SchedulerJob {
7 | def run(context: SchedulerContext): Future[JobResult]
8 | }
9 | // #SchedulerJob
10 |
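A minimal implementation sketch; `EchoJobResult` is an illustrative result type, not part of the platform:

```scala
import scala.concurrent.Future
import mass.core.job.{ JobResult, SchedulerContext, SchedulerJob }

// Hypothetical result type, for the example only.
final case class EchoJobResult(message: String) extends JobResult

final class EchoJob extends SchedulerJob {
  override def run(context: SchedulerContext): Future[JobResult] =
    // Echo the job key and the data map the scheduler passed in.
    Future.successful(EchoJobResult(s"job ${context.key} ran with ${context.data}"))
}
```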
--------------------------------------------------------------------------------
/mass-core/src/main/scala/mass/core/json/Json4sFormats.scala:
--------------------------------------------------------------------------------
1 | //package mass.core.json
2 | //
3 | //import com.fasterxml.jackson.databind.ObjectMapper
4 | //import fusion.json.jackson.Jackson
5 | //import mass.model.CommonStatus
6 | //import org.json4s.JsonAST.JInt
7 | //import org.json4s.jackson.JsonMethods
8 | //import org.json4s.{CustomSerializer, DefaultFormats, Serializer}
9 | //
10 | //trait Json4sFormats extends DefaultFormats {
11 | // override val customSerializers: List[Serializer[_]] =
12 | // new CustomSerializer[CommonStatus](_ =>
13 | // ({
14 | // case JInt(i) => CommonStatus.fromValue(i.intValue())
15 | // }, {
16 | // case s: CommonStatus => JInt(s.value)
17 | // })) ::
18 | // JavaTimeSerializers.defaults
19 | //}
20 | //
21 | //object Json4sFormats extends Json4sFormats
22 | //
23 | //object Json4sMethods extends JsonMethods {
24 | // override def mapper: ObjectMapper = Jackson.defaultObjectMapper
25 | //}
26 |
--------------------------------------------------------------------------------
/mass-core/src/main/scala/mass/core/json/package.scala:
--------------------------------------------------------------------------------
1 | //package mass.core
2 | //import mass.model.CommonStatus
3 | //import mass.model.job.{ Program, TriggerType }
4 | //import scalapb.GeneratedEnum
5 | //
6 | //package object json {
7 | // val ClassGeneratedEnum: Class[GeneratedEnum] = classOf[scalapb.GeneratedEnum]
8 | //
9 | // val ClassCommonStatus: Class[CommonStatus] = classOf[CommonStatus]
10 | // val ClassTriggerType: Class[TriggerType] = classOf[TriggerType]
11 | // val ClassProgram: Class[Program] = classOf[Program]
12 | //}
13 |
--------------------------------------------------------------------------------
/mass-core/src/main/scala/mass/core/model/scheduler/SchedulerJobResultTrait.scala:
--------------------------------------------------------------------------------
1 | package mass.core.model.scheduler
2 |
3 | import java.nio.file.{ Files, Paths }
4 |
5 | import mass.core.job.JobResult
6 |
7 | trait SchedulerJobResultTrait extends JobResult {
8 | val outPath: String
9 | val errPath: String
10 |
11 | def destroy(): Unit = {
12 | Files.deleteIfExists(Paths.get(outPath))
13 | Files.deleteIfExists(Paths.get(errPath))
14 | }
15 | }
16 |
--------------------------------------------------------------------------------
/mass-core/src/main/scala/mass/core/module/Module.scala:
--------------------------------------------------------------------------------
1 | package mass.core.module
2 |
3 | /**
4 |  * A Mass module.
5 | */
6 | trait Module {
7 | def name: String
8 | }
9 |
--------------------------------------------------------------------------------
/mass-core/src/main/scala/mass/core/script/ScriptManager.scala:
--------------------------------------------------------------------------------
1 | package mass.core.script
2 |
3 | import javax.script.{ ScriptEngine, ScriptEngineManager }
4 |
5 | object ScriptEngineType extends Enumeration {
6 | type ScriptEngineType = Value
7 |
8 | val ENGINE_SCALA = Value(1, "scala")
9 | val ENGINE_JAVASCRIPT = Value("nashorn")
10 | }
11 |
12 | object ScriptManager {
13 | import ScriptEngineType._
14 |
15 | val engineManager = new ScriptEngineManager()
16 |
17 | def scriptScala: ScriptEngine =
18 | engineManager.getEngineByName(ENGINE_SCALA.toString)
19 |
20 | def scriptJavascript: ScriptEngine =
21 | engineManager.getEngineByName(ENGINE_JAVASCRIPT.toString)
22 | }
23 |
--------------------------------------------------------------------------------
/mass-core/src/main/scala/mass/core/workflow/Workflow.scala:
--------------------------------------------------------------------------------
1 | package mass.core.workflow
2 |
3 | import akka.Done
4 |
5 | import scala.concurrent.Future
6 |
7 | /**
8 |  * A workflow.
9 | */
10 | trait Workflow[Result] {
11 | /**
12 |    * Run the workflow.
13 | */
14 | def run(): WorkflowExecution[Result]
15 | }
16 |
17 | /**
18 |  * A workflow execution handle.
19 | */
20 | trait WorkflowExecution[Result] extends AutoCloseable {
21 | def future: Future[Result]
22 |
23 | /**
24 |    * Terminate the workflow.
25 | * @return
26 | */
27 | def terminate(): Future[Done]
28 | }
29 |
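A minimal sketch of the two traits in use, assuming a workflow that simply completes with a constant value:

```scala
import scala.concurrent.{ ExecutionContext, Future }
import akka.Done
import mass.core.workflow.{ Workflow, WorkflowExecution }

final class ConstantWorkflow(result: Int)(implicit ec: ExecutionContext) extends Workflow[Int] {
  override def run(): WorkflowExecution[Int] = new WorkflowExecution[Int] {
    // The running workflow is represented by this future.
    override val future: Future[Int] = Future(result)
    // Nothing to interrupt here; a real workflow would stop its stream.
    override def terminate(): Future[Done] = Future.successful(Done)
    override def close(): Unit = ()
  }
}
```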
--------------------------------------------------------------------------------
/mass-core/src/main/scala/mass/extension/MassCore.scala:
--------------------------------------------------------------------------------
1 | package mass.extension
2 |
3 | import java.nio.file.{ Files, Path, Paths }
4 |
5 | import akka.actor.ExtendedActorSystem
6 | import akka.serialization.jackson.JacksonObjectMapperProvider
7 | import com.fasterxml.jackson.databind.ObjectMapper
8 | import com.typesafe.scalalogging.StrictLogging
9 | import fusion.common.extension.{ FusionExtension, FusionExtensionId }
10 | import fusion.core.extension.FusionCore
11 | import mass.MassSettings
12 | import mass.core.Constants
13 |
14 | /**
15 |  * MassCore is loaded automatically as an Akka library extension:
16 | * akka.typed.library-extensions += "mass.extension.MassCore"
17 | */
18 | final class MassCore private (override val classicSystem: ExtendedActorSystem)
19 | extends FusionExtension
20 | with StrictLogging {
21 | FusionCore(classicSystem)
22 | val settings: MassSettings = MassSettings(classicSystem.settings.config)
23 | val jsonMapper: ObjectMapper = JacksonObjectMapperProvider(classicSystem).getOrCreate(Constants.JACKSON_JSON, None)
24 | val cborMapper: ObjectMapper = JacksonObjectMapperProvider(classicSystem).getOrCreate(Constants.JACKSON_CBOR, None)
25 | val tempDirectory: Path = {
26 | val _tempDirectory = Paths.get(
27 | configuration.getOrElse[String](s"${Constants.BASE_CONF}.core.temp-dir", System.getProperty("java.io.tmpdir")))
28 | if (!Files.isDirectory(_tempDirectory)) {
29 | Files.createDirectories(_tempDirectory)
30 | }
31 | _tempDirectory
32 | }
33 |
34 | logger.info(configuration.getConfig(Constants.BASE_CONF).toString)
35 |
36 | def name: String = configuration.getString(s"${Constants.BASE_CONF}.name")
37 |
38 | override def toString = s"MassCore($classicSystem)"
39 | }
40 |
41 | object MassCore extends FusionExtensionId[MassCore] {
42 | override def createExtension(system: ExtendedActorSystem): MassCore = new MassCore(system)
43 | }
44 |
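A usage sketch, assuming `FusionExtensionId` follows Akka's standard extension-lookup convention:

```scala
import akka.actor.ActorSystem
import mass.extension.MassCore

val system = ActorSystem("mass")
val massCore = MassCore(system) // looks up (or creates) the extension

massCore.settings.clusterName // settings parsed from system.settings.config
massCore.tempDirectory        // created on first access if missing
```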
--------------------------------------------------------------------------------
/mass-core/src/main/scala/mass/message/job/JobEvent.scala:
--------------------------------------------------------------------------------
1 | package mass.message.job
2 |
3 | import fusion.json.CborSerializable
4 |
5 | sealed trait JobEvent extends CborSerializable
6 |
7 | case class JobTriggerEvent(key: String) extends JobEvent
8 |
--------------------------------------------------------------------------------
/mass-core/src/main/scala/mass/model/IApiResult.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2018 羊八井(yangbajing)(杨景)
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package mass.model;
18 |
19 | import com.fasterxml.jackson.annotation.JsonIgnore;
20 |
21 | /**
22 | * Created by yangbajing(yangbajing@gmail.com) on 2017-03-30.
23 | */
24 | public interface IApiResult<T> {
25 | Integer getErrCode();
26 |
27 | String getErrMsg();
28 |
29 | T getData();
30 |
31 | @JsonIgnore
32 | default boolean isSuccess() {
33 | Integer errCode = getErrCode();
34 | return errCode == null || errCode == 0;
35 | }
36 |
37 | }
38 |
--------------------------------------------------------------------------------
/mass-core/src/main/scala/mass/model/Provice.scala:
--------------------------------------------------------------------------------
1 | package mass.model
2 |
3 | case class County(county: String, postCode: String)
4 | case class Province(province: String, counties: Seq[County])
5 |
--------------------------------------------------------------------------------
/mass-core/src/main/scala/mass/model/TitleValue.scala:
--------------------------------------------------------------------------------
1 | package mass.model
2 |
3 | case class TitleValue(title: String, value: String, show: String)
4 | case class TitleIdValue(title: String, value: Int, show: String)
5 |
--------------------------------------------------------------------------------
/mass-core/src/main/scala/mass/model/job/JobItem.scala:
--------------------------------------------------------------------------------
1 | package mass.model.job
2 |
3 | import fusion.json.CborSerializable
4 | import helloscala.common.Configuration
5 | import helloscala.common.exception.HSBadRequestException
6 | import mass.core.ProgramVersion
7 |
8 | // #JobItem
9 | case class JobItem(
10 | program: Program,
11 | programOptions: Seq[String],
12 | programMain: String,
13 | programArgs: Seq[String] = Seq(),
14 | programVersion: String = "",
15 | resources: Map[String, String] = Map(),
16 | data: Map[String, String] = Map(),
17 | description: Option[String] = None,
18 | dependentJobKeys: Seq[String] = Nil,
19 | name: Option[String] = None)
20 | extends CborSerializable
21 | // #JobItem
22 |
23 | object JobItem {
24 | def apply(item: Configuration): JobItem = {
25 | val program = Program.fromValue(item.getString("program"))
26 | val programMain = item.getString("program-main")
27 | val _version = item.getOrElse[String]("program-version", "")
28 | val programVersion =
29 | ProgramVersion
30 | .get(program, _version)
31 | .getOrElse(
32 | throw HSBadRequestException(s"Configuration key program-version is invalid, current name is ${_version}."))
33 | JobItem(
34 | program,
35 | item.getOrElse[Seq[String]]("program-options", Nil),
36 | programMain,
37 | item.getOrElse[Seq[String]]("program-args", Nil),
38 | programVersion.version,
39 | name = item.get[Option[String]]("name"))
40 | }
41 | }
42 |
--------------------------------------------------------------------------------
/mass-core/src/main/scala/mass/model/job/JobTrigger.scala:
--------------------------------------------------------------------------------
1 | package mass.model.job
2 |
3 | import java.time.OffsetDateTime
4 |
5 | import fusion.json.CborSerializable
6 | import helloscala.common.Configuration
7 | import mass.core.job.JobConstants
8 |
9 | import scala.concurrent.duration._
10 |
11 | // #JobTrigger
12 | case class JobTrigger(
13 | triggerType: TriggerType,
14 | triggerEvent: String = "",
15 | startTime: Option[OffsetDateTime] = None,
16 | endTime: Option[OffsetDateTime] = None,
17 |     // number of repetitions
18 | repeat: Int = JobConstants.TRIGGER_REPEAT,
19 |     // interval between repetitions
20 | interval: FiniteDuration = JobConstants.TRIGGER_INTERVAL,
21 | cronExpress: String = "",
22 | failedRetries: Int = 0,
23 | timeout: FiniteDuration = JobConstants.RUN_TIMEOUT,
24 | alarmEmails: Seq[String] = Nil)
25 | extends CborSerializable
26 | // #JobTrigger
27 |
28 | object JobTrigger {
29 | def apply(c: Configuration): JobTrigger = {
30 | val triggerType = TriggerType.fromValue(c.getString("trigger-type"))
31 | JobTrigger(
32 | triggerType,
33 | c.getOrElse[String]("trigger-event", ""),
34 | c.get[Option[OffsetDateTime]]("start-time"),
35 | c.get[Option[OffsetDateTime]]("end-time"),
36 | c.getOrElse[Int]("repeat", JobConstants.TRIGGER_REPEAT),
37 | c.getOrElse[FiniteDuration]("duration", JobConstants.TRIGGER_INTERVAL),
38 | c.getOrElse[String]("cron-express", ""),
39 | c.getOrElse[Int]("failed-retries", 0),
40 | c.getOrElse[FiniteDuration]("timeout", JobConstants.RUN_TIMEOUT),
41 | c.getOrElse[Seq[String]]("alarm-emails", Nil))
42 | }
43 | }
44 |
--------------------------------------------------------------------------------
/mass-core/src/main/scala/mass/model/job/RunStatus.java:
--------------------------------------------------------------------------------
1 | package mass.model.job;
2 |
3 | import com.fasterxml.jackson.annotation.JsonValue;
4 | import fusion.json.CborSerializable;
5 | import helloscala.common.data.IntEnum;
6 |
7 | import java.util.Arrays;
8 | import java.util.Optional;
9 |
10 | public enum RunStatus implements IntEnum, CborSerializable {
11 | JOB_NORMAL(0, "NORMAL"),
12 | JOB_ENABLE(1, "ENABLE"),
13 | JOB_RUNNING(100, "RUNNING"),
14 | JOB_OK(200, "OK"),
15 | JOB_FAILURE(500, "FAILURE"),
16 | ;
17 |
18 | @JsonValue
19 | private Integer value;
20 | private String name;
21 |
22 | RunStatus(Integer value, String name) {
23 | this.value = value;
24 | this.name = name;
25 | }
26 |
27 | public Integer getValue() {
28 | return value;
29 | }
30 |
31 | public String getName() {
32 | return name;
33 | }
34 |
35 |     public static Optional<RunStatus> optionalFromValue(int value) {
36 | return Arrays.stream(values()).filter(v -> v.value.equals(value)).findFirst();
37 | }
38 |
39 | public static RunStatus fromValue(int value) {
40 | return optionalFromValue(value).orElseThrow();
41 | }
42 | }
43 |
--------------------------------------------------------------------------------
/mass-core/src/main/scala/mass/model/job/RunStatus.scala:
--------------------------------------------------------------------------------
1 | //package mass.model.job
2 | //
3 | //import helloscala.common.util.{ EnumTrait, EnumTraitCompanion }
4 | //
5 | //sealed abstract class RunStatus(override val companion: EnumTraitCompanion, override protected val value: Int)
6 | // extends EnumTrait
7 | //
8 | //object RunStatus extends EnumTraitCompanion {
9 | // self =>
10 | // override type Value = RunStatus
11 | //
12 | // case object JOB_NORMAL extends RunStatus(self, 0)
13 | // case object JOB_ENABLE extends RunStatus(self, 1)
14 | // case object JOB_RUNNING extends RunStatus(self, 100)
15 | // case object JOB_OK extends RunStatus(self, 200)
16 | // case object JOB_FAILURE extends RunStatus(self, 500)
17 | //}
18 |
--------------------------------------------------------------------------------
/mass-core/src/main/scala/mass/model/job/TriggerType.scala:
--------------------------------------------------------------------------------
1 | package mass.model.job
2 |
3 | import com.fasterxml.jackson.core.{ JsonGenerator, JsonParser }
4 | import com.fasterxml.jackson.databind.annotation.{ JsonDeserialize, JsonSerialize }
5 | import com.fasterxml.jackson.databind.deser.std.StdDeserializer
6 | import com.fasterxml.jackson.databind.ser.std.StdSerializer
7 | import com.fasterxml.jackson.databind.{ DeserializationContext, SerializerProvider }
8 | import helloscala.common.util.{ IEnumTrait, IEnumTraitCompanion }
9 |
10 | @JsonSerialize(using = classOf[TriggerType.EnumSer])
11 | @JsonDeserialize(using = classOf[TriggerType.EnumDeser])
12 | sealed abstract class TriggerType extends IEnumTrait[String] {
13 | override val value: String = name
14 | }
15 |
16 | object TriggerType extends IEnumTraitCompanion[String] {
17 | self =>
18 | override type Value = TriggerType
19 |
20 | case object SIMPLE extends TriggerType
21 | case object CRON extends TriggerType
22 | case object EVENT extends TriggerType
23 |
24 | override val values = Vector(CRON, EVENT, SIMPLE)
25 |
26 | override def optionFromValue(value: String): Option[TriggerType] = super.optionFromValue(value.toUpperCase())
27 |
28 | class EnumSer extends StdSerializer[TriggerType](classOf[TriggerType]) {
29 | override def serialize(value: TriggerType, gen: JsonGenerator, provider: SerializerProvider): Unit =
30 | gen.writeString(value.value)
31 | }
32 | class EnumDeser extends StdDeserializer[TriggerType](classOf[TriggerType]) {
33 | override def deserialize(p: JsonParser, ctxt: DeserializationContext): TriggerType =
34 | TriggerType.fromValue(p.getValueAsString.toUpperCase())
35 | }
36 | }
37 |
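A sketch of the JSON round-trip driven by the two annotations above, with a plain Jackson `ObjectMapper`:

```scala
import com.fasterxml.jackson.databind.ObjectMapper
import mass.model.job.TriggerType

val mapper = new ObjectMapper()

mapper.writeValueAsString(TriggerType.CRON)        // "\"CRON\""
mapper.readValue("\"cron\"", classOf[TriggerType]) // TriggerType.CRON (case-insensitive)
```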
--------------------------------------------------------------------------------
/mass-core/src/main/scala/mass/session/BaseSession.scala:
--------------------------------------------------------------------------------
1 | package mass.session
2 |
3 | trait BaseSession {
4 | def openId: String
5 |
6 |   // the connecting application
7 | def appId: String
8 |
9 |   // the connecting application's own user ID
10 | def userId: String
11 |
12 | def sessionCode: String
13 |
14 | def expiresIn: Long
15 |
16 | /**
17 |    * Whether the session has expired.
18 | * @return
19 | */
20 | def isDue: Boolean = expiresIn < (System.currentTimeMillis() / 1000)
21 | }
22 |
--------------------------------------------------------------------------------
/mass-core/src/test/resources/application-test.conf:
--------------------------------------------------------------------------------
1 | test.key = "test.key"
--------------------------------------------------------------------------------
/mass-core/src/test/resources/logback-test.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="UTF-8"?>
2 | <configuration>
3 |     <appender name="CONSOLE" class="ch.qos.logback.core.ConsoleAppender">
4 |         <encoder>
5 |             <pattern>%date{HH:mm:ss.SSS} %coloredLevel %logger{50} - %message%n%xException{10}</pattern>
6 |         </encoder>
7 |     </appender>
8 |     <root level="INFO">
9 |         <appender-ref ref="CONSOLE"/>
10 |     </root>
11 | </configuration>
--------------------------------------------------------------------------------
/mass-core/src/test/scala/mass/MassSettingsTest.scala:
--------------------------------------------------------------------------------
1 | package mass
2 |
3 | import helloscala.common.Configuration
4 | import org.scalatest.matchers.should.Matchers
5 | import org.scalatest.wordspec.AnyWordSpec
6 |
7 | class MassSettingsTest extends AnyWordSpec with Matchers {
8 | "MassSettings" should {
9 | val config = Configuration.generateConfig()
10 | val settings = MassSettings(config)
11 |
12 | "compiles" in {
13 | settings.compiles.scala212Home should not be empty
14 | println(settings.compiles.scala212Home)
15 | }
16 |
17 | "test key" in {
18 | config.getString("test.key") shouldBe "test.key"
19 | }
20 | }
21 | }
22 |
--------------------------------------------------------------------------------
/mass-core/src/test/scala/mass/core/script/ScriptManagerTest.scala:
--------------------------------------------------------------------------------
1 | package mass.core.script
2 |
3 | import fusion.testkit.FusionWordSpecLike
4 | import javax.script.SimpleBindings
5 | import mass.core.event.EventData
6 |
7 | class ScriptManagerTest extends FusionWordSpecLike {
8 | case class SqlDataEvent(data: String, `type`: String = "data-sql") extends EventData
9 |
10 | "ScriptManagerTest" should {
11 | "scriptJavascript" in {
12 | val engine = ScriptManager.scriptJavascript
13 | val event = SqlDataEvent("哈哈哈")
14 | val bindings = new SimpleBindings()
15 | bindings.put("event", event)
16 | val result = engine.eval("event.data()", bindings)
17 | println(s"result: ${result.getClass.getSimpleName} = $result")
18 | }
19 |
20 | // "scriptScala" in {
21 | // new ScriptEngineManager().getEngineFactories.forEach(println)
22 | //
23 | // val engine = ScriptManager.scriptScala
24 | // println(s"engine: $engine")
25 | //      val event = SqlDataEvent("hahaha")
26 | // val bindings = new SimpleBindings()
27 | // bindings.put("event", event)
28 | // val result = engine.eval("event.data", bindings)
29 | // println(s"result = $result")
30 | // }
31 |
32 | "set same" in {
33 | println(Set(1, 2, 3, 4).iterator.sameElements(Set(2, 3, 1, 4)))
34 | println(Set(1, 2, 3, 4) == Set(2, 3, 1, 4))
35 | }
36 | }
37 | }
38 |
--------------------------------------------------------------------------------
/mass-docs/README.md:
--------------------------------------------------------------------------------
1 | # mass-docs
2 |
3 | Diagrams are drawn with draw.io-desktop; the offline application can be downloaded here: [http://file.helloscala.com/](http://file.helloscala.com/).
4 |
--------------------------------------------------------------------------------
/mass-docs/doc/Sea Data.xmind:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yangbajing/fusion-data-archived/eb5e5ef5d1ec667e6087652bcee4368e633a03e3/mass-docs/doc/Sea Data.xmind
--------------------------------------------------------------------------------
/mass-docs/src/main/drawio/Broker.drawio:
--------------------------------------------------------------------------------
1 | 1Zhfb5swEMA/jV8rMH9iHkNCtkmbNKkPbR9dcIGVYOQ4Tdin3xkMgZDQaktJFkXifD4b3+/ubAtkLdb7L4IWyQ8esQxhI9oja4kwNq2ZAw+lKWsNsWa1IhZppI0Oivv0N9NKQ2u3acQ2PUPJeSbToq8MeZ6zUPZ0VAi+65u98Kz/1oLGbKC4D2k21D6kkUy01nS9Q8dXlsaJfjXB2r9nGr7Ggm9z/T6ErZfqV3evaTOXdnST0IjvOiorQNZCcC5rab1fsEyxbbDV41Znett1C5bLDw3Q63ij2Vb7Dkw3HEBgN4Mp/GcBUqwkFKyQv0CEoGCG5gGam0rwDERcFFiIgAAaFxHomqHAUZa+24zyTkxYI5Blg70Cx9TSDOjeJalk9wUNVe8OEg10iVxn0DLb0W9MSLY/673ZMoVcZXzNpCjBRA+wtfdlv7nrhLwJVNKJdmNHdZbF7cQH0iBo2GfAmwPwIipGoQPQFSJLJfhz5NmVsFBKgO6BbFT0lyo20DW3EbE7EbrdMFw1DngQh02YsGibMTEaDRv5vvqrsMyrWiAqAsTS8ZnfNPIj4qbpDZCTzyJuDTOfFXyTSq4mGkEOgJ0qp92qAAwdBK9BroTbRd4CLY9SegrmzoA5F5DmGymoTHn+zmYPgufqrd3D7Wb/wJ6/LYObZn6U59iYcmtxB9BjDu7kNA/Hj1fIb1ztLf/B5n1Nwu0t6uA+i+Aip5s5z+Hh94l0vGf7VD4q9R12dPNJW4Hzoqz7Zk7TftLjfjEpS31ppVvJQcWFTHjMc5p957zo0VUrGmcLDvCtCBsrfZWUVMSsMTNOx0CwDOr3rT//PxE1L0O0i7ND8LGxa3Aad851gOKpgOKLADVHgBo3AdSaCqh1oZqfjdQ87pX8nYFb4D+ZSGHNcDmsB12B9Jn9+OKkm5d3zi/g+nryXnztU8h0jy5XxnQXWmx/xik0wUFDTiSXO1UZO59RxleC5kxVkWRQkSyPU0B1c/Vnkw/V319Qgebh+1jV1/kIaQV/AA==
--------------------------------------------------------------------------------
/mass-docs/src/main/drawio/平台架构.drawio:
--------------------------------------------------------------------------------
1 | 7VrLcpswFP0aLdsxyLyW4JB20c50Jou2SwUUoJGRB+TY7tf3CoR5iDhM60ftNptIV1dCOkf36CKM8GK5/VCQVfqZx5QhcxZvEb5Dpmlgx4J/0rKrLS52akNSZLFyag0P2U+qjDNlXWcxLXuOgnMmslXfGPE8p5Ho2UhR8E3f7Ymz/lNXJKGa4SEiTLd+zWKRqlWYTmv/SLMkbZ5s2F7d8kii56Tg61w9D5n4qfqrm5ekGUsttExJzDcdEw4RXhSci7q03C4ok9g2sNX97l9p3c+7oLmY1EHN44WwNW2mXE1M7BowquVQ2WGGcLBJM0EfViSSrRugH2ypWDKoGVB8yhhbcMYLqOc8B6cgKUicwXwGZsKyJIdqBE0U7IE+d7WcF1oIuu2Y1Fo+UL6kotiBi2ptYFXbDtuqvmlJtGbKlnYIdJWNqH2T7EdusYOCgm8cSveWkTTciUga1hGgNBwNOhpDgKoqL0TKE54TFrbWoA9uB0i6zcS3Tvm7dHlvyVoOE/umelSVti0mZVqNJruVghTCl8LSol7Z7jO5CNU/HniApdP+gwqxU1JH1oKDqV3IJ85X6lmvklfydREpONTegikkVHnh2iSBOkhwQRkR2Utf6f6ELVPb+EtSlu8eC/4Mu/GKY8Dux4CJ9RgwzJEYmB8jBNz/IXA4BLAeAvNLhQC+1RAYngNnjYH5raKK3Usqy/wUyqLURGmL0VOWVmi+qwFeV5aIAcVZNBAX49jiYeniYZiXUg/DOg0jzgFKnP+cHObEvsLzdyodb53Tv0uXfSm2LO2kiIkg1QXBcgUrhGXIp654Vgquhr7Ss2OYlDrnPDq8KwyKsyalth4UzqWCwv53gkJLU88ZFc6/g7OWuJ4TZ0/DGYUe8gLkwk63GTwpeITXAjuRJRQ6KAiRrye7sFTRx7QU8o1iAN8I0BqiErgsIsxXDcssjivNGyOxT/MxyJjwEjFChXmMgwCfJmM1uxnrXvy7bc1hYAyPgqkyPjGznaT2TXralXvrYhmrfg+HQhcFM+RiGQ3eHPkuCm3kWdIIBfcO+WEVKBBDcxTOZSGwqyYP+Y7s7hvIXVyzZA3yJewZ51OsZtwOJYySWF5e6HoF6LvI81FoIdeXuLfkAWFgAWIwAgF2DckQ8ORWdELVv6+cnapQMR0YHedGJPeSKHuFFcGWHCQY088rplz7eOGd8ZQauQuUDACNMvqAgTsUmCNBd97E2kBXlljvPwOfMrOuusKayK7jAHlbLsrOyF+kod1t1kBh7MEX1zfcB/5QqCfQbrb9SqbtP0Pbf1HKC8oT+al+N015YHcukGcrofDMG9MHp0/BXJcH0zqVPJwk0g/evupXfb3ofmQ8ej4c3o3L78V3mZKV9HlidKtGDPa/e5ADwXuSzMGmXz2ZI3mX+1eIAfas/tFjHVaDN/z/XA30pLD6hhLxvOSwt0bfmyBbgCwJ1ADSCe++k0g4Ml+s9QGO1NHXrhuShZGfPBxLFqDa/sanprX9IRUOfwE=
--------------------------------------------------------------------------------
/mass-docs/src/main/paradox/broker/core.md:
--------------------------------------------------------------------------------
1 | # mass-broker
2 |
3 | **mass-broker** is the clustered core node of mass: every node has the full capability to schedule and to execute. Component
4 | resources (data-processing components) deployed to any node are automatically distributed to all nodes.
5 |
6 | *mass engine architecture diagram*
7 |
8 | The Sea engine consists of three major parts:
9 |
10 | 1. engine: the execution engine. It receives the business tasks (job-task) assigned by the Broker Leader and parses each task,
11 | then invokes the corresponding data component for every parsed task step (task-step).
12 | 2. component repository: stores all data components; the engine automatically synchronizes the repository's components to every node.
13 |
14 | ## engine (engine-worker)
15 |
16 | engine is a logical concept: each mass-engine node can start one or more engines (workers), defaulting to the host's CPU core count (the number of tasks a node can execute concurrently).
17 |
18 |
--------------------------------------------------------------------------------
/mass-docs/src/main/paradox/broker/index.md:
--------------------------------------------------------------------------------
1 | # Broker Nodes
2 |
3 | @@toc { depth=2 }
4 |
5 | @@@ index
6 |
7 | * [core](core.md)
8 | * [leader](leader.md)
9 |
10 | @@@
11 |
12 | mass-broker is the execution node: every node has scheduling, component-execution, and messaging capabilities. The cluster starts a cluster-singleton actor, BrokerLeader, that acts as the
13 | **coordinator** and controls the actual business scheduling; all nodes reach it through a cluster-singleton proxy actor: BrokerLeaderProxy.
14 |
--------------------------------------------------------------------------------
/mass-docs/src/main/paradox/broker/leader.md:
--------------------------------------------------------------------------------
1 | # Broker Leader (Coordinator)
2 |
3 | leader: the Sea cluster automatically elects one node as the Leader. The Leader schedules all data jobs: it parses each job's definition file to obtain the job's task dependencies, and assigns tasks according to each Broker node's load.
4 |
5 |
6 | - Should the node hosting the leader itself be assigned tasks?
7 |
8 |
--------------------------------------------------------------------------------
/mass-docs/src/main/paradox/connector/connector.md:
--------------------------------------------------------------------------------
1 | # Extending Connectors
2 |
3 | TODO: describe how to extend a data connector, starting from the `Connector` trait
4 |
5 | Data connectors are modular and extensible by design. For data sources that mass-connector does not support, users can easily extend the system with custom data sources.
6 |
7 | ## `Connector`
8 |
9 | The `Connector` trait is the base interface for data connectors; a custom data connector must implement this interface. A hypothetical sketch follows the snippet below.
10 |
11 | @@snip [trait Connector](../../../../../mass-connector/src/main/scala/mass/connector/Connector.scala) { #Connector }
12 |
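Until then, a hypothetical sketch of the shape such an extension takes; the member names below are illustrative only, and the real contract is the snipped `Connector` trait:

```scala
import akka.NotUsed
import akka.stream.scaladsl.Source

// Illustrative only: a custom CSV "connector" that exposes its rows as an
// Akka Stream Source, the pattern the mass-connector data sources follow.
final class CsvFileConnector(path: String) {
  def rows: Source[Map[String, String], NotUsed] =
    Source(List(Map("id" -> "1", "name" -> "demo"))) // stub data instead of real file I/O
}
```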
--------------------------------------------------------------------------------
/mass-docs/src/main/paradox/connector/core.md:
--------------------------------------------------------------------------------
1 | # Data Connectors
2 |
3 | Built on Akka Stream, Massdata provides connectors for a wide range of data sources, covering read, transform, and write operations.
4 | Akka Stream offers powerful reactive stream processing; the Massdata connectors build on top of it to provide both batch and streaming ETL.
5 |
6 | Existing data connectors:
7 |
8 | - HDFS
9 | - HBase
10 | - JDBC: PostgreSQL, MySQL, Oracle, MS SQL Server, DM (Dameng), GBase
11 | - Elasticsearch
12 | - Cassandra
13 | - MongoDB
14 | - FTP/sFTP
15 | - File: txt, csv
16 | - XML
17 | - JSON
18 | - Excel (xls/xlsx)
19 |
20 | Thanks to Akka Stream's extensibility and the platform's modular design, new connectors for additional data sources are easy to add.
21 |
22 | ## Common Data-Processing Pattern
23 |
24 | ```
25 | source ~> flow....flow ~> sink
26 | ```
27 |
28 | - source: the data source
29 | - flow: a processing stage; there may be several
30 | - sink: the data sink, which collects data and acts on it. The source and sink may point at different DataSources, which is how ETL/ELT is realized; a runnable sketch follows below.
31 |
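A runnable sketch of this pattern with Akka Streams (Akka 2.6 style, where the implicit `ActorSystem` provides the materializer); the stages are illustrative:

```scala
import akka.actor.ActorSystem
import akka.stream.scaladsl.{ Flow, Sink, Source }

implicit val system: ActorSystem = ActorSystem("etl-demo")

val source = Source(1 to 100)           // source: where data comes from
val double = Flow[Int].map(_ * 2)       // flow: one of possibly many stages
val sink   = Sink.foreach[Int](println) // sink: where data ends up

source.via(double).runWith(sink) // source ~> flow ~> sink
```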
--------------------------------------------------------------------------------
/mass-docs/src/main/paradox/connector/index.md:
--------------------------------------------------------------------------------
1 | # Data Connectors (mass-connector)
2 |
3 | Data connectors are implemented on top of Akka Stream. They can read data from a data source and persist data to one, using Source and Sink to abstract the **extract** and **persist** operations respectively.
4 |
5 | @@toc { depth=1 }
6 |
7 | @@@ index
8 |
9 | * [core](core.md)
10 | * [connector](connector.md)
11 |
12 | @@@
13 |
--------------------------------------------------------------------------------
/mass-docs/src/main/paradox/console/console/console.md:
--------------------------------------------------------------------------------
1 | # Monitoring and Management
2 |
3 | TODO
4 |
--------------------------------------------------------------------------------
/mass-docs/src/main/paradox/console/index.md:
--------------------------------------------------------------------------------
1 | # Management Console (mass-console)
2 |
3 | @@toc { depth=2 }
4 |
5 | @@@ index
6 |
7 | * [console/console](console/console.md)
8 | * [orchestration/orchestration](orchestration/orchestration.md)
9 |
10 | @@@
11 |
--------------------------------------------------------------------------------
/mass-docs/src/main/paradox/console/orchestration/orchestration.md:
--------------------------------------------------------------------------------
1 | # Business Orchestration Module
2 |
3 | TODO
4 |
--------------------------------------------------------------------------------
/mass-docs/src/main/paradox/design/code-level.md:
--------------------------------------------------------------------------------
1 | # Code Layout
2 |
3 |
4 | ```
5 | ├── route
6 | │ ├── api
7 | │ │ ├── ApiRoute.scala
8 | │ └── Routes.scala
9 | └── service
10 | └── job
11 | ├── JobActor.scala
12 | ├── JobServiceComponent.scala
13 | └── JobService.scala
14 | ├── component
15 | │ ├── DefaultSchedulerJob.scala
16 | │ └── JobRun.scala
17 | ├── repository
18 | ├── util
19 | ├── JobMain.scala
20 | ├── JobSystem.scala
21 | ```
--------------------------------------------------------------------------------
/mass-docs/src/main/paradox/design/index.md:
--------------------------------------------------------------------------------
1 | # Design
2 |
3 | @@toc { depth=1 }
4 |
5 | @@@ index
6 |
7 | * [thinking](thinking.md)
8 | * [mass-design](mass-design.md)
9 | * [code-level](code-level.md)
10 |
11 | @@@
--------------------------------------------------------------------------------
/mass-docs/src/main/paradox/design/mass-design.md:
--------------------------------------------------------------------------------
1 | # MassData Design
2 |
3 | ## Clustering
4 |
5 | - Cluster membership: provides node fault tolerance
6 | - Load balancing: routing algorithms spread task load across multiple cluster nodes for parallel execution
7 |
8 | ## Subsystems
9 |
10 | Overall, the MassData platform divides into four major parts across two kinds of programs (each program can run on multiple nodes): Broker nodes and the console.
11 |
12 | - Broker: one broker acts as the coordinator, implementing management and scheduling. It schedules Jobs, extracts Tasks from a Job and assigns them to engines; engines receive Tasks and invoke components to execute them.
13 |     - leader: a singleton node in the cluster; MassData automatically elects one node as the Leader
14 |     - engine: executes the actual business tasks
15 | - Console: a web-based graphical monitoring and management program. It can monitor MassData's runtime state, control system features, and edit, configure, and deploy business-orchestration files.
16 |     - console: the management and monitoring program for the mass-data platform
17 |     - orchestration: the business-orchestration application
18 |
19 | In production only the broker cluster needs to run; the console management UI is optional.
20 |
21 | ## Data Communication
22 |
23 | **Messaging and communication.** MassData builds on Akka Cluster's mature and powerful clustering, so a separate **data bus** system is not needed for service discovery, registration, or
24 | inter-process messaging.
25 |
26 | **Monitoring reports.** Engines publish their runtime state in real time through Akka's distributed publish/subscribe, and the console may subscribe to the relevant topics; a sketch follows the list below.
27 |
28 | **Component-to-cluster communication** happens in two main ways:
29 |
30 | 1. Via Akka Cluster Client: when the engine launches an external component, the component does not run inside the cluster. When the component needs to talk to MassData,
31 | it should report messages directly to the engine on the node where it runs.
32 | 2. By writing log files in an agreed format, which the engine parses and reports.
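A minimal sketch of the monitoring-report path using Akka's classic Distributed Pub/Sub; the topic name and message shape are illustrative:

```scala
import akka.actor.Actor
import akka.cluster.pubsub.DistributedPubSub
import akka.cluster.pubsub.DistributedPubSubMediator.{ Publish, Subscribe, SubscribeAck }

object Topics { val EngineStatus = "engine-status" } // illustrative topic name

// Runs on a broker node: publishes engine state to the cluster.
class EngineReporter extends Actor {
  private val mediator = DistributedPubSub(context.system).mediator
  def receive = { case status: String => mediator ! Publish(Topics.EngineStatus, status) }
}

// Runs on a console node: consumes whatever the engines report.
class ConsoleListener extends Actor {
  private val mediator = DistributedPubSub(context.system).mediator
  mediator ! Subscribe(Topics.EngineStatus, self)
  def receive = {
    case SubscribeAck(_) => // subscription confirmed
    case status: String  => println(s"engine status: $status")
  }
}
```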
33 |
34 | ## Simulation, Development, and Debugging
35 |
36 | In practice we need to simulate production data and runtime behaviour in a development or test environment, orchestrate business rules in advance, and dry-run the flows before going to production to surface problems
37 | and errors early. Thanks to MassData's flexibility and ease of deployment, a simulation environment close to production can be set up
38 | quickly and conveniently on any available Linux server.
39 |
--------------------------------------------------------------------------------
/mass-docs/src/main/paradox/design/thinking.md:
--------------------------------------------------------------------------------
1 | # Approaches
2 |
3 | Two designs were considered during mass development: *Design B* and *Design A*.
4 |
5 | ## *Design B:* separate bus, scheduling-service, and engine subsystems
6 |
7 | **Key characteristics**
8 |
9 | * Bus: provides service discovery, registration, and messaging
10 | * Scheduling service: the scheduling node is logically a singleton, with multiple instances for high availability
11 | * The engine works as in Design A
12 | * Traditional staged batch processing: read, transform, and write run serially.
13 |
14 | **Advantages**
15 |
16 | - Lower technical difficulty; many mature components to build on, so implementation is relatively easy
17 |
18 | **Disadvantages**
19 |
20 | - Slower data processing
21 | - No support for streaming or real-time processing
22 | - Complex deployment and operations: several systems must be deployed by hand
23 | - Multiple subsystems, each a potential single point of failure
24 |
25 | ## *Design A:* no central node; every node shares all capabilities
26 |
27 | **Key characteristics**
28 |
29 | * No central node: all nodes (each called a broker) are identical and provide cluster discovery and management (based on Akka Cluster)
30 | * Scheduling
31 |     1. Use Akka Cluster Singleton to elect one node as the coordinator (scheduler) that controls all Job execution, with Akka Persistence to persist
32 |     the scheduling state (recommended).
33 |     2. Every node is both a task-executing engine and a scheduler.
34 | * Every node is an engine; the scheduler can assign a Job's Tasks to engines. After receiving a Task, an engine parses the business flow and invokes the relevant components to do the actual work
35 | * Read, transform, and write stages run in parallel as streams. Akka Stream provides the streaming capability, and Akka Stream
36 | Kafka can add higher-performance, highly available, fault-tolerant, persistent processing.
37 | * Combining Kafka and reactive-kafka yields an **exactly-once** processing model and supports rich processing patterns such as ELT.
38 |
39 | **Advantages**
40 |
41 | - Faster data processing
42 | - Real-time data ETL
43 | - Rich processing patterns: ETL, ELT, and more
44 | - Better resource utilisation
45 | - A unified, clear architecture
46 | - Simple deployment; no need to deploy several subsystems separately
47 | - No single point of failure
48 |
49 | **Disadvantages**
50 |
51 | - Higher technical difficulty
52 |
--------------------------------------------------------------------------------
/mass-docs/src/main/paradox/index.md:
--------------------------------------------------------------------------------
1 | # MassData (Reactive Data Governance)
2 |
3 | *Data governance*
4 |
5 | ## Features
6 |
7 | - **Reactive** design: high performance, high availability, fault tolerance, back-pressure (flow control), scalability
8 | - Rich data-source connectors: RDBMS, NoSQL, BigData, TEXT (csv, txt, JSON, XML), Excel, FTP/sFTP, and more
9 | - Distributed job scheduling
10 | - ETL/ELT
11 | - Workflow
12 | - Data governance and metadata management
13 |
14 | @@toc { depth=2 }
15 |
16 | @@@ index
17 |
18 | * [intro/index](intro/index.md)
19 | * [design/index](design/index.md)
20 | * [connector](connector/index.md)
21 | * [job](job/index.md)
22 | * [rdi/index](rdi/index.md)
23 | * [console/index](console/index.md)
24 | * [broker/index](broker/index.md)
25 | * [spec/index](spec/index.md)
26 |
27 | @@@
28 |
29 | - Website: [http://mass-data.yangbajing.me](http://mass-data.yangbajing.me)
30 | - Author: Yang Jing (yangbajing, yangbajing at gmail com)
31 |
--------------------------------------------------------------------------------
/mass-docs/src/main/paradox/job/configuration.md:
--------------------------------------------------------------------------------
1 | # Configuration
2 |
3 | ## DDL (Postgres)
4 |
5 | **Job**
6 |
7 | @@snip [ddl-pg](../../../../../scripts/software/postgres/job.sql)
8 |
--------------------------------------------------------------------------------
/mass-docs/src/main/paradox/job/developer.md:
--------------------------------------------------------------------------------
1 | # Development
2 |
3 | JobSchedule
4 |
--------------------------------------------------------------------------------
/mass-docs/src/main/paradox/job/get_start.md:
--------------------------------------------------------------------------------
1 | # Getting Started
--------------------------------------------------------------------------------
/mass-docs/src/main/paradox/job/index.md:
--------------------------------------------------------------------------------
1 | # Scheduling (mass-job)
2 |
3 | Automated task scheduling is an essential technology for data-integration back ends in the big-data era. In data warehouses, data marts, and all kinds of data pools and lakes, task scheduling keeps the inflow, storage, cleansing, filtering, and
4 | processing of large volumes of data orderly and efficient.
5 |
6 | @@toc { depth=2 }
7 |
8 | @@@ index
9 |
10 | - [intro](intro.md)
11 | - [feature](feature.md)
12 | - [install](install.md)
13 | - [get_start](get_start.md)
14 | - [developer](developer.md)
15 | - [configuration](configuration.md)
16 |
17 | @@@
18 |
--------------------------------------------------------------------------------
/mass-docs/src/main/paradox/job/install.md:
--------------------------------------------------------------------------------
1 | # Installation
--------------------------------------------------------------------------------
/mass-docs/src/main/paradox/rdi/core.md:
--------------------------------------------------------------------------------
1 | # Core Concepts
2 |
3 | Reactive Data Process, abbreviated mass-rdi
4 |
5 | mass-rdi is a distributed, highly available ETL platform built on akka. It can run a job's subtasks in parallel on different cluster nodes, following their topological order,
6 | making efficient use of cluster resources. It provides several tool nodes that can watch file data and offer monitoring, alerting, and exception handling for data and tasks. Its workflow definition is comparatively simple and lightweight,
7 | making it a good scheduling tool for building data warehouses or big-data platforms.
8 |
9 | ## Technical Architecture
10 |
11 | `MassData platform architecture diagram`
12 |
13 | The mass-rdi architecture is pseudo-decentralized: roles distinguish the functions a node carries. The current roles are:
14 |
15 | 1. leader: manages the whole RDP cluster; node join/leave, task assignment, and scheduling
16 | 2. worker: actually executes data-processing tasks
17 | 3. console: the management console, covering task management, task orchestration, monitoring, and more
18 |
19 | Every node ships with the full feature set; the role set at start-up selects which feature groups the node enables. A node may therefore hold several roles at once (a single role per node is recommended).
20 | Each role can be deployed independently on different machines, supporting high availability (HA).
21 |
22 | // A node contains these modules: scheduling, execution, alerting, logging, persistence.
23 |
24 | *Powered by Akka*
25 |
26 |
27 |
28 | Supported data sources and stores:
29 |
30 | 1. RDBMSs: PostgreSQL, MySQL, Oracle, DM (Dameng)
31 | 2. HDFS, Hive, HBase
32 | 3. Elasticsearch
33 | 4. Cassandra
34 | 5. MongoDB
35 | 6. FTP/sFTP
36 | 7. Excel/CSV
37 |
38 | ## Features
39 |
40 | #### Task Scheduling
41 |
42 | 1. Timed scheduling (fixed interval or calendar time)
43 | 2. Task splitting and dispatch
44 | 3. Distributed tasks
45 |
46 | #### Data Import and Export
47 |
48 | Move data from SQL, NoSQL, or files (Excel, CSV, ...) to other SQL, NoSQL, or file targets (Excel, CSV, ...).
49 |
50 | #### Data Cleansing and Transformation
51 |
52 | Data can be cleansed and transformed in real time during EL (Extract, Load), or offline. This includes, but is not limited to:
53 |
54 | 1. Data formatting (whitespace stripping; time, date, number, and currency formatting)
55 | 2. Field merging and splitting
56 | 3. Field comparison and de-duplication
57 | 4. Data conversion: transform data into another shape with user-defined functions
58 | 5. Dynamic import of Excel/CSV sheets (with configurable column-to-field mappings)
59 | 6. ...
60 |
61 | #### Graphical and Programmable Task Configuration
62 |
63 | 1. Graphical drag-and-drop task-flow configuration
64 | 2. An integrated web-based code editor for data cleansing and transformation
65 |
66 | #### Data-Processing Visualisation
67 |
68 | Visualises the data-flow processing: process, state, and throughput can all be monitored in real time.
69 |
70 | 
71 |
72 |
--------------------------------------------------------------------------------
/mass-docs/src/main/paradox/rdi/extension.md:
--------------------------------------------------------------------------------
1 | # Extensions
2 |
3 | RDP is modular and supports feature extension:
4 |
5 | ## Data Connectors
6 |
7 | See @ref[Connector features and extension](../connector/connector.md)
8 |
9 | ## Data-Processing Modules
10 |
11 | RDP supports many kinds of data transformation, including but not limited to:
12 |
13 | - Whitespace stripping
14 | - Character replacement
15 | - Date formatting
16 | - Number formatting
17 | - Currency formatting
18 | - Regular-expression replacement
19 |
20 | RDP supports custom data processing: by implementing the `DataTransfer` interface, users can define their own data-processing functions, and custom functions can also be
21 | embedded in the RDP platform as extensions for the system to call.
22 |
--------------------------------------------------------------------------------
/mass-docs/src/main/paradox/rdi/img/Reactive-Flow.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yangbajing/fusion-data-archived/eb5e5ef5d1ec667e6087652bcee4368e633a03e3/mass-docs/src/main/paradox/rdi/img/Reactive-Flow.png
--------------------------------------------------------------------------------
/mass-docs/src/main/paradox/rdi/index.md:
--------------------------------------------------------------------------------
1 | # Reactive Data Processing (mass-rdi)
2 |
3 | The data-processing part of **mass-rdi** is designed around Reactive Extensions ideas, adopts **Akka Stream** as the implementation framework, and is realised on top of **Alpakka**.
4 |
5 | ## Features
6 |
7 | **Improves working efficiency**
8 |
9 | mass-rdp **improves** data engineers' ability to handle complex data, **improves** ingestion efficiency across data sources and formats, and **improves** responsiveness to fast-changing business needs.
10 |
11 | **Rich data-source support**
12 |
13 | mass-rdi handles structured and unstructured data, including text formats (csv, XML, JSON, ...). It supports the traditional relational databases PostgreSQL, MySQL, Oracle, and SQL Server,
14 | and the NoSQL stores HDFS, Hive, HBase, Cassandra, and MongoDB. mass-rdp also supports the domestic databases DM (Dameng) and GBase.
15 |
16 | **Reactive architecture**
17 |
18 | mass-rdi designs its data-processing features (ETL/ELT) with Reactive Streams, giving it high performance, high throughput, scalability, fault tolerance, and back-pressure (flow control).
19 |
20 | **Applicability**
21 |
22 | As an all-round data-processing tool, mass-rdi fits many business scenarios:
23 |
24 | - Data collection
25 | - Data cleansing
26 | - Task scheduling and management
27 |
28 | mass-rdi can be used on its own or embedded into other business systems.
29 |
30 | **Extensible**
31 |
32 | mass-rdi offers well-defined interfaces and rich extension points; it supports secondary development and embedding into specific application systems.
33 |
34 | @@toc { depth=1 }
35 |
36 | @@@ index
37 |
38 | * [core](core.md)
39 | * [workflow](workflow.md)
40 | * [extension](extension.md)
41 |
42 | @@@
43 |
--------------------------------------------------------------------------------
/mass-docs/src/main/paradox/rdi/workflow.md:
--------------------------------------------------------------------------------
1 | # Workflow
2 |
3 | A workflow has two parts:
4 |
5 | - connectors: connector configuration, defining the data-source connectors the workflow uses
6 | - graph: the flow-graph (DAG) definition
7 |
8 | Workflows are defined in XML; an example configuration:
9 |
10 | @@snip [pg2mysql-graph.xml](code/workflow/pg2mysql-graph.xml)
11 |
12 | ## Connectors
13 |
14 | See @ref:[Data connectors](../connector/index.md)
15 |
16 | ## Graph
17 |
18 | Similar to an Akka Stream flow graph, this is a DAG defining the ETL pipeline. The whole graph must be closed to be valid and executable; otherwise RDP throws an exception while analysing the workflow.
19 |
20 | - source: the data origin; it has exactly one out and decides where the stream's data comes from.
21 | - flow: a processing stage the stream elements pass through; a flow has at least one in and one out.
22 | - sink: the data sink; it has exactly one in and decides where the stream is finally stored.
23 |
24 | @@snip [pg2mysql-graph.xml](../../../../../mass-rdi-core/src/test/resources/mass/core/workflow/etl/EtlWorkflowTest.xml) { #graph_example }
25 |
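For intuition, a sketch of the closed graph such a workflow describes, written directly against Akka Streams; the stages stand in for configured connectors and flows:

```scala
import akka.actor.ActorSystem
import akka.stream.scaladsl.{ Flow, Sink, Source }

implicit val system: ActorSystem = ActorSystem("workflow-demo")

val source = Source(List("  a ", " b"))    // stand-in for the source connector
val trim   = Flow[String].map(_.trim)      // one flow stage
val store  = Sink.foreach[String](println) // stand-in for the sink connector

// Only a closed source ~> flows ~> sink graph is valid and runnable,
// mirroring the "graph must be closed" rule above.
source.via(trim).runWith(store)
```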
26 | **graph.source**
27 |
28 | The connector the Source references. If no connector definition is found in the current workflow file, the global connectors library is searched.
29 |
30 | **graph.flows**
31 |
32 | A flow is the stage where data is actually transformed and filtered.
33 |
34 | **graph.flows.flow.script**
35 |
36 | script configures how a flow processes data. Its main features:
37 |
38 | - **type**: the code may be Scala, Java, Javascript, or Python
39 | - **src**: alternatively points at a `jar` or `zip` executable package, or a `.scala`, `.py`, or `.js` code file. Supported path formats: `file://`, `http://`,
40 | `classpath://`, `ftp://`, `hdfs://`. If a package is specified, inline code in the script element is ignored.
42 |
--------------------------------------------------------------------------------
/mass-docs/src/main/paradox/spec/business_spec/business_spec.md:
--------------------------------------------------------------------------------
1 | # Business File Specification
2 |
3 | The business file specification defines the **data business file** (Job for short) produced by the orchestration system; its content is configured in XML. A Job is composed of these main
4 | parts:
5 |
6 | - Job: the business file; each business has exactly one Job.
7 | - Task: a business may define one or more Tasks, and Tasks may branch and join. Tasks declare their dependencies as a DAG (directed acyclic graph); each Task
8 | corresponds to one Akka Stream processing job. A taskId uniquely identifies a Task within a Job, and an optional nextTask names the task to run
9 | immediately after this one. A Task without nextTask is treated as the Job's final task, and only one Task per Job
10 | may omit nextTask.
11 |
--------------------------------------------------------------------------------
/mass-docs/src/main/paradox/spec/business_spec/job.json:
--------------------------------------------------------------------------------
1 | {
2 | "jobName": "",
3 | "jobVersion": "1.0.0",
4 | "jobDescription": "",
5 | "tasks": [
6 | {
7 | "taskId": "task1",
8 | "nextTask": "task2",
9 | "source": {
10 | "sourceId": "数据流ID:custom, jdbc, ftp, sftp",
11 | "properties": [
12 | {
13 | "key": "jdbcUrl",
14 | "value": "jdbc:postgres://localhost/system"
15 | }
16 | ],
17 | "className": "单sourceId为custom时,通过 className 指定数据流实现类全路径"
18 | },
19 | "stages": [
20 | {
21 | "stageId": "组件ID:custom, csv, jdbcRow",
22 | "properties": [
23 | {
24 | "key": "",
25 | "value": ""
26 | }
27 | ],
28 | "className": "单 stageId 为custom时,通过 className 指定组件类全路径",
29 | // 不同的组件有不同的配置
30 | "configure": {}
31 | }
32 | ]
33 | },
34 | {
35 | "taskId": "task2"
36 | }
37 | ]
38 | }
--------------------------------------------------------------------------------
/mass-docs/src/main/paradox/spec/business_spec/job.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="UTF-8"?>
2 | <job>
3 |     <jobId>job1</jobId>
4 |     <jobName>job1 name</jobName>
5 |     <jobVersion>1.0.0</jobVersion>
6 |     <jobDescription>job1 description</jobDescription>
7 |     <tasks>
8 |         <task>
9 |             <taskId>task1</taskId>
10 |             <nextTask>task2</nextTask>
11 |             <source sourceId="jdbc">
12 |                 <properties>
13 |                     <property>
14 |                         <key>jdbcUrl</key>
15 |                         <value>jdbc:postgres://localhost/system</value>
16 |                     </property>
17 |                 </properties>
18 |                 <className>when sourceId is custom, className gives the fully qualified class name of the data-stream implementation</className>
19 |             </source>
20 |             <stages>
21 |                 <stage>
22 |                     <stageId>csv</stageId>
23 |                     <properties>
24 |                         <property>
25 |                             <key>splitter</key>
26 |                             <value>\t</value>
27 |                         </property>
28 |                     </properties>
29 |                     <className>when stageId is custom, className gives the fully qualified component class name</className>
30 |                     <configure/>
31 |                 </stage>
32 |             </stages>
33 |         </task>
34 |         <task>
35 |             <taskId>task2</taskId>
36 |         </task>
37 |     </tasks>
38 |     ....
39 | </job>
40 |
--------------------------------------------------------------------------------
/mass-docs/src/main/paradox/spec/business_spec/rowFieldConfigure.json:
--------------------------------------------------------------------------------
1 | {
2 | "fields": [{
3 | "field": "字段名",
4 | "dataType": "期望的数据类型",
5 | // 数据类型不匹配时的转换函数
6 | "dataTypeConverter": {},
7 | // 字段值进行转换
8 | "transfer": {}
9 | }]
10 | }
--------------------------------------------------------------------------------
/mass-docs/src/main/paradox/spec/component_spec/component_spec.md:
--------------------------------------------------------------------------------
1 | # Data Component Specification
2 |
3 |
4 |
--------------------------------------------------------------------------------
/mass-docs/src/main/paradox/spec/ic_spec/ic_spec.md:
--------------------------------------------------------------------------------
1 | # Inter-System Communication Specification
2 |
3 |
--------------------------------------------------------------------------------
/mass-docs/src/main/paradox/spec/index.md:
--------------------------------------------------------------------------------
1 | # Specifications
2 |
3 | @@toc { depth=2 }
4 |
5 | @@@ index
6 |
7 | * [white_paper](white_paper.md)
8 | * [business_spec/business_spec](business_spec/business_spec.md)
9 | * [component_spec/component_spec](component_spec/component_spec.md)
10 | * [ic_spec/ic_spec](ic_spec/ic_spec.md)
11 |
12 | @@@
13 |
--------------------------------------------------------------------------------
/mass-docs/src/main/paradox/static/JobTaskStageFlow.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yangbajing/fusion-data-archived/eb5e5ef5d1ec667e6087652bcee4368e633a03e3/mass-docs/src/main/paradox/static/JobTaskStageFlow.png
--------------------------------------------------------------------------------
/mass-docs/src/main/paradox/static/RDPTechArchitecture.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yangbajing/fusion-data-archived/eb5e5ef5d1ec667e6087652bcee4368e633a03e3/mass-docs/src/main/paradox/static/RDPTechArchitecture.png
--------------------------------------------------------------------------------
/mass-docs/src/main/paradox/static/SeaBrokerArchitecture.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yangbajing/fusion-data-archived/eb5e5ef5d1ec667e6087652bcee4368e633a03e3/mass-docs/src/main/paradox/static/SeaBrokerArchitecture.png
--------------------------------------------------------------------------------
/mass-docs/src/main/scala/ws.sc:
--------------------------------------------------------------------------------
1 | import java.nio.file.Paths
2 | Set(1, 2, 3, 4) sameElements Set(2, 3, 1, 4)
3 |
4 | Set(1, 2, 3, 4) == Set(2, 3, 1, 4)
5 | Set(1, 2, 3, 4) equals Set(2, 3, 1, 4)
6 |
7 |
8 | val relativePath = Paths.get("a4", "923843298432")
9 |
10 | Paths.get("/opt/").resolve(relativePath)
--------------------------------------------------------------------------------
/mass-functest/src/multi-jvm/README.md:
--------------------------------------------------------------------------------
1 | # multi-jvm
2 |
3 | ```
4 | sbt mass-functest/multi-jvm:testOnly sample.multinode.MultiNodeSampleTest
5 | ```
6 |
--------------------------------------------------------------------------------
/mass-functest/src/multi-jvm/scala/sample/demo/Demo.scala:
--------------------------------------------------------------------------------
1 | package sample.demo
2 |
3 | object DemoMultiJvmNode1 extends App {
4 | println("Hello from node 1")
5 | }
6 |
7 | object DemoMultiJvmNode2 extends App {
8 | println("Hello from node 2")
9 | }
10 |
11 | object DemoMultiJvmNode3 extends App {
12 | println("Hello from node 3")
13 | }
14 |
--------------------------------------------------------------------------------
/mass-functest/src/multi-jvm/scala/sample/multinode/MultiNodeSampleTest.scala:
--------------------------------------------------------------------------------
1 | package sample.multinode
2 |
3 | import akka.actor.{Actor, Props}
4 | import akka.remote.testkit.{MultiNodeConfig, MultiNodeSpec}
5 | import akka.testkit.ImplicitSender
6 |
7 | object MultiNodeSampleConfig extends MultiNodeConfig {
8 | val node1 = role("node1")
9 | val node2 = role("node2")
10 | }
11 |
12 | class MultiNodeSampleTestMultiJvmNode1 extends MultiNodeSampleTest
13 |
14 | class MultiNodeSampleTestMultiJvmNode2 extends MultiNodeSampleTest
15 |
16 | object MultiNodeSampleTest {
17 |
18 | class Ponger extends Actor {
19 | def receive = {
20 | case "ping" => sender() ! "pong"
21 | }
22 | }
23 |
24 | }
25 |
26 | class MultiNodeSampleTest
27 | extends MultiNodeSpec(MultiNodeSampleConfig)
28 | with STMultiNodeSpec
29 | with ImplicitSender {
30 |
31 | import MultiNodeSampleConfig._
32 | import MultiNodeSampleTest._
33 |
34 | override def initialParticipants: Int = roles.size
35 |
36 | "A MultiNodeSample" must {
37 |
38 | "wait for all nodes to enter a barrier" in {
39 | enterBarrier("startup") // 当所有节点都发起栅栏消息:startup 后再继续之后代码的运行
40 | }
41 |
42 | "send to and receive from a remote node" in {
43 | runOn(node1) {
44 | enterBarrier("deployed") // 等待另一个节点也发起栅栏 deployed
45 | val ponger = system.actorSelection(node(node2) / "user" / "ponger")
46 | ponger ! "ping"
47 | import scala.concurrent.duration._
48 | expectMsg(10.seconds, "pong")
49 | println(System.getProperty("akka.remote.port") + " received pong")
50 | }
51 |
52 | runOn(node2) {
53 | system.actorOf(Props[Ponger], "ponger")
54 | enterBarrier("deployed")
55 | println(System.getProperty("akka.remote.port") + " started ponger")
56 | }
57 |
58 | enterBarrier("finished")
59 | }
60 | }
61 |
62 | }
63 |
--------------------------------------------------------------------------------
/mass-functest/src/multi-jvm/scala/sample/multinode/MultiNodeSampleTestMultiJvmNode1.opts:
--------------------------------------------------------------------------------
1 | -Dakka.remote.port=9991 -Xmx256m
--------------------------------------------------------------------------------
/mass-functest/src/multi-jvm/scala/sample/multinode/MultiNodeSampleTestMultiJvmNode2.opts:
--------------------------------------------------------------------------------
1 | -Dakka.remote.port=9992 -Xmx256m
--------------------------------------------------------------------------------
/mass-functest/src/test/resources/application.conf:
--------------------------------------------------------------------------------
1 | # Don't terminate ActorSystem in tests
2 | akka.coordinated-shutdown {
3 | run-by-jvm-shutdown-hook = off
4 | terminate-actor-system = off
5 | }
6 | akka.cluster {
7 | run-coordinated-shutdown-when-down = off
8 | }
--------------------------------------------------------------------------------
/mass-functest/src/test/resources/calculator.conf:
--------------------------------------------------------------------------------
1 | include "test-common"
2 |
3 | akka {
4 | remote.netty.tcp.port = 2552
5 | }
6 |
--------------------------------------------------------------------------------
/mass-functest/src/test/resources/factorial.conf:
--------------------------------------------------------------------------------
1 | include "simple-cluster"
2 |
3 | akka.cluster.min-nr-of-members = 3
4 |
5 | akka.cluster.role {
6 | frontend.min-nr-of-members = 1
7 | backend.min-nr-of-members = 2
8 | }
9 |
10 | akka.actor.deployment {
11 | /factorialFrontend/factorialBackendRouter = {
12 | # Router type provided by metrics extension.
13 | router = cluster-metrics-adaptive-group
14 | # Router parameter specific for metrics extension.
15 | # metrics-selector = heap
16 | # metrics-selector = load
17 | # metrics-selector = cpu
18 | metrics-selector = mix
19 | #
20 | routees.paths = ["/user/factorialBackend"]
21 | cluster {
22 | enabled = on
23 | use-role = backend
24 | allow-local-routees = off
25 | }
26 | }
27 | }
28 |
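`FactorialFrontend` itself does not appear in this dump (it is only launched from `FactorialApp` below), but the deployment block above is consumed through `FromConfig` when the frontend creates its router. A minimal sketch of that side, assuming the frontend actor is created as top-level `factorialFrontend` so that the child path matches `/factorialFrontend/factorialBackendRouter`:

```scala
import akka.actor.Actor
import akka.routing.FromConfig

class FactorialFrontend extends Actor {
  // Router type, metrics-selector and routees.paths are all read from the
  // /factorialFrontend/factorialBackendRouter deployment section above.
  val backend = context.actorOf(FromConfig.props(), name = "factorialBackendRouter")

  def receive = {
    case n: Int => backend ! n // routed to a backend picked by the adaptive-group router
  }
}
```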
--------------------------------------------------------------------------------
/mass-functest/src/test/resources/logback-test.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="UTF-8"?>
2 | <configuration>
3 |
4 |     <appender name="FILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
5 |         <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
6 |             <fileNamePattern>./logs/application_%d{yyyy-MM-dd}.log</fileNamePattern>
7 |         </rollingPolicy>
8 |         <encoder>
9 |             <pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{50} - %msg%n</pattern>
10 |         </encoder>
11 |     </appender>
12 |
13 |     <appender name="CONSOLE" class="ch.qos.logback.core.ConsoleAppender">
14 |         <encoder>
15 |             <pattern>%date{HH:mm:ss.SSS} %coloredLevel %logger{50} - %message%n%xException{10}</pattern>
16 |         </encoder>
17 |     </appender>
18 |
19 |     <root level="INFO">
20 |         <appender-ref ref="CONSOLE"/>
21 |     </root>
22 | </configuration>
--------------------------------------------------------------------------------
/mass-functest/src/test/resources/remotelookup.conf:
--------------------------------------------------------------------------------
1 | include "test-common"
2 |
3 | akka {
4 | remote.netty.tcp.port = 2553
5 | }
6 |
--------------------------------------------------------------------------------
/mass-functest/src/test/resources/simple-cluster.conf:
--------------------------------------------------------------------------------
1 | akka {
2 | loglevel = INFO
3 | actor {
4 | provider = cluster
5 | }
6 | remote {
7 | netty.tcp {
8 | hostname = "127.0.0.1"
9 | port = 0
10 | }
11 | artery { // UDP
12 | enabled = on
13 | canonical.hostname = "127.0.0.1"
14 | canonical.port = 0
15 | }
16 | }
17 |
18 | cluster {
19 | seed-nodes = [
20 | "akka://ClusterSystem@127.0.0.1:2551",
21 | "akka://ClusterSystem@127.0.0.1:2552"]
22 |
23 | # auto downing is NOT safe for production deployments.
24 | # you may want to use it during development, read more about it in the docs.
25 | // auto-down-unreachable-after = 10s
26 | jmx.multi-mbeans-in-same-jvm = on
27 | }
28 | }
29 |
30 | # Enable metrics extension in akka-cluster-metrics.
31 | akka.extensions=["akka.cluster.metrics.ClusterMetricsExtension"]
32 |
33 | # Sigar native library extract location during tests.
34 | # Note: use per-jvm-instance folder when running multiple jvm on one host.
35 | akka.cluster.metrics.native-library-extract-folder=${user.dir}/target/native
36 |
--------------------------------------------------------------------------------
/mass-functest/src/test/resources/stats1.conf:
--------------------------------------------------------------------------------
1 | include "simple-cluster"
2 |
3 | akka.actor.deployment {
4 | /statsService/workerRouter {
5 | router = consistent-hashing-group
6 | routees.paths = ["/user/statsWorker"]
7 | cluster {
8 | enabled = on
9 | allow-local-routees = on
10 | use-role = compute
11 | }
12 | }
13 | }
14 |
--------------------------------------------------------------------------------
/mass-functest/src/test/resources/stats2.conf:
--------------------------------------------------------------------------------
1 | include "simple-cluster"
2 |
3 | akka.actor.deployment {
4 | /statsService/singleton/workerRouter {
5 | router = consistent-hashing-pool
6 | cluster {
7 | enabled = on
8 | max-nr-of-instances-per-node = 3
9 | allow-local-routees = on
10 | use-role = compute
11 | }
12 | }
13 | }
14 |
--------------------------------------------------------------------------------
/mass-functest/src/test/resources/test-common.conf:
--------------------------------------------------------------------------------
1 | akka {
2 | actor {
3 | provider = remote
4 | }
5 | remote {
6 | netty.tcp {
7 | hostname = "127.0.0.1"
8 | }
9 | }
10 | }
--------------------------------------------------------------------------------
/mass-functest/src/test/scala/sample/cluster/factorial/FactorialApp.scala:
--------------------------------------------------------------------------------
1 | package sample.cluster.factorial
2 |
3 | object FactorialApp {
4 | def main(args: Array[String]): Unit = {
5 | // starting 3 backend nodes and 1 frontend node
6 | FactorialBackend.main(Seq("2551").toArray)
7 | FactorialBackend.main(Seq("2552").toArray)
8 | FactorialBackend.main(Array.empty)
9 | FactorialFrontend.main(Array.empty)
10 | }
11 | }
12 |
--------------------------------------------------------------------------------
/mass-functest/src/test/scala/sample/cluster/factorial/FactorialBackend.scala:
--------------------------------------------------------------------------------
1 | package sample.cluster.factorial
2 |
3 | import akka.actor.{ Actor, ActorLogging, ActorSystem, Props }
4 | import akka.pattern.pipe
5 | import com.typesafe.config.ConfigFactory
6 |
7 | import scala.annotation.tailrec
8 | import scala.concurrent.Future
9 |
10 | class FactorialBackend extends Actor with ActorLogging {
11 | import context.dispatcher
12 |
13 | def receive = {
14 | case (n: Int) =>
15 | Future(factorial(n)) map { result =>
16 | (n, result)
17 | } pipeTo sender()
18 | }
19 |
20 | def factorial(n: Int): BigInt = {
21 | @tailrec def factorialAcc(acc: BigInt, n: Int): BigInt =
22 | if (n <= 1) acc
23 | else factorialAcc(acc * n, n - 1)
24 | factorialAcc(BigInt(1), n)
25 | }
26 | }
27 |
28 | object FactorialBackend {
29 | def main(args: Array[String]): Unit = {
30 | // Override the configuration of the port when specified as program argument
31 | val port = if (args.isEmpty) "0" else args(0)
32 | val config = ConfigFactory
33 | .parseString(s"""
34 | akka.remote.netty.tcp.port=$port
35 | akka.remote.artery.canonical.port=$port
36 | """)
37 | .withFallback(ConfigFactory.parseString("akka.cluster.roles = [backend]"))
38 | .withFallback(ConfigFactory.load("factorial"))
39 |
40 | val system = ActorSystem("ClusterSystem", config)
41 | system.actorOf(Props[FactorialBackend], name = "factorialBackend")
42 |
43 | system.actorOf(Props[MetricsListener], name = "metricsListener")
44 | }
45 | }
46 |
--------------------------------------------------------------------------------
/mass-functest/src/test/scala/sample/cluster/factorial/MetricsListener.scala:
--------------------------------------------------------------------------------
1 | package sample.cluster.factorial
2 |
3 | import akka.actor.{ Actor, ActorLogging }
4 | import akka.cluster.Cluster
5 | import akka.cluster.ClusterEvent.CurrentClusterState
6 | import akka.cluster.metrics.{ ClusterMetricsChanged, ClusterMetricsExtension, NodeMetrics }
7 | import akka.cluster.metrics.StandardMetrics.{ Cpu, HeapMemory }
8 |
9 | class MetricsListener extends Actor with ActorLogging {
10 | val selfAddress = Cluster(context.system).selfAddress
11 | val extension = ClusterMetricsExtension(context.system)
12 |
13 | // Subscribe to ClusterMetricsEvent events.
14 | override def preStart(): Unit = extension.subscribe(self)
15 |
16 | // Unsubscribe from ClusterMetricsEvent events.
17 | override def postStop(): Unit = extension.unsubscribe(self)
18 |
19 | def receive = {
20 | case ClusterMetricsChanged(clusterMetrics) =>
21 | clusterMetrics.filter(_.address == selfAddress) foreach { nodeMetrics =>
22 | logHeap(nodeMetrics)
23 | logCpu(nodeMetrics)
24 | }
25 | case state: CurrentClusterState => // Ignore.
26 | }
27 |
28 | def logHeap(nodeMetrics: NodeMetrics): Unit = nodeMetrics match {
29 | case HeapMemory(address, timestamp, used, committed, max) =>
30 | log.info("Used heap: {} MB", used.doubleValue / 1024 / 1024)
31 | case _ => // No heap info.
32 | }
33 |
34 | def logCpu(nodeMetrics: NodeMetrics): Unit = nodeMetrics match {
35 | case Cpu(address, timestamp, Some(systemLoadAverage), cpuCombined, cpuStolen, processors) =>
36 | log.info("Load: {} ({} processors)", systemLoadAverage, processors)
37 | case _ => // No cpu info.
38 | }
39 | }
40 |
--------------------------------------------------------------------------------
/mass-functest/src/test/scala/sample/cluster/sample/SimpleClusterApp.scala:
--------------------------------------------------------------------------------
1 | package sample.cluster.sample
2 |
3 | import java.util.concurrent.TimeUnit
4 |
5 | import akka.actor.{ ActorSystem, Props }
6 | import akka.cluster.Cluster
7 | import com.typesafe.config.ConfigFactory
8 |
9 | import scala.concurrent.{ ExecutionContext, Future }
10 |
11 | object SimpleClusterApp extends App {
12 | var tm = 0
13 | Seq("2551", "2552", "0").foreach { port =>
14 | // Override the configuration of the port
15 | val config = ConfigFactory.parseString(s"""
16 | akka.remote.netty.tcp.port=$port
17 | akka.remote.artery.canonical.port=$port
18 | """).withFallback(ConfigFactory.load("simple-cluster"))
19 |
20 | // Create an Akka system
21 | val system = ActorSystem("ClusterSystem", config)
22 | val cluster = Cluster(system)
23 | // Create an actor that handles cluster domain events
24 | val simpleClusterListener =
25 | system.actorOf(Props[SimpleClusterListener], name = "clusterListener")
26 |
27 | tm += 5
28 | Future {
29 | TimeUnit.SECONDS.sleep(7 + tm)
30 | simpleClusterListener ! "Leave"
31 | }(ExecutionContext.Implicits.global)
32 | }
33 | }
34 |
--------------------------------------------------------------------------------
/mass-functest/src/test/scala/sample/cluster/sample/SimpleClusterListener.scala:
--------------------------------------------------------------------------------
1 | package sample.cluster.sample
2 |
3 | import java.util.concurrent.TimeUnit
4 |
5 | import akka.actor.Actor
6 | import akka.cluster.Cluster
7 | import akka.cluster.ClusterEvent._
8 | import com.typesafe.scalalogging.StrictLogging
9 |
10 | import scala.concurrent.ExecutionContext.Implicits
11 | import scala.concurrent.Future
12 |
13 | class SimpleClusterListener extends Actor with StrictLogging {
14 | val cluster = Cluster(context.system)
15 |
16 | override def preStart(): Unit =
17 | cluster.subscribe(self, InitialStateAsEvents, classOf[MemberEvent], classOf[UnreachableMember])
18 |
19 | override def postStop(): Unit =
20 | cluster.unsubscribe(self)
21 |
22 | override def receive: Receive = {
23 | case MemberUp(member) =>
24 | logger.info(s"${cluster.selfAddress} member is up: $member")
25 | case UnreachableMember(member) =>
26 | logger.warn(s"${cluster.selfAddress} member detected as unreachable: $member")
27 | case MemberRemoved(member, previousStatus) =>
28 | logger.info(s"${cluster.selfAddress} member removed: $member, previous status: $previousStatus")
29 | case me: MemberEvent =>
30 | logger.debug(s"${cluster.selfAddress} ignore event: $me")
31 | case "Leave" =>
32 | cluster.leave(cluster.selfAddress)
33 | Future {
34 | TimeUnit.SECONDS.sleep(3)
35 | }(Implicits.global)
36 | }
37 | }
38 |
--------------------------------------------------------------------------------
/mass-functest/src/test/scala/sample/cluster/stats/StatsMessages.scala:
--------------------------------------------------------------------------------
1 | package sample.cluster.stats
2 |
3 | final case class StatsJob(text: String)
4 | final case class StatsResult(meanWordLength: Double)
5 | final case class JobFailed(reason: String)
6 |
--------------------------------------------------------------------------------
/mass-functest/src/test/scala/sample/cluster/stats/StatsService.scala:
--------------------------------------------------------------------------------
1 | package sample.cluster.stats
2 |
3 | import akka.actor.{ Actor, ActorRef, Props, ReceiveTimeout }
4 | import akka.routing.ConsistentHashingRouter.ConsistentHashableEnvelope
5 | import akka.routing.FromConfig
6 |
7 | import scala.concurrent.duration._
8 |
9 | //#service
10 | class StatsService extends Actor {
11 | // This router is used both with lookup and deploy of routees. If you
12 | // have a router with only lookup of routees you can use Props.empty
13 | // instead of Props[StatsWorker].
14 | val workerRouter =
15 | context.actorOf(FromConfig.props(Props[StatsWorker]), name = "workerRouter")
16 |
17 | def receive = {
18 | case StatsJob(text) if text != "" =>
19 | val words = text.split(" ")
20 | val replyTo = sender() // important to not close over sender()
21 | // create actor that collects replies from workers
22 | val aggregator =
23 | context.actorOf(Props(classOf[StatsAggregator], words.size, replyTo))
24 | words foreach { word =>
25 | workerRouter.tell(ConsistentHashableEnvelope(word, word), aggregator)
26 | }
27 | }
28 | }
29 |
30 | class StatsAggregator(expectedResults: Int, replyTo: ActorRef) extends Actor {
31 | var results = IndexedSeq.empty[Int]
32 | context.setReceiveTimeout(3.seconds)
33 |
34 | def receive = {
35 | case wordCount: Int =>
36 | results = results :+ wordCount
37 | if (results.size == expectedResults) {
38 | val meanWordLength = results.sum.toDouble / results.size
39 | replyTo ! StatsResult(meanWordLength)
40 | context.stop(self)
41 | }
42 | case ReceiveTimeout =>
43 | replyTo ! JobFailed("Service unavailable, try again later")
44 | context.stop(self)
45 | }
46 | }
47 | //#service
48 |
--------------------------------------------------------------------------------
/mass-functest/src/test/scala/sample/cluster/stats/StatsWorker.scala:
--------------------------------------------------------------------------------
1 | package sample.cluster.stats
2 |
3 | import akka.actor.Actor
4 |
5 | //#worker
6 | class StatsWorker extends Actor {
7 | var cache = Map.empty[String, Int]
8 |
9 | def receive = {
10 | case word: String =>
11 | val length = cache.get(word) match {
12 | case Some(x) => x
13 | case None =>
14 | val x = word.length
15 | cache += (word -> x)
16 | x
17 | }
18 |
19 | sender() ! length
20 | }
21 | }
22 | //#worker
23 |
--------------------------------------------------------------------------------
/mass-functest/src/test/scala/sample/cluster/transformation/TransformationApp.scala:
--------------------------------------------------------------------------------
1 | package sample.cluster.transformation
2 |
3 | object TransformationApp {
4 | def main(args: Array[String]): Unit = {
5 | // starting 2 frontend nodes and 3 backend nodes
6 | TransformationFrontend.main(Seq("2551").toArray)
7 | TransformationBackend.main(Seq("2552").toArray)
8 | TransformationBackend.main(Array.empty)
9 | TransformationBackend.main(Array.empty)
10 | TransformationFrontend.main(Array.empty)
11 | }
12 | }
13 |
--------------------------------------------------------------------------------
/mass-functest/src/test/scala/sample/cluster/transformation/TransformationMessages.scala:
--------------------------------------------------------------------------------
1 | package sample.cluster.transformation
2 |
3 | //#messages
4 | final case class TransformationJob(text: String)
5 | final case class TransformationResult(text: String)
6 | final case class JobFailed(reason: String, job: TransformationJob)
7 | case object BackendRegistration
8 | //#messages
9 |
--------------------------------------------------------------------------------
/mass-functest/src/test/scala/sample/multinode/STMultiNodeSpec.scala:
--------------------------------------------------------------------------------
1 | package sample.multinode
2 |
3 | import akka.remote.testkit.{ MultiNodeSpec, MultiNodeSpecCallbacks }
4 | import org.scalatest.{ BeforeAndAfterAll, Matchers, WordSpecLike }
5 |
6 | import scala.language.implicitConversions
7 |
8 | trait STMultiNodeSpec extends MultiNodeSpecCallbacks with WordSpecLike with Matchers with BeforeAndAfterAll {
9 | self: MultiNodeSpec =>
10 |
11 | override protected def beforeAll(): Unit = {
12 | super.beforeAll()
13 | multiNodeSpecBeforeAll()
14 | }
15 |
16 | override protected def afterAll(): Unit = {
17 | multiNodeSpecAfterAll()
18 | super.afterAll()
19 | }
20 |
21 | // Might not be needed anymore if we find a nice way to tag all logging from a node
22 | implicit override def convertToWordSpecStringWrapper(s: String): WordSpecStringWrapper =
23 | new WordSpecStringWrapper(s"$s (on node '${self.myself.name}', $getClass)")
24 | }
25 |
--------------------------------------------------------------------------------
/mass-functest/src/test/scala/sample/remote/benchmark/Receiver.scala:
--------------------------------------------------------------------------------
1 | package sample.remote.benchmark
2 |
3 | import akka.actor.{ Actor, ActorSystem, Props }
4 | import com.typesafe.config.ConfigFactory
5 |
6 | class Receiver extends Actor {
7 | import Sender._
8 |
9 | override def receive: Receive = {
10 | case m: Echo =>
11 | sender() ! m
12 | case Shutdown =>
13 | context.system.terminate()
14 | case _ =>
15 | // any other received message is ignored
16 | }
17 | }
18 |
19 | object Receiver extends App {
20 | val system = ActorSystem("Sys", ConfigFactory.load("remotelookup"))
21 | system.actorOf(Props[Receiver], "rcv") ! "Hello Scala!"
22 | }
23 |
--------------------------------------------------------------------------------
/mass-job/README.md:
--------------------------------------------------------------------------------
1 |
2 |
3 | ## Quartz Table
4 |
5 | | table | description |
6 | | ------------------------- | ------------------------------------------------------------ |
7 | | qrtz_job_details | Stores the details of every configured Job |
8 | | qrtz_triggers | Stores information about the configured Triggers |
9 | | qrtz_cron_triggers | Stores Cron Triggers, including the cron expression and time zone |
10 | | qrtz_simple_triggers | Stores simple Triggers, including repeat count, interval, and number of times already fired |
11 | | qrtz_paused_triggers_grps | Stores information about paused Trigger groups |
12 | | qrtz_blob_triggers | Stores Triggers as Blobs (used when Quartz users create their own custom Trigger types via JDBC and the JobStore does not know how to persist the instances) |
13 | | qrtz_scheduler_state | Stores a small amount of Scheduler state, and information about the other Scheduler instances (when used in a cluster) |
14 | | qrtz_calendars | Stores Quartz Calendar information as Blobs |
15 | | qrtz_fired_triggers | Stores state about Triggers that have fired, and execution information for the associated Jobs |
16 | | qrtz_locks | Stores the application's pessimistic lock information (if pessimistic locking is used) |
17 | | qrtz_simprop_triggers | |
18 |
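These tables are written by Quartz's JDBC job store, which `mass-job`'s `reference.conf` enables via `org.quartz.jobStore.class = org.quartz.impl.jdbcjobstore.JobStoreTX`. As a minimal sketch of how rows end up in them using the plain Quartz API (`HelloJob` is a hypothetical `org.quartz.Job` implementation, not part of this repository):

```scala
import org.quartz.{ CronScheduleBuilder, Job, JobBuilder, JobExecutionContext, TriggerBuilder }
import org.quartz.impl.StdSchedulerFactory

class HelloJob extends Job {
  override def execute(ctx: JobExecutionContext): Unit = println("hello")
}

object QuartzTableSketch extends App {
  val scheduler = StdSchedulerFactory.getDefaultScheduler
  // The job definition is persisted into qrtz_job_details ...
  val job = JobBuilder.newJob(classOf[HelloJob]).withIdentity("hello", "sample").build()
  // ... and the cron trigger into qrtz_triggers plus qrtz_cron_triggers.
  val trigger = TriggerBuilder
    .newTrigger()
    .withIdentity("hello-trigger", "sample")
    .withSchedule(CronScheduleBuilder.cronSchedule("1 0 0 * * ?"))
    .build()
  scheduler.scheduleJob(job, trigger)
  scheduler.start()
}
```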
--------------------------------------------------------------------------------
/mass-job/docs/作业调度配置参考.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yangbajing/fusion-data-archived/eb5e5ef5d1ec667e6087652bcee4368e633a03e3/mass-job/docs/作业调度配置参考.png
--------------------------------------------------------------------------------
/mass-job/src/main/resources/logback.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="UTF-8"?>
2 | <configuration>
3 |     <appender name="CONSOLE" class="ch.qos.logback.core.ConsoleAppender">
4 |         <encoder>
5 |             <pattern>%date{HH:mm:ss.SSS} %coloredLevel %traceID %logger{50} - %message%n%xException{10}</pattern>
6 |         </encoder>
7 |     </appender>
8 |
9 |     <root level="INFO">
10 |         <appender-ref ref="CONSOLE"/>
11 |     </root>
12 | </configuration>
--------------------------------------------------------------------------------
/mass-job/src/main/resources/reference.conf:
--------------------------------------------------------------------------------
1 | fusion.http.default {
2 | server {
3 | host = "127.0.0.1"
4 | port = 22222
5 | }
6 | }
7 | fusion.job.default {
8 | org.quartz.job.instanceName = "MassScheduler"
9 | org.quartz.threadPool.threadCount = "2"
10 | org.quartz.jobStore.class: org.quartz.impl.jdbcjobstore.JobStoreTX
11 | //org.quartz.jobStore.class: org.quartz.simpl.RAMJobStore
12 | # FusionJdbc id
13 | org.quartz.jobStore.dataSource: fusion.jdbc.default
14 | org.quartz.jobStore.driverDelegateClass: org.quartz.impl.jdbcjobstore.PostgreSQLDelegate
15 | org.quartz.jobStore.tablePrefix: "qrtz_"
16 | }
17 |
18 | fusion {
19 | config-modules += "akka"
20 | akka.cluster {
21 | roles += "console"
22 | seed-nodes = ["127.0.0.1:25520"]
23 | }
24 | }
25 |
26 | kamon.util.filters {
27 | "akka.tracked-actor" {
28 | includes = ["**"]
29 | }
30 |
31 | "akka.tracked-dispatcher" {
32 | includes = [ "**" ]
33 | }
34 |
35 | "akka.traced-actor" {
36 | includes = [ "**" ]
37 | }
38 |
39 | // "test" {
40 | // includes = [ "**" ]
41 | // }
42 | }
--------------------------------------------------------------------------------
/mass-job/src/main/scala/mass/job/JobMain.scala:
--------------------------------------------------------------------------------
1 | package mass.job
2 |
3 | import fusion.core.FusionApplication
4 | import fusion.http.FusionHttpServer
5 | import fusion.inject.guice.GuiceApplication
6 | import mass.job.route.Routes
7 |
8 | object JobMain {
9 | def main(args: Array[String]): Unit = {
10 | val application = FusionApplication.start().asInstanceOf[GuiceApplication]
11 | FusionHttpServer(application.classicSystem).component.startBaseRouteSync(application.instance[Routes])
12 | }
13 | }
14 |
--------------------------------------------------------------------------------
/mass-job/src/main/scala/mass/job/component/DefaultSchedulerJob.scala:
--------------------------------------------------------------------------------
1 | package mass.job.component
2 |
3 | import java.nio.file.Paths
4 |
5 | import com.typesafe.scalalogging.StrictLogging
6 | import fusion.inject.guice.GuiceApplication
7 | import mass.core.job._
8 | import mass.job.JobScheduler
9 | import mass.message.job.SchedulerJobResult
10 |
11 | import scala.concurrent.Future
12 |
13 | class DefaultSchedulerJob extends SchedulerJob with StrictLogging {
14 | override def run(context: SchedulerContext): Future[JobResult] = {
15 | val jobScheduler = GuiceApplication(context.system).instance[JobScheduler]
16 | // TODO Use job blocking dispatcher
17 | val blockingDispatcher = jobScheduler.executionContext
18 | Future {
19 | context.jobItem.resources.get(JobConstants.Resources.ZIP_PATH) match {
20 | case Some(zipPath) => handleZip(zipPath, jobScheduler, context)
21 | case _ => handle(jobScheduler, context)
22 | }
23 | }(blockingDispatcher)
24 | }
25 |
26 | private def handleZip(zipPath: String, jobSystem: JobScheduler, ctx: SchedulerContext): SchedulerJobResult =
27 | JobRun.runOnZip(Paths.get(zipPath), ctx.key, ctx.jobItem, jobSystem.jobSettings)
28 |
29 | private def handle(jobSystem: JobScheduler, ctx: SchedulerContext): SchedulerJobResult =
30 | JobRun.run(ctx.jobItem, ctx.key, jobSystem.jobSettings)
31 | }
32 |
--------------------------------------------------------------------------------
/mass-job/src/main/scala/mass/job/route/Routes.scala:
--------------------------------------------------------------------------------
1 | package mass.job.route
2 |
3 | import akka.http.scaladsl.server.Route
4 | import fusion.http.server.BaseRoute
5 | import javax.inject.Inject
6 | import mass.job.route.api.{ ApiRoute, MockRoute }
7 |
8 | class Routes @Inject() (apiRoute: ApiRoute, mockRoute: MockRoute) extends BaseRoute {
9 | def route: Route =
10 | pathPrefix("job") {
11 | apiRoute.route
12 | } ~ mockRoute.route
13 | }
14 |
--------------------------------------------------------------------------------
/mass-job/src/main/scala/mass/job/route/api/ApiRoute.scala:
--------------------------------------------------------------------------------
1 | package mass.job.route.api
2 |
3 | import akka.http.scaladsl.server.Route
4 | import fusion.http.server.{ BaseRoute, HttpDirectives }
5 | import javax.inject.{ Inject, Singleton }
6 | import mass.job.route.api.v1.JobRoute
7 |
8 | @Singleton
9 | class ApiRoute @Inject() (jobRoute: JobRoute) extends BaseRoute with HttpDirectives {
10 | override def route: Route = pathPrefix("api") {
11 | pathPrefix("v1") {
12 | jobRoute.route
13 | }
14 | }
15 | }
16 |
--------------------------------------------------------------------------------
/mass-job/src/main/scala/mass/job/route/api/MockRoute.scala:
--------------------------------------------------------------------------------
1 | package mass.job.route.api
2 |
3 | import akka.http.scaladsl.server.Route
4 | import fusion.http.server.AbstractRoute
5 | import fusion.json.jackson.http.JacksonSupport
6 | import javax.inject.{ Inject, Singleton }
7 |
8 | @Singleton
9 | class MockRoute @Inject() (val jacksonSupport: JacksonSupport) extends AbstractRoute {
10 | override def route: Route = pathPrefix("api") {
11 | currentUser ~ ruleRoute
12 | }
13 |
14 | def ruleRoute: Route = pathGet("rule") {
15 | objectComplete(jacksonSupport.objectMapper.createArrayNode)
16 | }
17 |
18 | def currentUser: Route = pathGet("currentUser") {
19 | objectComplete(jacksonSupport.objectMapper.readTree(Mock.currentUser))
20 | }
21 | }
22 |
23 | object Mock {
24 | val currentUser =
25 | """{"name":"Serati Ma","avatar":"https://gw.alipayobjects.com/zos/rmsportal/BiazfanxmamNRoxxVxka.png","userid":"00000001","email":"antdesign@alipay.com","signature":"海纳百川,有容乃大","title":"交互专家","group":"蚂蚁金服-某某某事业群-某某平台部-某某技术部-UED","tags":[{"key":"0","label":"很有想法的"},{"key":"1","label":"专注设计"},{"key":"2","label":"辣~"},{"key":"3","label":"大长腿"},{"key":"4","label":"川妹子"},{"key":"5","label":"海纳百川"}],"notifyCount":12,"country":"China","geographic":{"province":{"label":"浙江省","key":"330000"},"city":{"label":"杭州市","key":"330100"}},"address":"西湖区工专路 77 号","phone":"0752-268888888"}"""
26 | }
27 |
--------------------------------------------------------------------------------
/mass-job/src/test/resources/application-test.conf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yangbajing/fusion-data-archived/eb5e5ef5d1ec667e6087652bcee4368e633a03e3/mass-job/src/test/resources/application-test.conf
--------------------------------------------------------------------------------
/mass-job/src/test/resources/logback-test.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="UTF-8"?>
2 | <configuration>
3 |     <appender name="CONSOLE" class="ch.qos.logback.core.ConsoleAppender">
4 |         <encoder>
5 |             <pattern>%date{HH:mm:ss.SSS} %coloredLevel %logger{50} - %message%n%xException{10}</pattern>
6 |         </encoder>
7 |     </appender>
8 |
9 |     <root level="INFO">
10 |         <appender-ref ref="CONSOLE"/>
11 |     </root>
12 | </configuration>
--------------------------------------------------------------------------------
/mass-job/src/test/scala/mass/job/SchedulerTestkit.scala:
--------------------------------------------------------------------------------
1 | package mass.job
2 |
3 | import akka.actor.ActorSystem
4 | import akka.http.scaladsl.testkit.{ RouteTestTimeout, ScalatestRouteTest }
5 | import com.typesafe.scalalogging.StrictLogging
6 | import fusion.inject.guice.testkit.GuiceApplicationTestkit
7 | import fusion.json.jackson.http.JacksonSupport
8 | import org.scalatest.BeforeAndAfterAll
9 | import org.scalatest.matchers.should.Matchers
10 | import org.scalatest.wordspec.AnyWordSpecLike
11 |
12 | import scala.concurrent.duration._
13 |
14 | abstract class SchedulerTestkit
15 | extends GuiceApplicationTestkit
16 | with AnyWordSpecLike
17 | with Matchers
18 | with BeforeAndAfterAll
19 | with ScalatestRouteTest
20 | with StrictLogging {
21 | override protected def createActorSystem(): ActorSystem = application.classicSystem
22 | implicit def routeTestTimeout: RouteTestTimeout = RouteTestTimeout(10.seconds)
23 | protected val jobScheduler: JobScheduler = injectInstance[JobScheduler]
24 | protected val jacksonSupport: JacksonSupport = injectInstance[JacksonSupport]
25 | }
26 |
--------------------------------------------------------------------------------
/mass-job/src/test/scala/mass/job/component/JobRunTest.scala:
--------------------------------------------------------------------------------
1 | package mass.job.component
2 |
3 | import fusion.inject.guice.testkit.GuiceApplicationTestkit
4 | import fusion.json.jackson.ScalaObjectMapper
5 | import mass.MassSettings
6 | import mass.job.JobSettings
7 | import mass.model.job.{ JobItem, Program }
8 | import org.scalatest.wordspec.AnyWordSpecLike
9 |
10 | class JobRunTest extends GuiceApplicationTestkit with AnyWordSpecLike {
11 | private val jobSettings = JobSettings(MassSettings(config))
12 | private val objectMapper = injectInstance[ScalaObjectMapper]
13 |
14 | "JobRunTest" should {
15 | "run java" in {
16 | val item = JobItem(Program.JAVA, Seq(), "test.JavaMain")
17 | val result = JobRun.run(item, "test-java", jobSettings)
18 | println(objectMapper.prettyStringify(result))
19 | result.exitValue shouldBe 0
20 | result.start should be < result.end
21 | }
22 |
23 | "run scala" in {
24 | val item = JobItem(Program.SCALA, Seq(), "test.ScalaMain")
25 | val result = JobRun.run(item, "test-scala", jobSettings)
26 | println(objectMapper.prettyStringify(result))
27 | result.exitValue shouldBe 0
28 | result.start should be < result.end
29 | }
30 |
31 | "run bash -c" in {
32 | val item = JobItem(Program.SH, Seq("-c"), "echo '哈哈哈'")
33 | val result = JobRun.run(item, "test-bash", jobSettings)
34 | println(objectMapper.prettyStringify(result))
35 | result.exitValue shouldBe 0
36 | result.start should be < result.end
37 | }
38 |
39 | "run python -c" in {
40 | val item = JobItem(Program.PYTHON, Seq("-c"), "print('哈哈哈')")
41 | val result = JobRun.run(item, "test-python", jobSettings)
42 | println(objectMapper.prettyStringify(result))
43 | result.exitValue shouldBe 0
44 | result.start should be < result.end
45 | }
46 | }
47 | }
48 |
--------------------------------------------------------------------------------
/mass-job/src/test/scala/mass/job/route/api/v1/JobRouteTest.scala:
--------------------------------------------------------------------------------
1 | package mass.job.route.api.v1
2 |
3 | import akka.http.scaladsl.model.StatusCodes
4 | import mass.job.SchedulerTestkit
5 | import mass.message.job.JobPageResp
6 |
7 | class JobRouteTest extends SchedulerTestkit {
8 | private lazy val route = application.instance[JobRoute].route
9 |
10 | "JobRoute" should {
11 | "page" in {
12 | import jacksonSupport._
13 | Get("/job/page") ~> route ~> check {
14 | status shouldBe StatusCodes.OK
15 | val resp = responseAs[JobPageResp]
16 | println(resp)
17 | resp should not be null
18 | }
19 | }
20 | }
21 | }
22 |
--------------------------------------------------------------------------------
/mass-job/src/universal/examples/sample-job/hello.jar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yangbajing/fusion-data-archived/eb5e5ef5d1ec667e6087652bcee4368e633a03e3/mass-job/src/universal/examples/sample-job/hello.jar
--------------------------------------------------------------------------------
/mass-job/src/universal/examples/sample-job/hello.zip:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yangbajing/fusion-data-archived/eb5e5ef5d1ec667e6087652bcee4368e633a03e3/mass-job/src/universal/examples/sample-job/hello.zip
--------------------------------------------------------------------------------
/mass-job/src/universal/examples/sample-job/sample.conf:
--------------------------------------------------------------------------------
1 | # Unique job key. If unset, the server generates one; if set, the client must guarantee it is globally unique.
2 | //key=sample
3 |
4 | item {
5 |   # Display name, optional
6 |   name = "Hello world!"
7 |
8 |   # Job program type; currently java, sh (bash) and python are supported
9 |   program = "java"
10 |
11 |   # Name of the executable Java main class.
12 |   program-main = "hello.Hello"
13 | }
14 | trigger {
15 |   # Trigger type; currently simple, cron and event are supported
16 |   trigger-type = "cron"
17 |
18 |   # Job start time (optional); if unset the job starts immediately. yyyy-MM-ddTHH:mm:ss+08
19 |   start-time = "2020-03-03T10:10:10+08"
20 |
21 |   # Job end time (optional). yyyy-MM-ddTHH:mm:ss+08
22 |   end-time = "2020-03-13T10:10:10+08"
23 |
24 |   # Number of times the job repeats; effective when trigger-type is simple
25 |   repeat = 4
26 |
27 |   # Interval between two job runs; effective when trigger-type is simple
28 |   interval = 120.seconds
29 |
30 |   # CRON calendar schedule; effective when trigger-type is cron
31 |   cron-express = "1 0 0 * * ?"
32 | }
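For illustration, a minimal sketch of reading this definition with the Typesafe Config library the project already depends on (the file path is an assumption):

```scala
import java.io.File
import com.typesafe.config.ConfigFactory

object SampleJobConf extends App {
  val conf = ConfigFactory.parseFile(new File("sample.conf"))
  println(conf.getString("item.program"))         // java
  println(conf.getString("item.program-main"))    // hello.Hello
  println(conf.getString("trigger.trigger-type")) // cron
  println(conf.getString("trigger.cron-express")) // 1 0 0 * * ?
}
```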
--------------------------------------------------------------------------------
/mass-job/src/universal/examples/sample-job/sample2.conf:
--------------------------------------------------------------------------------
1 | key = "sample2"
2 | item {
3 | name = "Job名字"
4 | program = "java" # Job程序类型,当前支持Java,sh(bash),python
5 | program-main = "com.gtan.repox.Main" # 可执行Java主类名字,[需要将程序打成Jar包。job-program为java时有效
6 | program-version = "python2" # Python程序。可选,默认使用python2.7。job-program为python时有效
7 | }
8 | trigger {
9 | trigger-type = "cron" # Job类型,当前支持:simple、cron、event三种
10 | start-time = "yyyy-MM-dd HH:mm:ss" # Job开始执行时间(可选)
11 | end-time = "yyyy-MM-dd HH:mm:ss" # Job结束时间(可选)
12 | repeat = 4 # Job重复次数,job-type为simple时有效
13 | interval = 120.seconds # 两次Job之间的时间间隔,job-type为simple时有效
14 | cron-express = "1 0 0 * * ?" # 基于CRON的日历调度配置,job-type为cron时有效
15 | }
--------------------------------------------------------------------------------
/mass-rdi-cli/src/main/resources/reference.conf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yangbajing/fusion-data-archived/eb5e5ef5d1ec667e6087652bcee4368e633a03e3/mass-rdi-cli/src/main/resources/reference.conf
--------------------------------------------------------------------------------
/mass-rdi-cli/src/main/scala/mass/rdp/cli/boot/RdpCliMain.scala:
--------------------------------------------------------------------------------
1 | package mass.rdp.cli.boot
2 |
3 | object RdpCliMain extends App {}
4 |
--------------------------------------------------------------------------------
/mass-rdi-core/src/main/resources/reference.conf:
--------------------------------------------------------------------------------
1 | mass.rdp {
2 | stream-builders = []
3 |
4 | extensions = [
5 | "mass.rdp.module.RdpJdbcModule"
6 | ]
7 | }
8 |
--------------------------------------------------------------------------------
/mass-rdi-core/src/main/scala/mass/rdp/etl/EtlJob.scala:
--------------------------------------------------------------------------------
1 | package mass.rdp.etl
2 |
3 | import mass.core.job.{ JobResult, SchedulerContext, SchedulerJob }
4 | import mass.rdp.RdpSystem
5 | import mass.rdp.etl.graph.EtlGraphException
6 |
7 | import scala.concurrent.Future
8 | import scala.xml.XML
9 |
10 | class EtlJob extends SchedulerJob {
11 | import EtlJob._
12 |
13 | override def run(context: SchedulerContext): Future[JobResult] = {
14 | val rdpSystem = RdpSystem(context.system)
15 | val xmlString = context.data
16 | .getOrElse(WORKFLOW_STRING, throw new EtlGraphException(s"Workflow configuration is not set, SchedulerJob.data.key = $WORKFLOW_STRING"))
17 | val workflow = EtlWorkflow.fromXML(XML.loadString(xmlString), rdpSystem).get
18 | workflow
19 | .run()
20 | .future
21 | .map { result =>
22 | EtlJobResult(result)
23 | }(rdpSystem.classicSystem.dispatcher)
24 | }
25 | }
26 |
27 | object EtlJob {
28 | val WORKFLOW_STRING = "WORKFLOW_STRING"
29 | }
30 |
--------------------------------------------------------------------------------
/mass-rdi-core/src/main/scala/mass/rdp/etl/EtlJobResult.scala:
--------------------------------------------------------------------------------
1 | package mass.rdp.etl
2 |
3 | import mass.core.job.JobResult
4 |
5 | case class EtlJobResult(result: EtlResult) extends JobResult
6 |
--------------------------------------------------------------------------------
/mass-rdi-core/src/main/scala/mass/rdp/etl/EtlWorkflowExecution.scala:
--------------------------------------------------------------------------------
1 | package mass.rdp.etl
2 |
3 | import akka.Done
4 | import mass.connector.sql.JdbcSinkResult
5 | import mass.core.workflow.WorkflowExecution
6 |
7 | import scala.concurrent.{ Future, Promise }
8 |
9 | trait EtlResult
10 |
11 | case class SqlEtlResult(data: JdbcSinkResult) extends EtlResult
12 |
13 | class EtlWorkflowExecution(promise: Promise[EtlResult], funcClose: () => Unit) extends WorkflowExecution[EtlResult] {
14 | override def future: Future[EtlResult] = promise.future
15 |
16 | override def terminate(): Future[Done] = ???
17 |
18 | override def close(): Unit = funcClose()
19 | }
20 |
--------------------------------------------------------------------------------
/mass-rdi-core/src/main/scala/mass/rdp/etl/graph/EtlGraphException.scala:
--------------------------------------------------------------------------------
1 | package mass.rdp.etl.graph
2 |
3 | import helloscala.common.IntStatus
4 | import helloscala.common.exception.HSException
5 |
6 | class EtlGraphException(message: String) extends HSException(IntStatus.BAD_REQUEST, message) {}
7 |
--------------------------------------------------------------------------------
/mass-rdi-core/src/main/scala/mass/rdp/etl/graph/EtlStreamFactory.scala:
--------------------------------------------------------------------------------
1 | package mass.rdp.etl.graph
2 |
3 | import java.sql.PreparedStatement
4 |
5 | import akka.NotUsed
6 | import akka.stream.scaladsl.{ Sink, Source }
7 | import fusion.jdbc.util.JdbcUtils
8 | import mass.connector.Connector
9 | import mass.connector.sql._
10 | import mass.core.event.{ EventData, EventDataSimple }
11 |
12 | import scala.concurrent.Future
13 |
14 | trait EtlStreamFactory {
15 | def `type`: String
16 |
17 | def buildSource(c: Connector, s: EtlSource): Source[EventDataSql, NotUsed]
18 |
19 | def buildSink(c: Connector, s: EtlSink): Sink[EventData, Future[JdbcSinkResult]]
20 | }
21 |
22 | class EtlStreamJdbcFactory extends EtlStreamFactory {
23 | override def `type`: String = "jdbc"
24 |
25 | override def buildSource(c: Connector, s: EtlSource): Source[EventDataSql, NotUsed] =
26 | JdbcSource(s.script.content.get, Nil, 1000)(c.asInstanceOf[SQLConnector].dataSource)
27 | .via(JdbcFlow.flowJdbcResultSet)
28 | .map(jrs => EventDataSql(jrs))
29 |
30 | def buildSink(c: Connector, s: EtlSink): Sink[EventData, Future[JdbcSinkResult]] = {
31 | def action(event: EventData, stmt: PreparedStatement): Unit = {
32 | val args: Iterable[Any] = event match {
33 | case _: EventDataSimple => event.data.asInstanceOf[Iterable[Any]]
34 | case eventDataSql: EventDataSql => eventDataSql.data.values
35 | case _ => throw new EtlGraphException(s"Invalid EventData: $event.")
36 | }
37 | JdbcUtils.setStatementParameters(stmt, args)
38 | }
39 | JdbcSink[EventData](conn => conn.prepareStatement(s.script.content.get), action, 1000)(
40 | c.asInstanceOf[SQLConnector].dataSource)
41 | }
42 | }
43 |
--------------------------------------------------------------------------------
/mass-rdi-core/src/main/scala/mass/rdp/module/RdpJdbcModule.scala:
--------------------------------------------------------------------------------
1 | package mass.rdp.module
2 |
3 | import mass.connector.ConnectorType
4 | import mass.rdp.etl.graph.{ EtlGraphParserFactory, EtlGraphXmlParserFactory, EtlStreamFactory, EtlStreamJdbcFactory }
5 |
6 | class RdpJdbcModule extends RdpModule {
7 | override val name: String = "jdbc"
8 |
9 | override def `type`: String = ConnectorType.JDBC.toString
10 |
11 | override def etlStreamBuilders: Vector[EtlStreamFactory] = Vector(new EtlStreamJdbcFactory())
12 |
13 | override def graphParserFactories: Vector[EtlGraphParserFactory] = Vector(new EtlGraphXmlParserFactory())
14 | }
15 |
--------------------------------------------------------------------------------
/mass-rdi-core/src/main/scala/mass/rdp/module/RdpModule.scala:
--------------------------------------------------------------------------------
1 | package mass.rdp.module
2 |
3 | import mass.core.module.Module
4 | import mass.rdp.etl.graph.{ EtlGraphParserFactory, EtlStreamFactory }
5 |
6 | trait RdpModule extends Module {
7 | def `type`: String
8 |
9 | def etlStreamBuilders: Vector[EtlStreamFactory]
10 |
11 | def graphParserFactories: Vector[EtlGraphParserFactory]
12 | }
13 |
--------------------------------------------------------------------------------
/mass-rdi-core/src/test/resources/application.conf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yangbajing/fusion-data-archived/eb5e5ef5d1ec667e6087652bcee4368e633a03e3/mass-rdi-core/src/test/resources/application.conf
--------------------------------------------------------------------------------
/mass-rdi-core/src/test/resources/logback-test.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="UTF-8"?>
2 | <configuration>
3 |     <appender name="CONSOLE" class="ch.qos.logback.core.ConsoleAppender">
4 |         <encoder>
5 |             <pattern>%date{HH:mm:ss.SSS} %coloredLevel %logger{50} - %message%n%xException{10}</pattern>
6 |         </encoder>
7 |     </appender>
8 |
9 |     <root level="INFO">
10 |         <appender-ref ref="CONSOLE"/>
11 |     </root>
12 | </configuration>
--------------------------------------------------------------------------------
/mass-rdi/src/main/resources/reference.conf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yangbajing/fusion-data-archived/eb5e5ef5d1ec667e6087652bcee4368e633a03e3/mass-rdi/src/main/resources/reference.conf
--------------------------------------------------------------------------------
/mass-rdi/src/main/scala/mass/rdp/boot/RdpMain.scala:
--------------------------------------------------------------------------------
1 | package mass.rdp.boot
2 |
3 | import com.typesafe.config.ConfigFactory
4 | import mass.Mass
5 | import mass.rdp.RdpSystem
6 |
7 | object RdpMain extends App {
8 | RdpSystem(Mass.fromConfig(ConfigFactory.load()).classicSystem)
9 | }
10 |
--------------------------------------------------------------------------------
/mass-rdi/src/test/scala/mass/workflow/etl/EtlSchedulerWorkflowTest.scala:
--------------------------------------------------------------------------------
1 | package mass.workflow.etl
2 |
3 | import fusion.inject.guice.testkit.GuiceApplicationTestkit
4 | import mass.job.JobScheduler
5 | import mass.rdp.RdpSystem
6 | import org.scalatest.wordspec.AnyWordSpecLike
7 |
8 | class EtlSchedulerWorkflowTest extends GuiceApplicationTestkit with AnyWordSpecLike {
9 | private val rdpSystem: RdpSystem = injectInstance[RdpSystem]
10 | private val jobSystem: JobScheduler = injectInstance[JobScheduler]
11 |
12 | "EtlSchedulerWorkflowTest" should {
13 | "scheduler" in {
14 | // val conf = JobConf
15 | // .builder("test", "test")
16 | // .withCronExpress("10 * * * * ?")
17 | // .result
18 | // jobSystem.schedulerJob(conf, classOf[EtlJob], Map(EtlJob.WORKFLOW_STRING -> TestStub.graphConfig))
19 | //
20 | // TimeUnit.MINUTES.sleep(5)
21 | }
22 | }
23 | }
24 |
--------------------------------------------------------------------------------
/mass-rdi/src/test/scala/mass/workflow/etl/EtlWorkflowTest.scala:
--------------------------------------------------------------------------------
1 | package mass.workflow.etl
2 |
3 | import akka.actor.testkit.typed.scaladsl.ScalaTestWithActorTestKit
4 | import mass.rdp.RdpSystem
5 | import mass.rdp.etl.EtlWorkflow
6 | import org.scalatest.wordspec.AnyWordSpecLike
7 |
8 | import scala.concurrent.Await
9 | import scala.concurrent.duration.Duration
10 |
11 | class EtlWorkflowTest extends ScalaTestWithActorTestKit with AnyWordSpecLike {
12 | var rdpSystem: RdpSystem = _
13 | var etlWorkflow: EtlWorkflow = _
14 |
15 | override protected def beforeAll(): Unit = {
16 | super.beforeAll()
17 | rdpSystem = RdpSystem(system)
18 | etlWorkflow = EtlWorkflow.fromXML(TestStub.graphXmlConfig, rdpSystem).get
19 | }
20 |
21 | override protected def afterAll(): Unit = {
22 | etlWorkflow.close()
23 | super.afterAll()
24 | }
25 |
26 | "EtlWorkflow" should {
27 | "show" in {
28 | etlWorkflow.connectors should not be empty
29 | etlWorkflow.connectors.foreach(c => println(s"connector: $c"))
30 | println(etlWorkflow.graph)
31 |
32 | etlWorkflow.connectors.foreach(println)
33 | println(etlWorkflow.graph.name)
34 | println(etlWorkflow.graph.graphSource)
35 | etlWorkflow.graph.graphFlows.foreach(println)
36 | println(etlWorkflow.graph.graphSink)
37 | }
38 |
39 | "run" in {
40 | val execution = etlWorkflow.run()
41 | val result = Await.result(execution.future, Duration.Inf)
42 | println(result)
43 | }
44 | }
45 | }
46 |
--------------------------------------------------------------------------------
/mass-rdi/src/test/scala/mass/workflow/etl/TestStub.scala:
--------------------------------------------------------------------------------
1 | package mass.workflow.etl
2 |
3 | import scala.xml.XML
4 |
5 | object TestStub {
6 | lazy val graphConfig = {
7 | val s = scala.io.Source
8 | .fromInputStream(getClass.getClassLoader.getResourceAsStream("mass/workflow/etl/EtlWorkflowTest.xml"))
9 | try {
10 | s.getLines().mkString
11 | } finally s.close()
12 | }
13 |
14 | lazy val graphXmlConfig = XML.loadString(graphConfig)
15 | }
16 |
--------------------------------------------------------------------------------
/project/build.properties:
--------------------------------------------------------------------------------
1 | sbt.version=1.3.8
2 |
--------------------------------------------------------------------------------
/project/plugins.sbt:
--------------------------------------------------------------------------------
1 | logLevel := Level.Info
2 |
3 | resolvers += Resolver.bintrayIvyRepo("2m", "sbt-plugins")
4 |
5 | addSbtPlugin("com.github.mwz" % "sbt-sonar" % "1.6.0")
6 | addSbtPlugin("de.heikoseeberger" % "sbt-header" % "5.2.0")
7 | addSbtPlugin("com.typesafe.sbt" % "sbt-multi-jvm" % "0.4.0")
8 | addSbtPlugin("com.eed3si9n" % "sbt-assembly" % "0.14.9")
9 | addSbtPlugin("net.virtual-void" % "sbt-dependency-graph" % "0.9.0")
10 | addSbtPlugin("com.typesafe.sbt" % "sbt-native-packager" % "1.5.2")
11 | addSbtPlugin("com.lightbend.akka" % "sbt-paradox-akka" % "0.29")
12 | addSbtPlugin("com.dwijnand" % "sbt-dynver" % "4.0.0")
13 | addSbtPlugin("com.eed3si9n" % "sbt-buildinfo" % "0.9.0")
14 | addSbtPlugin("com.typesafe.sbt" % "sbt-git" % "1.0.0")
15 | addSbtPlugin("com.lightbend.sbt" % "sbt-javaagent" % "0.1.5")
16 | //addSbtPlugin("com.lightbend.akka.grpc" % "sbt-akka-grpc" % "0.7.3")
17 | addSbtPlugin("com.lightbend.sbt" % "sbt-java-formatter" % "0.4.4")
18 | addSbtPlugin("org.scalameta" % "sbt-scalafmt" % "2.2.1")
19 | addSbtPlugin("ch.epfl.scala" % "sbt-scalafix" % "0.9.7-1")
20 | addSbtPlugin("org.foundweekends" % "sbt-bintray" % "0.5.5")
21 |
22 | resolvers += Resolver.bintrayIvyRepo("helloscala", "ivy")
23 | addSbtPlugin("com.helloscala.fusion" % "fusion-sbt-plugin" % "2.0.6")
24 |
--------------------------------------------------------------------------------
/project/project-info.conf:
--------------------------------------------------------------------------------
1 | project-info {
2 | version: "current"
3 | scaladoc: "https://doc.akka.io/api/akka-http/current/akka/http/scaladsl"
4 | javadoc: "https://doc.akka.io/japi/akka-http/current"
5 | shared-info {
6 | jdk-versions: ["Adopt OpenJDK 11"]
7 | // snapshots: { }
8 | issues: {
9 | url: "https://github.com/yangbajing/fusion-data/issues"
10 | text: "Fusion Data - Github issues"
11 | }
12 | release-notes: {
13 | url: "https://github.com/yangbajing/fusion-data/releases"
14 | text: "Fusion Data - Github releases"
15 | }
16 | forums: [
17 | {
18 | text: "yangbajing/fusion-data Gitter channel"
19 | url: "https://gitter.im/yangbajing/fusion-data"
20 | }
21 | ]
22 | levels: [
23 | {
24 | readiness: CommunityDriven
25 | since: "2018-06-01"
26 | since-version: "0.1.0"
27 | }
28 | ]
29 | }
30 | fusion-discoveryx: ${project-info.shared-info} {
31 | title: "Fusion DiscoveryX"
32 | }
33 | discoveryx-client: ${project-info.shared-info} {
34 | title: "Fusion DiscoveryX Client"
35 | }
36 | discoveryx-client-play-ws: ${project-info.shared-info} {
37 | title: "Fusion DiscoveryX Client for Play"
38 | }
39 | }
40 |
--------------------------------------------------------------------------------
/sbt:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | ./sbt-dist/bin/sbt "$@"
--------------------------------------------------------------------------------
/sbt-dist/bin/sbt-launch.jar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yangbajing/fusion-data-archived/eb5e5ef5d1ec667e6087652bcee4368e633a03e3/sbt-dist/bin/sbt-launch.jar
--------------------------------------------------------------------------------
/sbt-dist/bin/sbt.bat:
--------------------------------------------------------------------------------
1 | @REM SBT launcher script
2 | @REM
3 | @REM Environment:
4 | @REM JAVA_HOME - location of a JDK home dir (mandatory)
5 | @REM SBT_OPTS - JVM options (optional)
6 | @REM Configuration:
7 | @REM sbtconfig.txt found in the SBT_HOME.
8 |
9 | @REM ZOMG! We need delayed expansion to build up CFG_OPTS later
10 | @setlocal enabledelayedexpansion
11 |
12 | @echo off
13 | set SBT_HOME=%~dp0
14 |
15 | rem FIRST we load the config file of extra options.
16 | set FN=%SBT_HOME%\..\conf\sbtconfig.txt
17 | set CFG_OPTS=
18 | FOR /F "tokens=* eol=# usebackq delims=" %%i IN ("%FN%") DO (
19 | set DO_NOT_REUSE_ME=%%i
20 | rem ZOMG (Part #2) WE use !! here to delay the expansion of
21 | rem CFG_OPTS, otherwise it remains "" for this loop.
22 | set CFG_OPTS=!CFG_OPTS! !DO_NOT_REUSE_ME!
23 | )
24 |
25 | rem We use the value of the JAVACMD environment variable if defined
26 | set _JAVACMD=%JAVACMD%
27 |
28 | if "%_JAVACMD%"=="" (
29 | if not "%JAVA_HOME%"=="" (
30 | if exist "%JAVA_HOME%\bin\java.exe" set "_JAVACMD=%JAVA_HOME%\bin\java.exe"
31 | )
32 | )
33 |
34 | if "%_JAVACMD%"=="" set _JAVACMD=java
35 |
36 | rem We use the value of the JAVA_OPTS environment variable if defined, rather than the config.
37 | set _JAVA_OPTS=%JAVA_OPTS%
38 | if "%_JAVA_OPTS%"=="" set _JAVA_OPTS=%CFG_OPTS%
39 |
40 | :run
41 |
42 | "%_JAVACMD%" %_JAVA_OPTS% %SBT_OPTS% -cp "%SBT_HOME%sbt-launch.jar" xsbt.boot.Boot %*
43 | if ERRORLEVEL 1 goto error
44 | goto end
45 |
46 | :error
47 | @endlocal
48 | exit /B 1
49 |
50 |
51 | :end
52 | @endlocal
53 | exit /B 0
54 |
--------------------------------------------------------------------------------
/sbt-dist/conf/sbtconfig.txt:
--------------------------------------------------------------------------------
1 | # Set the java args to high
2 |
3 | -Xmx2048M
4 |
5 | -XX:MaxPermSize=256m
6 |
7 | -XX:ReservedCodeCacheSize=128m
8 |
9 | # Set the extra SBT options
10 |
11 | -Dsbt.log.format=true
12 |
13 | -Dsbt.override.build.repos=true
14 |
--------------------------------------------------------------------------------
/sbt-dist/conf/sbtopts:
--------------------------------------------------------------------------------
1 | # ------------------------------------------------ #
2 | # The SBT Configuration file. #
3 | # ------------------------------------------------ #
4 |
5 |
6 | # Disable ANSI color codes
7 | #
8 | #-no-colors
9 |
10 | # Starts sbt even if the current directory contains no sbt project.
11 | #
12 | -sbt-create
13 |
14 | # Path to global settings/plugins directory (default: ~/.sbt)
15 | #
16 | #-sbt-dir /etc/sbt
17 |
18 | # Path to shared boot directory (default: ~/.sbt/boot in 0.11 series)
19 | #
20 | #-sbt-boot ~/.sbt/boot
21 |
22 | # Path to local Ivy repository (default: ~/.ivy2)
23 | #
24 | #-ivy ~/.ivy2
25 |
26 | # set memory options
27 | #
28 | #-mem
29 |
30 | # Use local caches for projects, no sharing.
31 | #
32 | #-no-share
33 |
34 | # Put SBT in offline mode.
35 | #
36 | #-offline
37 |
38 | # Sets the SBT version to use.
39 | #-sbt-version 0.11.3
40 |
41 | # Scala version (default: latest release)
42 | #
43 | #-scala-home
44 | #-scala-version
45 |
46 | # java version (default: java from PATH, currently $(java -version |& grep version))
47 | #
48 | #-java-home
49 |
50 |
--------------------------------------------------------------------------------
/sbt.bat:
--------------------------------------------------------------------------------
1 | @REM SBT launcher script
2 |
3 | .\sbt-dist\bin\sbt.bat %*
4 |
--------------------------------------------------------------------------------
/scripts/generate-doc.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | ./sbt "project mass-docs" paradox
4 | ./scripts/publish-doc.sh
5 |
--------------------------------------------------------------------------------
/scripts/publish-doc.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | rm -rf ../docs/doc/
4 | cp -r ../mass-docs/target/paradox/site/main ../docs/doc
5 |
6 |
--------------------------------------------------------------------------------
/scripts/software/dameng/Dockerfile:
--------------------------------------------------------------------------------
1 | # vim:set ft=dockerfile:
2 | FROM centos:6
3 |
4 | RUN set -x \
5 | && echo '* - nofile 65536' >> /etc/security/limits.conf \
6 | && localedef -i zh_CN -c -f UTF-8 -A /usr/share/locale/locale.alias zh_CN.UTF-8 \
7 | && yum -y install wget e2fsprogs \
8 | && mkdir /docker-entrypoint-initdb.d
9 | ENV LANG zh_CN.utf8
10 | ENV TZ Asia/Shanghai
11 | ENV SYSDBA_PWD Massdata.2018
12 | ENV SYSAUDITOR_PWD Massdata.2018
13 | #ENV DB_PATH /opt/dmdbms/data
14 | ENV DB_NAME DAMENG
15 | ENV INSTANCE_NAME DMSERVER
16 | ENV EXTENT_SIZE 16
17 | ENV PAGE_SIZE 16
18 | ENV PORT_NUM 5236
19 | ENV TIME_ZONE +08:00
20 |
21 | COPY auto_install.xml /
22 | COPY docker-entrypoint.sh /usr/local/bin
23 |
24 | COPY DM7Install.bin /
25 | #RUN wget -c https://hl.hualongdata.com/Software/DM/DM7Install.bin
26 |
27 | RUN set -x \
28 | && chmod +x /usr/local/bin/docker-entrypoint.sh && ln -sf /usr/local/bin/docker-entrypoint.sh / \
29 | && . /etc/profile && chmod +x /DM7Install.bin && ./DM7Install.bin -q /auto_install.xml \
30 | && rm auto_install.xml DM7Install.bin && chmod +x /docker-entrypoint.sh
31 | #VOLUME /opt/dmdbms/data
32 | ENTRYPOINT ["/docker-entrypoint.sh"]
33 |
34 | EXPOSE $PORT_NUM
35 | CMD ["dm7"]
36 |
--------------------------------------------------------------------------------
/scripts/software/dameng/Dockerfile-dameng:
--------------------------------------------------------------------------------
1 | FROM centos:6
2 |
3 | COPY auto_install.xml .
4 | COPY DM7Install.bin .
5 |
6 | EXPOSE 5236
7 |
8 | RUN echo '* - nofile 65536' >> /etc/security/limits.conf && \
9 | localedef -i zh_CN -c -f UTF-8 -A /usr/share/locale/locale.alias zh_CN.UTF-8 && \
10 | echo 'export TZ=Asia/Shanghai' >> /etc/profile && echo 'export LANG=zh_CN.UTF-8' >> /etc/profile && . /etc/profile
11 |
12 |
13 |
--------------------------------------------------------------------------------
/scripts/software/dameng/auto_install.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | zh
5 |
6 | +08:00
7 |
8 |
9 |
10 | 0
11 |
12 | /opt/dmdbms
13 |
14 | N
15 |
16 |
17 |
18 |
19 | N
20 |
21 | N
22 |
23 |
--------------------------------------------------------------------------------
/scripts/software/dameng/docker-entrypoint.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | set -Eeo pipefail
3 | # TODO swap to -Eeuo pipefail above (after handling all potentially-unset variables)
4 |
5 | # usage: file_env VAR [DEFAULT]
6 | # ie: file_env 'XYZ_DB_PASSWORD' 'example'
7 | # (will allow for "$XYZ_DB_PASSWORD_FILE" to fill in the value of
8 | # "$XYZ_DB_PASSWORD" from a file, especially for Docker's secrets feature)
9 | file_env() {
10 | local var="$1"
11 | local fileVar="${var}_FILE"
12 | local def="${2:-}"
13 | if [ "${!var:-}" ] && [ "${!fileVar:-}" ]; then
14 | echo >&2 "error: both $var and $fileVar are set (but are exclusive)"
15 | exit 1
16 | fi
17 | local val="$def"
18 | if [ "${!var:-}" ]; then
19 | val="${!var}"
20 | elif [ "${!fileVar:-}" ]; then
21 | val="$(< "${!fileVar}")"
22 | fi
23 | export "$var"="$val"
24 | unset "$fileVar"
25 | }
26 |
27 | if [ "${1:0:1}" = '-' ]; then
28 | set -- dmdba "$@"
29 | fi
30 |
31 | #if [ "$1" = 'dm7' ] && [ "$(id -u)" = '0' ]; then
32 | if [ "$1" = 'dm7' ]; then
33 | if [ ! -f /opt/dmdbms/data/$DB_NAME/dm.ini ]; then
34 | /opt/dmdbms/bin/dminit PATH=/opt/dmdbms/data EXTENT_SIZE=$EXTENT_SIZE PAGE_SIZE=$PAGE_SIZE CHARSET=1 LENGTH_IN_CHAR=1 \
35 | SYSDBA_PWD=$SYSDBA_PWD SYSAUDITOR_PWD=$SYSAUDITOR_PWD DB_NAME=$DB_NAME INSTANCE_NAME=$INSTANCE_NAME \
36 | PORT_NUM=$PORT_NUM TIME_ZONE=$TIME_ZONE
37 | /opt/dmdbms/script/root/dm_service_installer.sh -t dmserver -i /opt/dmdbms/data/$DB_NAME/dm.ini -p $INSTANCE_NAME -m open
38 | fi
39 | sleep 1
40 | /etc/init.d/DmService$INSTANCE_NAME start
41 | sleep 2
42 | # /opt/dmdbms/bin/dmserver /opt/dmdbms/data/$DB_NAME/dm.ini -noconsole
43 | # The init.d script backgrounds dmserver, so block here to keep PID 1
44 | # alive; otherwise the container would exit as soon as this script returns.
45 | exec tail -f /dev/null
46 | fi
47 |
48 | # Any other command (e.g. bash) is executed as-is, following the stock
49 | # postgres entrypoint this script was adapted from.
50 | exec "$@"
51 |
--------------------------------------------------------------------------------
/scripts/software/dameng/init.sql:
--------------------------------------------------------------------------------
1 | create user "MASSDATA" identified by "Massdata.2018";
2 | grant "PUBLIC","RESOURCE" to "MASSDATA";
3 | grant select on V$INSTANCE to MASSDATA;
4 | grant select on V$MPP_CFG_ITEM to MASSDATA;
5 | grant select on V$DATABASE to MASSDATA;
6 | grant select on V$DM_INI to MASSDATA;
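7 | -- The V$ grants give MASSDATA read-only access to DM's dynamic views
8 | -- (instance state, MPP config, database info, dm.ini parameters), presumably
9 | -- so the application can introspect the server it connects to.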
--------------------------------------------------------------------------------
/scripts/software/mysql/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM mysql:5.7
2 |
3 | #RUN localedef -i zh_CN -c -f UTF-8 -A /usr/share/locale/locale.alias zh_CN.UTF-8
4 |
5 | ENV LANG zh_CN.utf8
6 | ENV TZ Asia/Shanghai
7 |
8 | COPY mysql.cnf /etc/mysql/conf.d/
9 | COPY init.sql /docker-entrypoint-initdb.d/
10 |
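11 | # Run sketch (credential values are illustrative). MYSQL_USER/MYSQL_PASSWORD
12 | # make the entrypoint create the 'massdata' account that init.sql grants
13 | # privileges to; MYSQL_DATABASE is omitted because init.sql creates the
14 | # database itself.
15 | #   docker build -t massdata/mysql:5.7 .
16 | #   docker run -d -p 3306:3306 -e MYSQL_ROOT_PASSWORD=Massdata.2018 \
17 | #     -e MYSQL_USER=massdata -e MYSQL_PASSWORD=Massdata.2018 massdata/mysql:5.7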
--------------------------------------------------------------------------------
/scripts/software/mysql/init.sql:
--------------------------------------------------------------------------------
1 | set names 'utf8mb4';
2 | create database massdata character set = 'utf8mb4';
3 | grant select on mysql.* to 'massdata'@'%';
4 |
5 | use massdata;
6 | -- init tables, views, sequences begin
7 | create table test (
8 | id bigint auto_increment primary key,
9 | name varchar(255),
10 | created_at timestamp
11 | );
12 | -- init tables, views, sequences end
13 |
14 | grant all on massdata.* to 'massdata'@'%';
15 |
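16 | -- Note: the grants above assume the 'massdata' account already exists (the
17 | -- entrypoint creates it when MYSQL_USER/MYSQL_PASSWORD are set); MySQL 5.7's
18 | -- default NO_AUTO_CREATE_USER mode rejects GRANT statements for unknown users.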
--------------------------------------------------------------------------------
/scripts/software/mysql/mysql.cnf:
--------------------------------------------------------------------------------
1 | [mysqld]
2 | character-set-server=utf8mb4
3 | collation-server=utf8mb4_unicode_ci
4 |
5 | [mysql]
6 | default-character-set=utf8mb4
--------------------------------------------------------------------------------
/scripts/software/postgres/.pgpass:
--------------------------------------------------------------------------------
1 | localhost:5432:massdata:massdata:Massdata.2018
--------------------------------------------------------------------------------
/scripts/software/postgres/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM postgres:11.7
2 |
3 | RUN localedef -i zh_CN -c -f UTF-8 -A /usr/share/locale/locale.alias zh_CN.UTF-8
4 |
5 | ENV LANG zh_CN.utf8
6 | ENV TZ Asia/Shanghai
7 |
8 | COPY .pgpass /root/.pgpass
9 | COPY init.sh /docker-entrypoint-initdb.d/
10 | COPY init.sql /data/
11 | COPY job.sql /data/
12 | COPY workflow.sql /data/
13 |
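14 | # Note: libpq ignores a .pgpass file with permissions looser than 0600, so a
15 | # `RUN chmod 600 /root/.pgpass` step is needed before it takes effect.
16 | # Run sketch (tag and password are illustrative; the official image refuses to
17 | # initialize without POSTGRES_PASSWORD or POSTGRES_HOST_AUTH_METHOD):
18 | #   docker build -t massdata/postgres:11.7 .
19 | #   docker run -d -p 5432:5432 -e POSTGRES_PASSWORD=Massdata.2018 massdata/postgres:11.7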
--------------------------------------------------------------------------------
/scripts/software/postgres/init.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | set -e
3 | psql -U postgres -d template1 -c "create extension adminpack;create extension hstore;"
4 | psql -U postgres -d postgres -c "create user massdata with nosuperuser replication encrypted password 'Massdata.2018';"
5 | psql -U postgres -d postgres -c "create database massdata owner = massdata template = template1;"
6 |
7 | psql -U massdata -d massdata -f /data/init.sql
8 | psql -U massdata -d massdata -f /data/workflow.sql
9 | psql -U massdata -d massdata -f /data/job.sql
10 |
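11 | # Note: the official entrypoint runs this script only on first start, while
12 | # the data directory is still empty; local socket connections are trusted
13 | # during that phase, which is why `psql -U massdata` needs no password here.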
--------------------------------------------------------------------------------
/scripts/software/postgres/workflow.sql:
--------------------------------------------------------------------------------
1 | set timezone to 'Asia/Chongqing';
2 |
3 | -- #ddl-workflow
4 | drop table if exists public.wf_detail;
5 | create table public.wf_detail
6 | (
7 | name varchar(128) not null primary key,
8 | content text not null,
9 | created_at timestamptz
10 | );
11 | comment on column public.wf_detail.content
12 | is 'Workflow configuration (XML)';
13 | comment on column public.wf_detail.created_at
14 | is 'Creation time';
15 | -- #ddl-workflow
16 |
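17 | -- Usage sketch (values are illustrative):
18 | --   insert into public.wf_detail (name, content, created_at)
19 | --   values ('example-flow', '<graph>...</graph>', now());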
--------------------------------------------------------------------------------
/scripts/software/sqlserver/Dockerfile:
--------------------------------------------------------------------------------
1 | #FROM mcr.microsoft.com/mssql/server:2017-CU19-ubuntu-16.04
2 | FROM mcr.microsoft.com/mssql/server:2017-latest
3 |
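4 | # Run sketch (password is illustrative; the mssql image requires both
5 | # ACCEPT_EULA and SA_PASSWORD, and the password must meet SQL Server's
6 | # complexity rules):
7 | #   docker run -d -p 1433:1433 -e ACCEPT_EULA=Y -e 'SA_PASSWORD=Massdata.2018' \
8 | #     mcr.microsoft.com/mssql/server:2017-latest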
--------------------------------------------------------------------------------
/scripts/software/sqlserver/init.sql:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yangbajing/fusion-data-archived/eb5e5ef5d1ec667e6087652bcee4368e633a03e3/scripts/software/sqlserver/init.sql
--------------------------------------------------------------------------------
/version.sbt:
--------------------------------------------------------------------------------
1 | ThisBuild / version := "1.0.0-SNAPSHOT"
2 |
--------------------------------------------------------------------------------