├── types ├── pod.go ├── trace.go ├── log.go ├── service.go ├── specs_test.go ├── config_test.go ├── image.go ├── node_test.go ├── specs.go └── workload_test.go ├── engine ├── types │ ├── network.go │ ├── raw_engine.go │ ├── info.go │ ├── log.go │ ├── params.go │ ├── exec.go │ ├── image.go │ └── virtualization.go ├── systemd │ ├── image.go │ ├── network.go │ ├── container.go │ ├── exec.go │ ├── systemd.go │ └── virtualization.go ├── virt │ └── helper_test.go ├── docker │ ├── helper_test.go │ ├── container_test.go │ ├── tarfile.go │ └── docker.go ├── transform.go └── transform_test.go ├── resource ├── plugins │ ├── types │ │ ├── engine.go │ │ ├── workload.go │ │ ├── metrics.go │ │ ├── calculate.go │ │ └── node.go │ ├── binary │ │ ├── types │ │ │ ├── metrics.go │ │ │ └── calculate.go │ │ ├── binary.go │ │ ├── commands.go │ │ ├── metrics.go │ │ ├── call.go │ │ └── calculate.go │ ├── cpumem │ │ ├── types │ │ │ ├── engine.go │ │ │ ├── errors.go │ │ │ └── cpu.go │ │ ├── metrics_test.go │ │ ├── cpumem.go │ │ └── cpumem_test.go │ └── goplugin │ │ └── plugin.go ├── cobalt │ ├── call.go │ └── metrics.go ├── types │ └── resource_test.go └── manager.go ├── utils ├── contest_test.go ├── sentry.go ├── gopool_test.go ├── service_test.go ├── config.go ├── gopool.go ├── ram.go ├── ram_test.go ├── context.go ├── http_test.go ├── service.go ├── file.go ├── transaction_test.go ├── config_test.go ├── file_test.go └── transaction.go ├── client ├── servicediscovery │ ├── service_discovery.go │ ├── builder.go │ └── resolver.go ├── resolver │ ├── static │ │ ├── builder.go │ │ └── resolver.go │ └── eru │ │ ├── builder.go │ │ └── resolver.go ├── interceptor │ └── types.go └── client.go ├── store ├── etcdv3 │ ├── helper.go │ ├── embedded │ │ ├── embeded_test.go │ │ └── embeded.go │ ├── helper_test.go │ ├── mercury_test.go │ ├── processing_test.go │ ├── deploy_test.go │ ├── pod_test.go │ ├── service_test.go │ ├── deploy.go │ ├── mercury.go │ ├── meta │ │ ├── ephemeral.go │ │ └── meta.go │ └── processing.go ├── options.go ├── redis │ ├── helper_test.go │ ├── lock_test.go │ ├── lock.go │ ├── processing_test.go │ ├── pod_test.go │ ├── deploy_test.go │ ├── helper.go │ ├── deploy.go │ ├── ephemeral.go │ ├── service_test.go │ ├── pod.go │ ├── processing.go │ └── service.go └── factory │ └── factory.go ├── lock ├── lock.go └── redis │ ├── lock_test.go │ └── lock.go ├── discovery ├── discovery.go └── helium │ └── helium_test.go ├── eru-core.service ├── rpc ├── counter_test.go └── counter.go ├── source ├── gitlab │ └── cesium.go ├── source.go ├── github │ └── manganese.go ├── common │ └── helper.go └── mocks │ └── Source.go ├── .github └── workflows │ ├── test.yml │ ├── binary.yml │ ├── goreleaser.yml │ ├── golangci-lint.yml │ └── dockerimage.yml ├── auth ├── simple │ ├── credential_test.go │ ├── credential.go │ └── simple.go ├── auth.go └── mocks │ └── Auth.go ├── .gitignore ├── Dockerfile ├── version └── version.go ├── cluster └── calcium │ ├── hook.go │ ├── raw_engine.go │ ├── metrics.go │ ├── log.go │ ├── log_test.go │ ├── copy.go │ ├── remap.go │ ├── send.go │ ├── helper.go │ ├── remap_test.go │ ├── pod.go │ ├── raw_engine_test.go │ ├── capacity.go │ ├── copy_test.go │ ├── workload.go │ ├── network.go │ └── pod_test.go ├── spec.yaml ├── strategy ├── strategy_test.go ├── drained.go ├── strategy.go ├── average.go ├── average_test.go ├── fill.go ├── drained_test.go ├── communism.go ├── global.go └── fill_test.go ├── wal ├── kv │ ├── mocked_test.go │ └── kv.go ├── event.go ├── wal.go └── mocks │ └── WAL.go ├── metrics └── 
handler.go ├── LICENSE ├── .golangci.yml ├── log ├── sentry.go ├── inner.go └── field.go ├── make-release └── .goreleaser.yml /types/pod.go: -------------------------------------------------------------------------------- 1 | package types 2 | 3 | // Pod define pod 4 | type Pod struct { 5 | Name string `json:"name"` 6 | Desc string `json:"desc"` 7 | } 8 | -------------------------------------------------------------------------------- /engine/types/network.go: -------------------------------------------------------------------------------- 1 | package types 2 | 3 | // Network is network info 4 | type Network struct { 5 | Name string `json:"name"` 6 | Subnets []string `json:"cidr"` 7 | } 8 | -------------------------------------------------------------------------------- /types/trace.go: -------------------------------------------------------------------------------- 1 | package types 2 | 3 | // ContextValueKey . 4 | type ContextValueKey int 5 | 6 | const ( 7 | // TracingID . 8 | TracingID ContextValueKey = iota 9 | ) 10 | -------------------------------------------------------------------------------- /resource/plugins/types/engine.go: -------------------------------------------------------------------------------- 1 | package types 2 | 3 | import resourcetypes "github.com/projecteru2/core/resource/types" 4 | 5 | // EngineParams used for engine 6 | type EngineParams = resourcetypes.RawParams 7 | -------------------------------------------------------------------------------- /types/log.go: -------------------------------------------------------------------------------- 1 | package types 2 | 3 | // LogStreamOptions log stream options 4 | type LogStreamOptions struct { 5 | ID string 6 | Tail string 7 | Since string 8 | Until string 9 | Follow bool 10 | } 11 | -------------------------------------------------------------------------------- /engine/types/raw_engine.go: -------------------------------------------------------------------------------- 1 | package types 2 | 3 | type RawEngineOptions struct { 4 | ID string 5 | Op string 6 | Params []byte 7 | } 8 | 9 | type RawEngineResult struct { 10 | ID string 11 | Data []byte 12 | } 13 | -------------------------------------------------------------------------------- /utils/contest_test.go: -------------------------------------------------------------------------------- 1 | package utils 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/stretchr/testify/assert" 7 | ) 8 | 9 | func TestInheritTracingInfo(t *testing.T) { 10 | assert.Nil(t, InheritTracingInfo(nil, nil)) 11 | } 12 | -------------------------------------------------------------------------------- /types/service.go: -------------------------------------------------------------------------------- 1 | package types 2 | 3 | import "time" 4 | 5 | // ServiceStatus Interval indicates when the expected next push shall reach before 6 | type ServiceStatus struct { 7 | Addresses []string 8 | Interval time.Duration 9 | } 10 | -------------------------------------------------------------------------------- /utils/sentry.go: -------------------------------------------------------------------------------- 1 | package utils 2 | 3 | import "github.com/projecteru2/core/log" 4 | 5 | // SentryGo wraps goroutine spawn to capture panic 6 | func SentryGo(f func()) { 7 | go func() { 8 | defer log.SentryDefer() 9 | f() 10 | }() 11 | } 12 | -------------------------------------------------------------------------------- /client/servicediscovery/service_discovery.go: 
-------------------------------------------------------------------------------- 1 | package servicediscovery 2 | 3 | import "context" 4 | 5 | // ServiceDiscovery notifies current core service addresses 6 | type ServiceDiscovery interface { 7 | Watch(context.Context) (<-chan []string, error) 8 | } 9 | -------------------------------------------------------------------------------- /engine/types/info.go: -------------------------------------------------------------------------------- 1 | package types 2 | 3 | // Info define info response 4 | type Info struct { 5 | Type string 6 | ID string 7 | NCPU int 8 | MemTotal int64 9 | StorageTotal int64 10 | Resources map[string][]byte 11 | } 12 | -------------------------------------------------------------------------------- /engine/types/log.go: -------------------------------------------------------------------------------- 1 | package types 2 | 3 | // VirtualizationLogStreamOptions . 4 | type VirtualizationLogStreamOptions struct { 5 | ID string 6 | Tail string 7 | Since string 8 | Until string 9 | Follow bool 10 | Stdout bool 11 | Stderr bool 12 | } 13 | -------------------------------------------------------------------------------- /utils/gopool_test.go: -------------------------------------------------------------------------------- 1 | package utils 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/stretchr/testify/assert" 7 | ) 8 | 9 | func TestNewPool(t *testing.T) { 10 | pool, err := NewPool(20) 11 | assert.NoError(t, err) 12 | assert.Equal(t, pool.Cap(), 20) 13 | } 14 | -------------------------------------------------------------------------------- /store/etcdv3/helper.go: -------------------------------------------------------------------------------- 1 | package etcdv3 2 | 3 | import ( 4 | "strings" 5 | ) 6 | 7 | func parseStatusKey(key string) (string, string, string, string) { 8 | parts := strings.Split(key, "/") 9 | l := len(parts) 10 | return parts[l-4], parts[l-3], parts[l-2], parts[l-1] 11 | } 12 | -------------------------------------------------------------------------------- /resource/plugins/types/workload.go: -------------------------------------------------------------------------------- 1 | package types 2 | 3 | import resourcetypes "github.com/projecteru2/core/resource/types" 4 | 5 | // WorkloadResourceRequest is the requested workload resource (with keepbind) 6 | type WorkloadResourceRequest = resourcetypes.RawParams 7 | 8 | // WorkloadResource is the workload resource (without keepbind) 9 | type WorkloadResource = resourcetypes.RawParams 10 | -------------------------------------------------------------------------------- /lock/lock.go: -------------------------------------------------------------------------------- 1 | package lock 2 | 3 | import "context" 4 | 5 | // DistributedLock is a lock based on something 6 | type DistributedLock interface { 7 | Lock(ctx context.Context) (context.Context, error) 8 | TryLock(ctx context.Context) (context.Context, error) 9 | Unlock(ctx context.Context) error 10 | } 11 | -------------------------------------------------------------------------------- /discovery/discovery.go: -------------------------------------------------------------------------------- 1 | package discovery 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/projecteru2/core/types" 7 | 8 | "github.com/google/uuid" 9 | ) 10 | 11 | // Service .
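// Service exposes core service status to subscribers: Subscribe returns a subscriber ID
// together with a ServiceStatus channel, and Unsubscribe releases that ID.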
12 | type Service interface { 13 | Subscribe(ctx context.Context) (uuid.UUID, <-chan types.ServiceStatus) 14 | Unsubscribe(ID uuid.UUID) 15 | } 16 | -------------------------------------------------------------------------------- /utils/service_test.go: -------------------------------------------------------------------------------- 1 | package utils 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/stretchr/testify/assert" 7 | ) 8 | 9 | func TestGetOutboundAddress(t *testing.T) { 10 | bind := "1.1.1.1:1234" 11 | addr, err := GetOutboundAddress(bind, "8.8.8.8:80") 12 | assert.NoError(t, err) 13 | assert.Contains(t, addr, "1234") 14 | } 15 | -------------------------------------------------------------------------------- /resource/plugins/binary/types/metrics.go: -------------------------------------------------------------------------------- 1 | package types 2 | 3 | // GetMetricsDescriptionRequest . 4 | type GetMetricsDescriptionRequest struct{} 5 | 6 | // GetMetricsRequest . 7 | type GetMetricsRequest struct { 8 | Podname string `json:"podname" mapstructure:"podname"` 9 | Nodename string `json:"nodename" mapstructure:"nodename"` 10 | } 11 | -------------------------------------------------------------------------------- /utils/config.go: -------------------------------------------------------------------------------- 1 | package utils 2 | 3 | import ( 4 | "github.com/projecteru2/core/types" 5 | 6 | "github.com/jinzhu/configor" 7 | ) 8 | 9 | // LoadConfig load config from yaml 10 | func LoadConfig(configPath string) (types.Config, error) { 11 | config := types.Config{} 12 | 13 | return config, configor.Load(&config, configPath) 14 | } 15 | -------------------------------------------------------------------------------- /eru-core.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Eru Core 3 | After=network.target 4 | 5 | [Service] 6 | Type=simple 7 | Environment=GOTRACEBACK=crash 8 | ExecStart=/usr/bin/eru-core --config /etc/eru/core.yaml 9 | LimitNOFILE=10485760 10 | LimitNPROC=10485760 11 | LimitCORE=infinity 12 | MountFlags=slave 13 | TimeoutSec=1200 14 | 15 | [Install] 16 | WantedBy=multi-user.target 17 | -------------------------------------------------------------------------------- /store/options.go: -------------------------------------------------------------------------------- 1 | package store 2 | 3 | type Op struct { 4 | WithoutEngine bool 5 | } 6 | 7 | type Option func(*Op) 8 | 9 | func WithoutEngineOption() Option { 10 | return func(op *Op) { 11 | op.WithoutEngine = true 12 | } 13 | } 14 | 15 | func NewOp(opts ...Option) *Op { 16 | op := &Op{} 17 | for _, opt := range opts { 18 | opt(op) 19 | } 20 | return op 21 | } 22 | -------------------------------------------------------------------------------- /rpc/counter_test.go: -------------------------------------------------------------------------------- 1 | package rpc 2 | 3 | import ( 4 | "context" 5 | "testing" 6 | 7 | "github.com/stretchr/testify/assert" 8 | ) 9 | 10 | func TestCounter(t *testing.T) { 11 | v := Vibranium{} 12 | task := v.newTask(context.Background(), "test", true) 13 | assert.Equal(t, v.TaskNum, 1) 14 | 15 | task.done() 16 | assert.Equal(t, v.TaskNum, 0) 17 | 18 | v.Wait() 19 | } 20 | -------------------------------------------------------------------------------- /store/etcdv3/embedded/embeded_test.go: -------------------------------------------------------------------------------- 1 | package embedded 2 | 3 | import ( 4 | "context" 5 | 
"testing" 6 | 7 | "github.com/stretchr/testify/assert" 8 | ) 9 | 10 | func TestEmbededCluster(t *testing.T) { 11 | embededETCD := NewCluster(t, "/test") 12 | cliv3 := embededETCD.RandClient() 13 | _, err := cliv3.MemberList(context.Background()) 14 | assert.NoError(t, err) 15 | } 16 | -------------------------------------------------------------------------------- /utils/gopool.go: -------------------------------------------------------------------------------- 1 | package utils 2 | 3 | import ( 4 | "github.com/panjf2000/ants/v2" 5 | "github.com/projecteru2/core/log" 6 | ) 7 | 8 | // NewPool new a pool 9 | func NewPool(max int) (*ants.PoolWithFunc, error) { 10 | return ants.NewPoolWithFunc(max, func(i any) { 11 | defer log.SentryDefer() 12 | f, _ := i.(func()) 13 | f() 14 | }, ants.WithNonblocking(true)) 15 | } 16 | -------------------------------------------------------------------------------- /types/specs_test.go: -------------------------------------------------------------------------------- 1 | package types 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/stretchr/testify/assert" 7 | ) 8 | 9 | func TestEntrypointValidate(t *testing.T) { 10 | e := Entrypoint{Name: ""} 11 | assert.Error(t, e.Validate()) 12 | e = Entrypoint{Name: "a_b"} 13 | assert.Error(t, e.Validate()) 14 | e = Entrypoint{Name: "c"} 15 | assert.NoError(t, e.Validate()) 16 | } 17 | -------------------------------------------------------------------------------- /source/gitlab/cesium.go: -------------------------------------------------------------------------------- 1 | package gitlab 2 | 3 | import ( 4 | "github.com/projecteru2/core/source/common" 5 | "github.com/projecteru2/core/types" 6 | ) 7 | 8 | // New new a gitlab obj 9 | func New(config types.Config) (*common.GitScm, error) { 10 | gitConfig := config.Git 11 | authHeaders := map[string]string{"PRIVATE-TOKEN": gitConfig.Token} 12 | return common.NewGitScm(gitConfig, authHeaders) 13 | } 14 | -------------------------------------------------------------------------------- /store/redis/helper_test.go: -------------------------------------------------------------------------------- 1 | package redis 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/stretchr/testify/assert" 7 | ) 8 | 9 | func TestParseStatusKey(t *testing.T) { 10 | key := "/deploy/appname/entry/node/id" 11 | p1, p2, p3, p4 := parseStatusKey(key) 12 | assert.Equal(t, p1, "appname") 13 | assert.Equal(t, p2, "entry") 14 | assert.Equal(t, p3, "node") 15 | assert.Equal(t, p4, "id") 16 | } 17 | -------------------------------------------------------------------------------- /store/redis/lock_test.go: -------------------------------------------------------------------------------- 1 | package redis 2 | 3 | import ( 4 | "context" 5 | "time" 6 | ) 7 | 8 | func (s *RediaronTestSuite) TestCreateLock() { 9 | ctx := context.Background() 10 | 11 | lock, err := s.rediaron.CreateLock("test", time.Second) 12 | s.NoError(err) 13 | s.NotNil(lock) 14 | 15 | _, err = lock.Lock(ctx) 16 | s.NoError(err) 17 | 18 | err = lock.Unlock(ctx) 19 | s.NoError(err) 20 | } 21 | -------------------------------------------------------------------------------- /types/config_test.go: -------------------------------------------------------------------------------- 1 | package types 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/stretchr/testify/assert" 7 | ) 8 | 9 | func TestIdentifier(t *testing.T) { 10 | config := Config{} 11 | config.Etcd = EtcdConfig{ 12 | Machines: []string{ 13 | "1.1.1.1", 14 | "2.2.2.2", 15 | }, 16 | } 17 | 
r, err := config.Identifier() 18 | assert.NoError(t, err) 19 | assert.NotEmpty(t, r) 20 | } 21 | -------------------------------------------------------------------------------- /store/etcdv3/helper_test.go: -------------------------------------------------------------------------------- 1 | package etcdv3 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/stretchr/testify/assert" 7 | ) 8 | 9 | func TestParseStatusKey(t *testing.T) { 10 | key := "/deploy/appname/entry/node/id" 11 | p1, p2, p3, p4 := parseStatusKey(key) 12 | assert.Equal(t, p1, "appname") 13 | assert.Equal(t, p2, "entry") 14 | assert.Equal(t, p3, "node") 15 | assert.Equal(t, p4, "id") 16 | } 17 | -------------------------------------------------------------------------------- /resource/plugins/cpumem/types/engine.go: -------------------------------------------------------------------------------- 1 | package types 2 | 3 | // EngineParams . 4 | type EngineParams struct { 5 | CPU float64 `json:"cpu" mapstructure:"cpu"` 6 | CPUMap CPUMap `json:"cpu_map" mapstructure:"cpu_map"` 7 | NUMANode string `json:"numa_node" mapstructure:"numa_node"` 8 | Memory int64 `json:"memory" mapstructure:"memory"` 9 | Remap bool `json:"remap" mapstructure:"remap"` 10 | } 11 | -------------------------------------------------------------------------------- /.github/workflows/test.yml: -------------------------------------------------------------------------------- 1 | name: test 2 | 3 | on: 4 | push: 5 | tags: 6 | - '!v*' 7 | branches: 8 | - '*' 9 | pull_request: 10 | 11 | jobs: 12 | unittests: 13 | runs-on: ubuntu-latest 14 | steps: 15 | - uses: actions/checkout@v4 16 | - uses: actions/setup-go@v4 17 | with: 18 | go-version-file: 'go.mod' 19 | - name: unit tests 20 | run: make test 21 | -------------------------------------------------------------------------------- /store/redis/lock.go: -------------------------------------------------------------------------------- 1 | package redis 2 | 3 | import ( 4 | "fmt" 5 | "time" 6 | 7 | "github.com/projecteru2/core/lock" 8 | redislock "github.com/projecteru2/core/lock/redis" 9 | ) 10 | 11 | // CreateLock creates a redis based lock 12 | func (r *Rediaron) CreateLock(key string, ttl time.Duration) (lock.DistributedLock, error) { 13 | lockKey := fmt.Sprintf("%s/%s", r.config.Redis.LockPrefix, key) 14 | return redislock.New(r.cli, lockKey, ttl, ttl) 15 | } 16 | -------------------------------------------------------------------------------- /source/source.go: -------------------------------------------------------------------------------- 1 | package source 2 | 3 | import "context" 4 | 5 | // Source defines SCM functions 6 | type Source interface { 7 | // Get source code from repository into path by revision 8 | SourceCode(ctx context.Context, repository, path, revision string, submodule bool) error 9 | // Get related artifact by artifact into path 10 | Artifact(ctx context.Context, artifact, path string) error 11 | // Keep code security 12 | Security(path string) error 13 | } 14 | -------------------------------------------------------------------------------- /source/github/manganese.go: -------------------------------------------------------------------------------- 1 | package github 2 | 3 | import ( 4 | "fmt" 5 | 6 | "github.com/projecteru2/core/source/common" 7 | "github.com/projecteru2/core/types" 8 | ) 9 | 10 | // New new a github obj 11 | func New(config types.Config) (*common.GitScm, error) { 12 | gitConfig := config.Git 13 | token := fmt.Sprintf("token %s", gitConfig.Token) 14 | authHeaders :=
map[string]string{"Authorization": token} 15 | return common.NewGitScm(gitConfig, authHeaders) 16 | } 17 | -------------------------------------------------------------------------------- /resource/plugins/cpumem/types/errors.go: -------------------------------------------------------------------------------- 1 | package types 2 | 3 | import "github.com/cockroachdb/errors" 4 | 5 | var ( 6 | ErrInvalidCapacity = errors.New("invalid resource capacity") 7 | ErrInvalidCPUMap = errors.New("invalid cpu map") 8 | ErrInvalidNUMACPU = errors.New("invalid numa cpu") 9 | ErrInvalidNUMAMemory = errors.New("invalid numa memory") 10 | ErrInvalidMemory = errors.New("invalid memory") 11 | ErrInvalidCPU = errors.New("invalid cpu") 12 | ) 13 | -------------------------------------------------------------------------------- /auth/simple/credential_test.go: -------------------------------------------------------------------------------- 1 | package simple 2 | 3 | import ( 4 | "context" 5 | "testing" 6 | 7 | "github.com/stretchr/testify/assert" 8 | ) 9 | 10 | func TestBasicCredential(t *testing.T) { 11 | sc := NewBasicCredential("test", "password") 12 | m, err := sc.GetRequestMetadata(context.Background(), "a.com", "b.com") 13 | assert.NoError(t, err) 14 | v, ok := m["test"] 15 | assert.True(t, ok) 16 | assert.Equal(t, v, "password") 17 | assert.False(t, sc.RequireTransportSecurity()) 18 | } 19 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | core 2 | dev*.yaml 3 | eru-core 4 | core.yaml 5 | core.yml 6 | .ropeproject 7 | build.yaml 8 | *.pyc 9 | *.rpm 10 | *.deb 11 | *.itr 12 | *.so 13 | default.etcd/* 14 | vendor/* 15 | clean.sh 16 | .vscode/* 17 | *.test 18 | test*.py 19 | set* 20 | test.yaml 21 | local.yaml 22 | tools/test*.go 23 | tools/update 24 | tools/fix.go 25 | tools/check*.go 26 | cscope.* 27 | *~ 28 | .DS_Store 29 | tools/updatev4.go 30 | *.swp 31 | dist/* 32 | non-live.yaml 33 | __debug_bin/* 34 | coverage.out 35 | core.wal 36 | -------------------------------------------------------------------------------- /store/factory/factory.go: -------------------------------------------------------------------------------- 1 | package factory 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/projecteru2/core/store" 7 | "github.com/projecteru2/core/store/etcdv3" 8 | "github.com/projecteru2/core/store/redis" 9 | "github.com/projecteru2/core/types" 10 | ) 11 | 12 | // NewStore creates a store 13 | func NewStore(config types.Config, t *testing.T) (stor store.Store, err error) { 14 | switch config.Store { 15 | case types.Redis: 16 | stor, err = redis.New(config, t) 17 | default: 18 | stor, err = etcdv3.New(config, t) 19 | } 20 | return stor, err 21 | } 22 | -------------------------------------------------------------------------------- /client/resolver/static/builder.go: -------------------------------------------------------------------------------- 1 | package static 2 | 3 | import "google.golang.org/grpc/resolver" 4 | 5 | type staticResolverBuilder struct{} 6 | 7 | func init() { //nolint 8 | resolver.Register(&staticResolverBuilder{}) 9 | } 10 | 11 | // Scheme for interface 12 | func (b *staticResolverBuilder) Scheme() string { 13 | return "static" 14 | } 15 | 16 | // Build for interface 17 | func (b *staticResolverBuilder) Build(target resolver.Target, cc resolver.ClientConn, _ resolver.BuildOptions) (resolver.Resolver, error) { 18 | return New(cc, target.URL.Path), nil 19 | } 
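// Usage sketch (illustrative only, not a file in this repo): since init() above registers
// the builder, a client can dial a fixed endpoint list with the "static" scheme documented
// in resolver.go ("static://{addr1},{addr2},{addr3}"). The endpoints and credentials below
// are assumptions for illustration; the actual dialing logic presumably lives in
// client/client.go, which is not shown in this section.
//
//	conn, err := grpc.Dial(
//		"static://10.0.0.1:5001,10.0.0.2:5001",
//		grpc.WithTransportCredentials(insecure.NewCredentials()),
//	)
//	if err != nil {
//		panic(err)
//	}
//	defer conn.Close()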
20 | -------------------------------------------------------------------------------- /client/resolver/eru/builder.go: -------------------------------------------------------------------------------- 1 | package eru 2 | 3 | import ( 4 | "google.golang.org/grpc/resolver" 5 | ) 6 | 7 | type eruResolverBuilder struct{} 8 | 9 | func init() { //nolint 10 | resolver.Register(&eruResolverBuilder{}) 11 | } 12 | 13 | // Scheme for interface 14 | func (b *eruResolverBuilder) Scheme() string { 15 | return "eru" 16 | } 17 | 18 | // Build for interface 19 | func (b *eruResolverBuilder) Build(target resolver.Target, cc resolver.ClientConn, _ resolver.BuildOptions) (resolver.Resolver, error) { 20 | return New(cc, target.URL.Path, target.URL.Host), nil 21 | } 22 | -------------------------------------------------------------------------------- /.github/workflows/binary.yml: -------------------------------------------------------------------------------- 1 | name: dev binary 2 | 3 | on: 4 | push: 5 | branches: 6 | - master 7 | 8 | jobs: 9 | publish: 10 | runs-on: ubuntu-latest 11 | steps: 12 | - name: checkout 13 | uses: actions/checkout@v4 14 | with: 15 | fetch-depth: 0 16 | 17 | - name: "Setup go" 18 | uses: actions/setup-go@v4 19 | 20 | - name: "Build binary" 21 | run: | 22 | make binary 23 | 24 | - uses: actions/upload-artifact@v3 25 | with: 26 | name: eru-core-ubuntu 27 | path: eru-core -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM golang:alpine AS BUILD 2 | 3 | # make binary 4 | RUN apk add --no-cache build-base musl-dev git curl make cmake binutils-gold 5 | COPY . /go/src/github.com/projecteru2/core 6 | WORKDIR /go/src/github.com/projecteru2/core 7 | ARG KEEP_SYMBOL 8 | RUN make build && ./eru-core --version 9 | 10 | FROM alpine:latest 11 | 12 | RUN mkdir /etc/eru/ 13 | LABEL ERU=1 14 | RUN apk --no-cache add libcurl libssh2 && rm -rf /var/cache/apk/* 15 | COPY --from=BUILD /go/src/github.com/projecteru2/core/eru-core /usr/bin/eru-core 16 | COPY --from=BUILD /go/src/github.com/projecteru2/core/core.yaml.sample /etc/eru/core.yaml.sample 17 | -------------------------------------------------------------------------------- /engine/types/params.go: -------------------------------------------------------------------------------- 1 | package types 2 | 3 | import ( 4 | "crypto/sha256" 5 | "encoding/hex" 6 | "fmt" 7 | ) 8 | 9 | type Params struct { 10 | Nodename string 11 | Endpoint string 12 | CA string 13 | Cert string 14 | Key string 15 | } 16 | 17 | func (p *Params) CacheKey() string { 18 | return fmt.Sprintf("%+v-%+v", p.Endpoint, sha256String(fmt.Sprintf(":%+v:%+v:%+v", p.CA, p.Cert, p.Key))[:8]) 19 | } 20 | 21 | // to avoid import cycle, don't use utils.SHA256 22 | func sha256String(input string) string { 23 | c := sha256.New() 24 | c.Write([]byte(input)) 25 | bytes := c.Sum(nil) 26 | return hex.EncodeToString(bytes) 27 | } 28 | -------------------------------------------------------------------------------- /engine/systemd/image.go: -------------------------------------------------------------------------------- 1 | package systemd 2 | 3 | import ( 4 | "context" 5 | "io" 6 | 7 | enginetypes "github.com/projecteru2/core/engine/types" 8 | coresource "github.com/projecteru2/core/source" 9 | "github.com/projecteru2/core/types" 10 | ) 11 | 12 | // BuildRefs builds images refs 13 | func (e *Engine) BuildRefs(context.Context, *enginetypes.BuildRefOptions) (refs []string) 
{ 14 | return 15 | } 16 | 17 | // BuildContent builds image content 18 | func (e *Engine) BuildContent(context.Context, coresource.Source, *enginetypes.BuildContentOptions) (dir string, reader io.Reader, err error) { 19 | err = types.ErrEngineNotImplemented 20 | return 21 | } 22 | -------------------------------------------------------------------------------- /resource/plugins/binary/binary.go: -------------------------------------------------------------------------------- 1 | package binary 2 | 3 | import ( 4 | "context" 5 | "path/filepath" 6 | 7 | ppath "path" 8 | 9 | coretypes "github.com/projecteru2/core/types" 10 | ) 11 | 12 | // Plugin 13 | type Plugin struct { 14 | name string 15 | path string 16 | config coretypes.Config 17 | } 18 | 19 | // NewPlugin . 20 | func NewPlugin(_ context.Context, path string, config coretypes.Config) (*Plugin, error) { 21 | p, err := filepath.Abs(path) 22 | if err != nil { 23 | return nil, err 24 | } 25 | plugin := &Plugin{name: ppath.Base(path), path: p, config: config} 26 | return plugin, nil 27 | } 28 | 29 | // Name . 30 | func (p Plugin) Name() string { 31 | return p.name 32 | } 33 | -------------------------------------------------------------------------------- /utils/ram.go: -------------------------------------------------------------------------------- 1 | package utils 2 | 3 | import ( 4 | "strconv" 5 | "strings" 6 | 7 | "github.com/docker/go-units" 8 | ) 9 | 10 | // ParseRAMInHuman returns int value in bytes of a human readable string 11 | // e.g. 100KB -> 102400 12 | func ParseRAMInHuman(ram string) (int64, error) { 13 | if ram == "" { 14 | return 0, nil 15 | } 16 | ramInBytes, err := strconv.ParseInt(ram, 10, 64) 17 | if err == nil { 18 | return ramInBytes, nil 19 | } 20 | 21 | flag := int64(1) 22 | if strings.HasPrefix(ram, "-") { 23 | flag = int64(-1) 24 | ram = strings.TrimLeft(ram, "-") 25 | } 26 | ramInBytes, err = units.RAMInBytes(ram) 27 | if err != nil { 28 | return 0, err 29 | } 30 | return ramInBytes * flag, nil 31 | } 32 | -------------------------------------------------------------------------------- /client/interceptor/types.go: -------------------------------------------------------------------------------- 1 | package interceptor 2 | 3 | import ( 4 | "context" 5 | "sync" 6 | 7 | "google.golang.org/grpc" 8 | ) 9 | 10 | // RetryOptions . 
11 | type RetryOptions struct { 12 | Max int 13 | } 14 | 15 | type retryStream struct { 16 | ctx context.Context 17 | grpc.ClientStream 18 | mux sync.RWMutex 19 | sent any 20 | newStream func() (grpc.ClientStream, error) 21 | retryOpts RetryOptions 22 | } 23 | 24 | func (s *retryStream) getStream() grpc.ClientStream { 25 | s.mux.RLock() 26 | defer s.mux.RUnlock() 27 | return s.ClientStream 28 | } 29 | 30 | func (s *retryStream) setStream(stream grpc.ClientStream) { 31 | s.mux.Lock() 32 | defer s.mux.Unlock() 33 | s.ClientStream = stream 34 | } 35 | -------------------------------------------------------------------------------- /auth/simple/credential.go: -------------------------------------------------------------------------------- 1 | package simple 2 | 3 | import "context" 4 | 5 | // BasicCredential for basic credential 6 | type BasicCredential struct { 7 | username string 8 | password string 9 | } 10 | 11 | // NewBasicCredential new a basic credential 12 | func NewBasicCredential(username, password string) *BasicCredential { 13 | return &BasicCredential{username, password} 14 | } 15 | 16 | // GetRequestMetadata for basic auth 17 | func (c BasicCredential) GetRequestMetadata(_ context.Context, _ ...string) (map[string]string, error) { 18 | return map[string]string{ 19 | c.username: c.password, 20 | }, nil 21 | } 22 | 23 | // RequireTransportSecurity for ssl require 24 | func (c BasicCredential) RequireTransportSecurity() bool { 25 | return false 26 | } 27 | -------------------------------------------------------------------------------- /version/version.go: -------------------------------------------------------------------------------- 1 | package version 2 | 3 | import ( 4 | "fmt" 5 | "runtime" 6 | ) 7 | 8 | var ( 9 | // NAME is app name 10 | NAME = "Eru-Core" 11 | // VERSION is app version 12 | VERSION = "unknown" 13 | // REVISION is app revision 14 | REVISION = "HEAD" 15 | // BUILTAT is app built info 16 | BUILTAT = "now" 17 | ) 18 | 19 | // String show version thing 20 | func String() string { 21 | version := "" 22 | version += fmt.Sprintf("Version: %s\n", VERSION) 23 | version += fmt.Sprintf("Git hash: %s\n", REVISION) 24 | version += fmt.Sprintf("Built: %s\n", BUILTAT) 25 | version += fmt.Sprintf("Golang version: %s\n", runtime.Version()) 26 | version += fmt.Sprintf("OS/Arch: %s/%s\n", runtime.GOOS, runtime.GOARCH) 27 | return version 28 | } 29 | -------------------------------------------------------------------------------- /resource/plugins/cpumem/metrics_test.go: -------------------------------------------------------------------------------- 1 | package cpumem 2 | 3 | import ( 4 | "context" 5 | "testing" 6 | 7 | "github.com/docker/go-units" 8 | "github.com/stretchr/testify/assert" 9 | ) 10 | 11 | func TestGetMetricsDescription(t *testing.T) { 12 | ctx := context.Background() 13 | cm := initCPUMEM(ctx, t) 14 | md, err := cm.GetMetricsDescription(ctx) 15 | assert.NoError(t, err) 16 | assert.NotNil(t, md) 17 | assert.Len(t, *md, 4) 18 | } 19 | 20 | func TestGetMetrics(t *testing.T) { 21 | ctx := context.Background() 22 | cm := initCPUMEM(ctx, t) 23 | _, err := cm.GetMetrics(ctx, "", "") 24 | assert.Error(t, err) 25 | 26 | nodes := generateNodes(ctx, t, cm, 1, 2, units.GB, 100, -1) 27 | _, err = cm.GetMetrics(ctx, "testpod", nodes[0]) 28 | assert.NoError(t, err) 29 | } 30 | -------------------------------------------------------------------------------- /client/servicediscovery/builder.go: -------------------------------------------------------------------------------- 1 | 
package servicediscovery 2 | 3 | import "google.golang.org/grpc/resolver" 4 | 5 | // LBResolverBuilder for service discovery lb 6 | type LBResolverBuilder struct { 7 | updateCh chan []string 8 | } 9 | 10 | var lbResolverBuilder *LBResolverBuilder 11 | 12 | func init() { //nolint 13 | lbResolverBuilder = &LBResolverBuilder{ 14 | updateCh: make(chan []string), 15 | } 16 | resolver.Register(lbResolverBuilder) 17 | } 18 | 19 | // Scheme for interface 20 | func (b *LBResolverBuilder) Scheme() string { 21 | return "lb" 22 | } 23 | 24 | // Build for interface 25 | func (b *LBResolverBuilder) Build(target resolver.Target, cc resolver.ClientConn, _ resolver.BuildOptions) (resolver.Resolver, error) { 26 | return newLBResolver(cc, target.URL.Path, b.updateCh), nil 27 | } 28 | -------------------------------------------------------------------------------- /engine/systemd/network.go: -------------------------------------------------------------------------------- 1 | package systemd 2 | 3 | import ( 4 | "context" 5 | 6 | enginetypes "github.com/projecteru2/core/engine/types" 7 | "github.com/projecteru2/core/types" 8 | ) 9 | 10 | // NetworkConnect connects target netloc 11 | func (e *Engine) NetworkConnect(_ context.Context, _, _, _, _ string) (subnets []string, err error) { 12 | err = types.ErrEngineNotImplemented 13 | return 14 | } 15 | 16 | // NetworkDisconnect disconnects target netloc 17 | func (e *Engine) NetworkDisconnect(_ context.Context, _, _ string, _ bool) (err error) { 18 | err = types.ErrEngineNotImplemented 19 | return 20 | } 21 | 22 | // NetworkList lists networks 23 | func (e *Engine) NetworkList(_ context.Context, _ []string) (networks []*enginetypes.Network, err error) { 24 | err = types.ErrEngineNotImplemented 25 | return 26 | } 27 | -------------------------------------------------------------------------------- /resource/plugins/types/metrics.go: -------------------------------------------------------------------------------- 1 | package types 2 | 3 | // MetricsDescription . 4 | type MetricsDescription struct { 5 | Name string `json:"name" mapstructure:"name"` 6 | Help string `json:"help" mapstructure:"help"` 7 | Type string `json:"type" mapstructure:"type"` 8 | Labels []string `json:"labels" mapstructure:"labels"` 9 | } 10 | 11 | // GetMetricsDescriptionResponse . 12 | type GetMetricsDescriptionResponse []*MetricsDescription 13 | 14 | // Metrics indicate metrics 15 | type Metrics struct { 16 | Name string `json:"name" mapstructure:"name"` 17 | Labels []string `json:"labels" mapstructure:"labels"` 18 | Key string `json:"key" mapstructure:"key"` 19 | Value string `json:"value" mapstructure:"value"` 20 | } 21 | 22 | // GetMetricsResponse .
23 | type GetMetricsResponse []*Metrics 24 | -------------------------------------------------------------------------------- /utils/ram_test.go: -------------------------------------------------------------------------------- 1 | package utils 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/docker/go-units" 7 | "github.com/stretchr/testify/assert" 8 | ) 9 | 10 | func TestParseRAMInHuman(t *testing.T) { 11 | size, err := ParseRAMInHuman("") 12 | assert.Nil(t, err) 13 | assert.EqualValues(t, 0, size) 14 | 15 | size, err = ParseRAMInHuman("1") 16 | assert.Nil(t, err) 17 | assert.EqualValues(t, 1, size) 18 | 19 | size, err = ParseRAMInHuman("-1") 20 | assert.Nil(t, err) 21 | assert.EqualValues(t, -1, size) 22 | 23 | size, err = ParseRAMInHuman("hhhh") 24 | assert.NotNil(t, err) 25 | 26 | size, err = ParseRAMInHuman("1G") 27 | assert.Nil(t, err) 28 | assert.EqualValues(t, units.GiB, size) 29 | 30 | size, err = ParseRAMInHuman("-1T") 31 | assert.Nil(t, err) 32 | assert.EqualValues(t, -units.TiB, size) 33 | } 34 | -------------------------------------------------------------------------------- /cluster/calcium/hook.go: -------------------------------------------------------------------------------- 1 | package calcium 2 | 3 | import ( 4 | "bytes" 5 | "context" 6 | 7 | "github.com/projecteru2/core/engine" 8 | ) 9 | 10 | func (c *Calcium) doHook( 11 | ctx context.Context, 12 | ID, user string, 13 | cmds, env []string, 14 | cmdForce, privileged, force bool, 15 | engine engine.API, 16 | ) ([]*bytes.Buffer, error) { 17 | outputs := []*bytes.Buffer{} 18 | for _, cmd := range cmds { 19 | output, err := c.execuateInside(ctx, engine, ID, cmd, user, env, privileged) 20 | if err != nil { 21 | // while executing hooks, if cmdForce is true and the hook is not ignored, output the error 22 | outputs = append(outputs, bytes.NewBufferString(err.Error())) 23 | if cmdForce && !force { 24 | return outputs, err 25 | } 26 | continue 27 | } 28 | outputs = append(outputs, bytes.NewBuffer(output)) 29 | } 30 | return outputs, nil 31 | } 32 | -------------------------------------------------------------------------------- /engine/systemd/container.go: -------------------------------------------------------------------------------- 1 | package systemd 2 | 3 | import ( 4 | "context" 5 | "encoding/json" 6 | 7 | "github.com/projecteru2/core/engine/docker" 8 | enginetypes "github.com/projecteru2/core/engine/types" 9 | ) 10 | 11 | // VirtualizationCreate create a workload 12 | func (e *Engine) VirtualizationCreate(ctx context.Context, opts *enginetypes.VirtualizationCreateOptions) (*enginetypes.VirtualizationCreated, error) { //nolint 13 | rArgs := &docker.RawArgs{StorageOpt: map[string]string{}} 14 | if len(opts.RawArgs) > 0 { 15 | if err := json.Unmarshal(opts.RawArgs, rArgs); err != nil { 16 | return nil, err 17 | } 18 | } 19 | rArgs.Runtime = e.config.Systemd.Runtime 20 | b, err := json.Marshal(rArgs) 21 | if err != nil { 22 | return nil, err 23 | } 24 | opts.RawArgs = b 25 | return e.API.VirtualizationCreate(ctx, opts) 26 | } 27 | -------------------------------------------------------------------------------- /.github/workflows/goreleaser.yml: -------------------------------------------------------------------------------- 1 | name: goreleaser 2 | 3 | on: 4 | push: 5 | tags: 6 | - v* 7 | 8 | jobs: 9 | goreleaser: 10 | runs-on: ubuntu-latest 11 | steps: 12 | - name: Checkout 13 | uses: actions/checkout@v4 14 | with: 15 | fetch-depth: 0 16 | 17 | - name: Set up environment variables 18 | run: | 19 | echo "VERSION=$(git describe --tags $(git rev-list --tags
--max-count=1))" >> $GITHUB_ENV 20 | 21 | - name: Set up Go 22 | uses: actions/setup-go@v4 23 | with: 24 | go-version-file: 'go.mod' 25 | 26 | - name: Run GoReleaser 27 | uses: goreleaser/goreleaser-action@v5 28 | with: 29 | version: latest 30 | args: release --clean 31 | env: 32 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 33 | -------------------------------------------------------------------------------- /resource/plugins/binary/commands.go: -------------------------------------------------------------------------------- 1 | package binary 2 | 3 | const ( 4 | CalculateDeployCommand = "calculate-deploy" 5 | CalculateReallocCommand = "calculate-realloc" 6 | CalculateRemapCommand = "calculate-remap" 7 | 8 | AddNodeCommand = "add-node" 9 | RemoveNodeCommand = "remove-node" 10 | 11 | GetNodesDeployCapacityCommand = "get-nodes-deploy-capacity" 12 | 13 | SetNodeResourceCapacityCommand = "set-node-resource-capacity" 14 | 15 | GetNodeResourceInfoCommand = "get-node-resource-info" 16 | SetNodeResourceInfoCommand = "set-node-resource-info" 17 | 18 | SetNodeResourceUsageCommand = "set-node-resource-usage" 19 | 20 | GetMostIdleNodeCommand = "get-most-idle-node" 21 | 22 | FixNodeResourceCommand = "fix-node-resource" 23 | 24 | GetMetricsDescriptionCommand = "get-metrics-description" 25 | GetMetricsCommand = "get-metrics" 26 | ) 27 | -------------------------------------------------------------------------------- /store/redis/processing_test.go: -------------------------------------------------------------------------------- 1 | package redis 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/projecteru2/core/types" 7 | ) 8 | 9 | func (s *RediaronTestSuite) TestProcessing() { 10 | ctx := context.Background() 11 | processing := &types.Processing{ 12 | Appname: "app", 13 | Entryname: "entry", 14 | Ident: "abc", 15 | Nodename: "node", 16 | } 17 | 18 | // create 19 | s.NoError(s.rediaron.CreateProcessing(ctx, processing, 10)) 20 | // create again 21 | s.Error(s.rediaron.CreateProcessing(ctx, processing, 10)) 22 | s.NoError(s.rediaron.AddWorkload(ctx, &types.Workload{Name: "a_b_c"}, processing)) 23 | 24 | nodeCount, err := s.rediaron.doLoadProcessing(ctx, processing.Appname, processing.Entryname) 25 | s.NoError(err) 26 | s.Equal(nodeCount["node"], 9) 27 | // delete 28 | s.NoError(s.rediaron.DeleteProcessing(ctx, processing)) 29 | } 30 | -------------------------------------------------------------------------------- /spec.yaml: -------------------------------------------------------------------------------- 1 | appname: "eru" 2 | entrypoints: 3 | core: 4 | cmd: "/usr/bin/eru-core --config /core.yaml" 5 | restart: always 6 | publish: 7 | - "5001" 8 | healthcheck: 9 | tcp_ports: 10 | - "5001" 11 | 12 | stages: 13 | - build 14 | - pack 15 | builds: 16 | build: 17 | base: "golang:alpine" 18 | # only support ssh protocol 19 | repo: "git@github.com:projecteru2/core.git" 20 | version: "HEAD" 21 | security: false 22 | dir: /go/src/github.com/projecteru2/core 23 | commands: 24 | - apk add --no-cache build-base musl-dev git curl make 25 | - make test 26 | - make binary 27 | cache: 28 | /go/src/github.com/projecteru2/core/eru-core: /usr/bin/eru-core 29 | pack: 30 | base: alpine 31 | labels: 32 | ERU: 1 33 | core: 1 34 | envs: 35 | CORE_IN_DOCKER: 1 36 | 37 | -------------------------------------------------------------------------------- /client/servicediscovery/resolver.go: -------------------------------------------------------------------------------- 1 | package servicediscovery 2 | 3 | import ( 4 | 
"google.golang.org/grpc/resolver" 5 | ) 6 | 7 | type lbResolver struct { 8 | cc resolver.ClientConn 9 | } 10 | 11 | func newLBResolver(cc resolver.ClientConn, endpoint string, updateCh <-chan []string) *lbResolver { 12 | r := &lbResolver{cc: cc} 13 | r.updateAddresses(endpoint) 14 | go func() { 15 | for { 16 | r.updateAddresses(<-updateCh...) 17 | } 18 | }() 19 | return r 20 | } 21 | 22 | func (r *lbResolver) updateAddresses(endpoints ...string) { 23 | addresses := []resolver.Address{} 24 | for _, ep := range endpoints { 25 | addresses = append(addresses, resolver.Address{Addr: ep}) 26 | } 27 | r.cc.UpdateState(resolver.State{Addresses: addresses}) //nolint 28 | } 29 | 30 | func (r *lbResolver) ResolveNow(_ resolver.ResolveNowOptions) {} 31 | 32 | func (r lbResolver) Close() {} 33 | -------------------------------------------------------------------------------- /cluster/calcium/raw_engine.go: -------------------------------------------------------------------------------- 1 | package calcium 2 | 3 | import ( 4 | "context" 5 | "sync" 6 | 7 | "github.com/projecteru2/core/log" 8 | "github.com/projecteru2/core/types" 9 | ) 10 | 11 | func (c *Calcium) RawEngine(ctx context.Context, opts *types.RawEngineOptions) (msg *types.RawEngineMessage, err error) { 12 | ID := opts.ID 13 | logger := log.WithFunc("calcium.RawEngine").WithField("ID", opts.ID) 14 | var wg sync.WaitGroup 15 | wg.Add(1) 16 | _ = c.pool.Invoke(func() { 17 | defer wg.Done() 18 | if err = c.withWorkloadLocked(ctx, ID, opts.IgnoreLock, func(ctx context.Context, workload *types.Workload) error { 19 | msg, err = workload.RawEngine(ctx, opts) 20 | return err 21 | }); err == nil { 22 | logger.Infof(ctx, "Workload %s", ID) 23 | logger.Infof(ctx, "%+v", msg) 24 | } 25 | }) 26 | wg.Wait() 27 | 28 | logger.Error(ctx, err) 29 | return 30 | } 31 | -------------------------------------------------------------------------------- /types/image.go: -------------------------------------------------------------------------------- 1 | package types 2 | 3 | import ( 4 | "io" 5 | 6 | enginetypes "github.com/projecteru2/core/engine/types" 7 | ) 8 | 9 | // BuildMethod . 10 | type BuildMethod int 11 | 12 | const ( 13 | // BuildFromSCM must be default method to avoid breaking 14 | BuildFromSCM BuildMethod = iota 15 | // BuildFromUnknown . 16 | BuildFromUnknown 17 | // BuildFromRaw . 18 | BuildFromRaw 19 | // BuildFromExist . 
20 | BuildFromExist 21 | ) 22 | 23 | // Builds is identical to enginetype.Builds 24 | type Builds = enginetypes.Builds 25 | 26 | // Build is identical to enginetype.Build 27 | type Build = enginetypes.Build 28 | 29 | // BuildOptions is options for building image 30 | type BuildOptions struct { 31 | Name string 32 | User string 33 | UID int 34 | Tags []string 35 | BuildMethod 36 | *Builds 37 | Tar io.Reader 38 | ExistID string 39 | Platform string 40 | } 41 | -------------------------------------------------------------------------------- /client/resolver/static/resolver.go: -------------------------------------------------------------------------------- 1 | package static 2 | 3 | import ( 4 | "strings" 5 | 6 | "google.golang.org/grpc/resolver" 7 | ) 8 | 9 | // Resolver for target static://{addr1},{addr2},{addr3} 10 | type Resolver struct { 11 | addresses []resolver.Address 12 | cc resolver.ClientConn 13 | } 14 | 15 | // New Resolver 16 | func New(cc resolver.ClientConn, endpoints string) *Resolver { 17 | var addresses []resolver.Address 18 | for _, ep := range strings.Split(endpoints, ",") { 19 | addresses = append(addresses, resolver.Address{Addr: ep}) 20 | } 21 | cc.UpdateState(resolver.State{Addresses: addresses}) //nolint 22 | return &Resolver{ 23 | cc: cc, 24 | addresses: addresses, 25 | } 26 | } 27 | 28 | // ResolveNow for interface 29 | func (r *Resolver) ResolveNow(_ resolver.ResolveNowOptions) {} 30 | 31 | // Close for interface 32 | func (r *Resolver) Close() {} 33 | -------------------------------------------------------------------------------- /engine/systemd/exec.go: -------------------------------------------------------------------------------- 1 | package systemd 2 | 3 | import ( 4 | "context" 5 | "io" 6 | 7 | enginetypes "github.com/projecteru2/core/engine/types" 8 | "github.com/projecteru2/core/types" 9 | ) 10 | 11 | // Execute executes a cmd and attaches stdio 12 | func (e *Engine) Execute(_ context.Context, _ string, _ *enginetypes.ExecConfig) (execID string, stdout io.ReadCloser, stderr io.ReadCloser, writer io.WriteCloser, err error) { 13 | err = types.ErrEngineNotImplemented 14 | return 15 | } 16 | 17 | // ExecResize resize the terminal size 18 | func (e *Engine) ExecResize(_ context.Context, _ string, _, _ uint) (err error) { 19 | err = types.ErrEngineNotImplemented 20 | return 21 | } 22 | 23 | // ExecExitCode fetches execution exit code 24 | func (e *Engine) ExecExitCode(_ context.Context, _, _ string) (execCode int, err error) { 25 | err = types.ErrEngineNotImplemented 26 | return 27 | } 28 | -------------------------------------------------------------------------------- /resource/plugins/types/calculate.go: -------------------------------------------------------------------------------- 1 | package types 2 | 3 | // CalculateDeployResponse . 4 | type CalculateDeployResponse struct { 5 | EnginesParams []EngineParams `json:"engines_params" mapstructure:"engines_params"` 6 | WorkloadsResource []WorkloadResource `json:"workloads_resource" mapstructure:"workloads_resource"` 7 | } 8 | 9 | // CalculateReallocResponse . 10 | type CalculateReallocResponse struct { 11 | EngineParams EngineParams `json:"engine_params" mapstructure:"engine_params"` 12 | DeltaResource WorkloadResource `json:"delta_resource" mapstructure:"delta_resource"` 13 | WorkloadResource WorkloadResource `json:"workload_resource" mapstructure:"workload_resource"` 14 | } 15 | 16 | // CalculateRemapResponse .
17 | type CalculateRemapResponse struct { 18 | EngineParamsMap map[string]EngineParams `json:"engine_params_map" mapstructure:"engine_params_map"` 19 | } 20 | -------------------------------------------------------------------------------- /utils/context.go: -------------------------------------------------------------------------------- 1 | package utils 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/projecteru2/core/types" 7 | 8 | "google.golang.org/grpc/peer" 9 | ) 10 | 11 | // NewInheritCtx new a todo context and get the previous values 12 | func NewInheritCtx(ctx context.Context) context.Context { 13 | return InheritTracingInfo(ctx, context.TODO()) 14 | } 15 | 16 | // InheritTracingInfo pass through the tracing info: peer, tracing id 17 | func InheritTracingInfo(ctx, newCtx context.Context) context.Context { 18 | rCtx := newCtx 19 | if ctx == nil { 20 | return rCtx 21 | } 22 | 23 | p, ok := peer.FromContext(ctx) 24 | if ok { 25 | rCtx = peer.NewContext(rCtx, p) 26 | } 27 | 28 | if traceID := ctx.Value(types.TracingID); traceID != nil { 29 | if tid, ok := traceID.(string); ok { 30 | rCtx = context.WithValue(rCtx, types.TracingID, tid) 31 | } 32 | } 33 | 34 | return rCtx 35 | } 36 | -------------------------------------------------------------------------------- /engine/types/exec.go: -------------------------------------------------------------------------------- 1 | package types 2 | 3 | // ExecConfig is a small subset of the Config struct that holds the configuration 4 | // for the exec feature of docker. 5 | // copy from github.com/docker/docker/api/types 6 | type ExecConfig struct { 7 | User string // User that will run the command 8 | Privileged bool // Is the workload in privileged mode 9 | Tty bool // Attach standard streams to a tty. 
10 | AttachStdin bool // Attach the standard input, makes possible user interaction 11 | AttachStderr bool // Attach the standard error 12 | AttachStdout bool // Attach the standard output 13 | Detach bool // Execute in detach mode 14 | DetachKeys string // Escape keys for detach 15 | Env []string // Environment variables 16 | WorkingDir string // Working directory 17 | Cmd []string // Execution commands and args 18 | } 19 | -------------------------------------------------------------------------------- /utils/http_test.go: -------------------------------------------------------------------------------- 1 | package utils 2 | 3 | import ( 4 | "context" 5 | "net/http" 6 | "os" 7 | "testing" 8 | 9 | "github.com/stretchr/testify/assert" 10 | ) 11 | 12 | func TestGetHTTPClient(t *testing.T) { 13 | assert.NotNil(t, GetHTTPClient()) 14 | } 15 | 16 | func TestGetUnixSockClient(t *testing.T) { 17 | assert.NotNil(t, GetUnixSockClient()) 18 | } 19 | 20 | func TestGetHTTPSClient(t *testing.T) { 21 | ctx := context.Background() 22 | client, err := GetHTTPSClient(ctx, "", "abc", "", "", "") 23 | assert.NoError(t, err) 24 | assert.NotNil(t, client) 25 | 26 | client, err = GetHTTPSClient(ctx, os.TempDir(), "abc", "1", "2", "3") 27 | assert.Error(t, err) 28 | assert.Nil(t, client) 29 | } 30 | 31 | func TestCheckRedirect(t *testing.T) { 32 | via := []*http.Request{{Method: http.MethodGet}} 33 | err := checkRedirect(nil, via) 34 | assert.Equal(t, err, http.ErrUseLastResponse) 35 | } 36 | -------------------------------------------------------------------------------- /store/etcdv3/mercury_test.go: -------------------------------------------------------------------------------- 1 | package etcdv3 2 | 3 | import ( 4 | "context" 5 | "testing" 6 | "time" 7 | 8 | "github.com/projecteru2/core/engine/factory" 9 | "github.com/projecteru2/core/types" 10 | 11 | "github.com/stretchr/testify/assert" 12 | ) 13 | 14 | func NewMercury(t *testing.T) *Mercury { 15 | config := types.Config{} 16 | config.LockTimeout = 10 * time.Second 17 | config.GlobalTimeout = 30 * time.Second 18 | config.Etcd = types.EtcdConfig{ 19 | Machines: []string{"127.0.0.1:2379"}, 20 | Prefix: "/eru-test", 21 | LockPrefix: "/eru-test-lock", 22 | } 23 | config.ProbeTarget = "8.8.8.8:80" 24 | config.MaxConcurrency = 100000 25 | // config.Docker.CertPath = "/tmp" 26 | 27 | ctx, cancel := context.WithCancel(context.Background()) 28 | defer cancel() 29 | factory.InitEngineCache(ctx, config, nil) 30 | 31 | m, err := New(config, t) 32 | assert.NoError(t, err) 33 | return m 34 | } 35 | -------------------------------------------------------------------------------- /resource/plugins/binary/metrics.go: -------------------------------------------------------------------------------- 1 | package binary 2 | 3 | import ( 4 | "context" 5 | 6 | binarytypes "github.com/projecteru2/core/resource/plugins/binary/types" 7 | plugintypes "github.com/projecteru2/core/resource/plugins/types" 8 | ) 9 | 10 | // GetMetricsDescription . 11 | func (p Plugin) GetMetricsDescription(ctx context.Context) (*plugintypes.GetMetricsDescriptionResponse, error) { 12 | req := &binarytypes.GetMetricsDescriptionRequest{} 13 | resp := &plugintypes.GetMetricsDescriptionResponse{} 14 | return resp, p.call(ctx, GetMetricsDescriptionCommand, req, resp) 15 | } 16 | 17 | // GetMetrics . 
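// GetMetrics asks the plugin for pod/node metrics by issuing its get-metrics command
// (GetMetricsCommand) with the given podname and nodename and decoding the response.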
18 | func (p Plugin) GetMetrics(ctx context.Context, podname, nodename string) (*plugintypes.GetMetricsResponse, error) { 19 | req := &binarytypes.GetMetricsRequest{ 20 | Podname: podname, 21 | Nodename: nodename, 22 | } 23 | resp := &plugintypes.GetMetricsResponse{} 24 | return resp, p.call(ctx, GetMetricsCommand, req, resp) 25 | } 26 | -------------------------------------------------------------------------------- /store/etcdv3/processing_test.go: -------------------------------------------------------------------------------- 1 | package etcdv3 2 | 3 | import ( 4 | "context" 5 | "testing" 6 | 7 | "github.com/projecteru2/core/types" 8 | 9 | "github.com/stretchr/testify/assert" 10 | ) 11 | 12 | func TestProcessing(t *testing.T) { 13 | m := NewMercury(t) 14 | ctx := context.Background() 15 | processing := &types.Processing{ 16 | Appname: "app", 17 | Entryname: "entry", 18 | Nodename: "node", 19 | Ident: "abc", 20 | } 21 | 22 | // create 23 | assert.NoError(t, m.CreateProcessing(ctx, processing, 10)) 24 | // create again 25 | assert.Error(t, m.CreateProcessing(ctx, processing, 10)) 26 | assert.NoError(t, m.AddWorkload(ctx, &types.Workload{Name: "a_b_c"}, processing)) 27 | 28 | nodeCount, err := m.doLoadProcessing(ctx, processing.Appname, processing.Entryname) 29 | assert.NoError(t, err) 30 | assert.Equal(t, nodeCount["node"], 9) 31 | // delete 32 | assert.NoError(t, m.DeleteProcessing(ctx, processing)) 33 | } 34 | -------------------------------------------------------------------------------- /.github/workflows/golangci-lint.yml: -------------------------------------------------------------------------------- 1 | name: golangci-lint 2 | on: 3 | push: 4 | tags: 5 | - '!v*' 6 | branches: 7 | - '*' 8 | pull_request: 9 | 10 | jobs: 11 | golangci: 12 | name: lint 13 | runs-on: ubuntu-latest 14 | steps: 15 | - name: Checkout 16 | uses: actions/checkout@v4 17 | 18 | - name: Set up Go 19 | uses: actions/setup-go@v4 20 | with: 21 | go-version-file: 'go.mod' 22 | 23 | - name: golangci-lint 24 | uses: golangci/golangci-lint-action@v3 25 | env: 26 | ACTIONS_ALLOW_UNSECURE_COMMANDS: 'true' 27 | with: 28 | # Required: the version of golangci-lint is required and must be specified without patch version: we always use the latest patch version. 29 | version: latest 30 | # Optional: show only new issues if it's a pull request. The default value is `false`. 31 | only-new-issues: true 32 | -------------------------------------------------------------------------------- /resource/plugins/goplugin/plugin.go: -------------------------------------------------------------------------------- 1 | package goplugin 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "path/filepath" 7 | 8 | goplugin "plugin" 9 | 10 | "github.com/pkg/errors" 11 | "github.com/projecteru2/core/resource/plugins" 12 | coretypes "github.com/projecteru2/core/types" 13 | ) 14 | 15 | // NewPlugin . 
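// NewPlugin opens the Go plugin shared object at path, looks up its exported NewPlugin
// symbol and invokes it with the given config, e.g. (hypothetical path):
//
//	p, err := NewPlugin(ctx, "/etc/eru/plugins/cpumem.so", config)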
16 | func NewPlugin(ctx context.Context, path string, config coretypes.Config) (plugins.Plugin, error) { 17 | pFname, err := filepath.Abs(path) 18 | if err != nil { 19 | return nil, err 20 | } 21 | gp, err := goplugin.Open(pFname) 22 | if err != nil { 23 | return nil, errors.Wrapf(err, "failed to open plugin %s", pFname) 24 | } 25 | sym, err := gp.Lookup("NewPlugin") 26 | if err != nil { 27 | return nil, errors.Wrapf(err, "failed to lookup NewPlugin %s", pFname) 28 | } 29 | fn, ok := sym.(func(context.Context, coretypes.Config) (plugins.Plugin, error)) 30 | if !ok { 31 | return nil, fmt.Errorf("NewPlugin is not a function") 32 | } 33 | return fn(ctx, config) 34 | } 35 | -------------------------------------------------------------------------------- /utils/service.go: -------------------------------------------------------------------------------- 1 | package utils 2 | 3 | import ( 4 | "fmt" 5 | "net" 6 | "strings" 7 | 8 | "github.com/cockroachdb/errors" 9 | "github.com/projecteru2/core/types" 10 | ) 11 | 12 | // GetOutboundAddress finds out self-service address 13 | func GetOutboundAddress(bind string, probeTarget string) (string, error) { 14 | parts := strings.Split(bind, ":") 15 | if len(parts) != 2 { 16 | return "", errors.Wrap(types.ErrInvaildIPWithPort, bind) 17 | } 18 | ip := parts[0] 19 | port := parts[1] 20 | 21 | address := net.ParseIP(ip) 22 | if ip == "" || address == nil || address.IsUnspecified() { 23 | return getOutboundAddress(port, probeTarget) 24 | } 25 | 26 | return bind, nil 27 | } 28 | 29 | func getOutboundAddress(port string, probeTarget string) (string, error) { 30 | conn, err := net.Dial("udp", probeTarget) 31 | if err != nil { 32 | return "", err 33 | } 34 | defer conn.Close() 35 | 36 | localAddr := conn.LocalAddr().(*net.UDPAddr) 37 | return fmt.Sprintf("%s:%s", localAddr.IP, port), nil 38 | } 39 | -------------------------------------------------------------------------------- /strategy/strategy_test.go: -------------------------------------------------------------------------------- 1 | package strategy 2 | 3 | import ( 4 | "context" 5 | "testing" 6 | 7 | "github.com/stretchr/testify/assert" 8 | ) 9 | 10 | func deployedNodes() []Info { 11 | return []Info{ 12 | { 13 | Nodename: "n1", 14 | Capacity: 10, 15 | Count: 2, 16 | }, 17 | { 18 | Nodename: "n2", 19 | Capacity: 10, 20 | Count: 3, 21 | }, 22 | { 23 | Nodename: "n3", 24 | Capacity: 10, 25 | Count: 5, 26 | }, 27 | { 28 | Nodename: "n4", 29 | Capacity: 10, 30 | Count: 7, 31 | }, 32 | } 33 | } 34 | 35 | func TestDeploy(t *testing.T) { 36 | ctx := context.Background() 37 | 38 | // invalid strategy 39 | _, err := Deploy(ctx, "invalid", -1, 3, nil, 2) 40 | assert.Error(t, err) 41 | 42 | // count < 0 43 | _, err = Deploy(ctx, "AUTO", -1, 3, nil, 2) 44 | assert.Error(t, err) 45 | 46 | Plans["test"] = func(_ context.Context, _ []Info, _, _, _ int) (map[string]int, error) { 47 | return nil, nil 48 | } 49 | _, err = Deploy(ctx, "test", 1, 3, nil, 2) 50 | assert.NoError(t, err) 51 | } 52 | -------------------------------------------------------------------------------- /engine/virt/helper_test.go: -------------------------------------------------------------------------------- 1 | package virt 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/stretchr/testify/require" 7 | ) 8 | 9 | func TestCombineUserImage(t *testing.T) { 10 | user := "user" 11 | image := "image" 12 | 13 | combine := combineUserImage(user, image) 14 | require.Equal(t, "user@image", combine) 15 | u, i, err := splitUserImage(combine) 16 | 
require.NoError(t, err) 17 | require.Equal(t, user, u) 18 | require.Equal(t, image, i) 19 | 20 | combine = combineUserImage("", image) 21 | require.Equal(t, image, combine) 22 | u, i, err = splitUserImage(combine) 23 | require.NoError(t, err) 24 | require.Equal(t, "", u) 25 | require.Equal(t, image, i) 26 | 27 | combine = combineUserImage(user, "") 28 | require.Equal(t, "", combine) 29 | u, i, err = splitUserImage(combine) 30 | require.Error(t, err) 31 | 32 | combine = combineUserImage("", "") 33 | require.Equal(t, "", combine) 34 | 35 | u, i, err = splitUserImage("@") 36 | require.Error(t, err) 37 | 38 | u, i, err = splitUserImage("hello@") 39 | require.Error(t, err) 40 | } 41 | -------------------------------------------------------------------------------- /store/etcdv3/embedded/embeded.go: -------------------------------------------------------------------------------- 1 | package embedded 2 | 3 | import ( 4 | "flag" 5 | "os" 6 | "testing" 7 | 8 | "go.etcd.io/etcd/client/v3/namespace" 9 | "go.etcd.io/etcd/tests/v3/integration" 10 | ) 11 | 12 | var clusters map[string]*integration.ClusterV3 = map[string]*integration.ClusterV3{} 13 | 14 | // NewCluster new a embedded cluster 15 | func NewCluster(t *testing.T, prefix string) *integration.ClusterV3 { 16 | cluster := clusters[t.Name()] 17 | if cluster == nil { 18 | os.Args = []string{"test.short=false"} 19 | testing.Init() 20 | flag.Parse() 21 | integration.BeforeTestExternal(t) 22 | cluster = integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) 23 | t.Cleanup(func() { 24 | cluster.Terminate(t) 25 | delete(clusters, t.Name()) 26 | }) 27 | cliv3 := cluster.RandClient() 28 | cliv3.KV = namespace.NewKV(cliv3.KV, prefix) 29 | cliv3.Watcher = namespace.NewWatcher(cliv3.Watcher, prefix) 30 | cliv3.Lease = namespace.NewLease(cliv3.Lease, prefix) 31 | clusters[t.Name()] = cluster 32 | } 33 | return cluster 34 | } 35 | -------------------------------------------------------------------------------- /resource/plugins/cpumem/types/cpu.go: -------------------------------------------------------------------------------- 1 | package types 2 | 3 | // CPUPlan . 4 | type CPUPlan struct { 5 | NUMANode string 6 | CPUMap CPUMap 7 | } 8 | 9 | // CPUMap . 10 | type CPUMap map[string]int 11 | 12 | // TotalPieces . 13 | func (c CPUMap) TotalPieces() int { 14 | res := 0 15 | for _, pieces := range c { 16 | res += pieces 17 | } 18 | return res 19 | } 20 | 21 | // Sub . 22 | func (c CPUMap) Sub(c1 CPUMap) { 23 | for cpu, pieces := range c1 { 24 | c[cpu] -= pieces 25 | } 26 | } 27 | 28 | // Add . 29 | func (c CPUMap) Add(c1 CPUMap) { 30 | for cpu, pieces := range c1 { 31 | c[cpu] += pieces 32 | } 33 | } 34 | 35 | // NUMA map[cpuID]nodeID 36 | type NUMA map[string]string 37 | 38 | // NUMAMemory map[nodeID]memory 39 | type NUMAMemory map[string]int64 40 | 41 | // Add . 42 | func (n NUMAMemory) Add(n1 NUMAMemory) { 43 | for numaNodeID, memory := range n1 { 44 | n[numaNodeID] += memory 45 | } 46 | } 47 | 48 | // Sub . 
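// Sub subtracts the per-NUMA-node memory of n1 from n in place.
// A small sketch with made-up values:
//
//	n := NUMAMemory{"0": 4096, "1": 4096}
//	n.Sub(NUMAMemory{"0": 1024})
//	// n is now NUMAMemory{"0": 3072, "1": 4096}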
49 | func (n NUMAMemory) Sub(n1 NUMAMemory) { 50 | for numaNodeID, memory := range n1 { 51 | n[numaNodeID] -= memory 52 | } 53 | } 54 | -------------------------------------------------------------------------------- /auth/auth.go: -------------------------------------------------------------------------------- 1 | package auth 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/projecteru2/core/auth/simple" 7 | "github.com/projecteru2/core/types" 8 | 9 | "google.golang.org/grpc" 10 | ) 11 | 12 | // Auth define auth obj 13 | type Auth interface { 14 | StreamInterceptor(srv any, stream grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error 15 | UnaryInterceptor(ctx context.Context, req any, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (any, error) 16 | } 17 | 18 | // NewAuth return auth obj 19 | func NewAuth(auth types.AuthConfig) Auth { 20 | // TODO 这里可以组装其他的方法 21 | return simple.NewBasicAuth(auth.Username, auth.Password) 22 | } 23 | 24 | // Credential for client 25 | type Credential interface { 26 | GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) 27 | RequireTransportSecurity() bool 28 | } 29 | 30 | // NewCredential return credential obj 31 | func NewCredential(auth types.AuthConfig) Credential { 32 | // TODO 这里可以组装其他的方法 33 | return simple.NewBasicCredential(auth.Username, auth.Password) 34 | } 35 | -------------------------------------------------------------------------------- /types/node_test.go: -------------------------------------------------------------------------------- 1 | package types 2 | 3 | import ( 4 | "context" 5 | "strings" 6 | "testing" 7 | 8 | enginemocks "github.com/projecteru2/core/engine/mocks" 9 | enginetypes "github.com/projecteru2/core/engine/types" 10 | 11 | "github.com/stretchr/testify/assert" 12 | "github.com/stretchr/testify/mock" 13 | ) 14 | 15 | func TestNodeMeta(t *testing.T) { 16 | nm := NodeMeta{Name: "1"} 17 | nnm, err := nm.DeepCopy() 18 | assert.NoError(t, err) 19 | assert.Equal(t, nm.Name, nnm.Name) 20 | } 21 | 22 | func TestNodeInfo(t *testing.T) { 23 | mockEngine := &enginemocks.API{} 24 | r := &enginetypes.Info{ID: "test"} 25 | mockEngine.On("Info", mock.Anything).Return(r, ErrNoOps).Once() 26 | 27 | node := &Node{} 28 | ctx := context.Background() 29 | 30 | node.Engine = mockEngine 31 | err := node.Info(ctx) 32 | assert.Error(t, err) 33 | mockEngine.On("Info", mock.Anything).Return(r, nil) 34 | err = node.Info(ctx) 35 | assert.NoError(t, err) 36 | assert.True(t, strings.Contains(node.NodeInfo, "test")) 37 | 38 | node.Bypass = true 39 | assert.True(t, node.IsDown()) 40 | } 41 | -------------------------------------------------------------------------------- /store/redis/pod_test.go: -------------------------------------------------------------------------------- 1 | package redis 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/projecteru2/core/types" 7 | ) 8 | 9 | func (s *RediaronTestSuite) TestPod() { 10 | ctx := context.Background() 11 | podname := "testv3" 12 | 13 | pod, err := s.rediaron.AddPod(ctx, podname, "CPU") 14 | s.NoError(err) 15 | s.Equal(pod.Name, podname) 16 | 17 | _, err = s.rediaron.AddPod(ctx, podname, "CPU") 18 | s.Equal(err, ErrAlreadyExists) 19 | 20 | pod2, err := s.rediaron.GetPod(ctx, podname) 21 | s.NoError(err) 22 | s.Equal(pod2.Name, podname) 23 | 24 | pods, err := s.rediaron.GetAllPods(ctx) 25 | s.NoError(err) 26 | s.Equal(len(pods), 1) 27 | s.Equal(pods[0].Name, podname) 28 | 29 | _, err = s.rediaron.AddNode(ctx, &types.AddNodeOptions{Nodename: 
"test", Endpoint: "mock://", Podname: podname}) 30 | s.NoError(err) 31 | err = s.rediaron.RemovePod(ctx, podname) 32 | s.Error(err) 33 | err = s.rediaron.RemoveNode(ctx, &types.Node{NodeMeta: types.NodeMeta{Podname: podname, Name: "test", Endpoint: "mock://"}}) 34 | s.NoError(err) 35 | err = s.rediaron.RemovePod(ctx, podname) 36 | s.NoError(err) 37 | } 38 | -------------------------------------------------------------------------------- /resource/cobalt/call.go: -------------------------------------------------------------------------------- 1 | package cobalt 2 | 3 | import ( 4 | "context" 5 | "sync" 6 | 7 | "github.com/cockroachdb/errors" 8 | "github.com/projecteru2/core/log" 9 | "github.com/projecteru2/core/resource/plugins" 10 | ) 11 | 12 | func call[T any](ctx context.Context, ps []plugins.Plugin, f func(plugins.Plugin) (T, error)) (map[plugins.Plugin]T, error) { 13 | var wg sync.WaitGroup 14 | var combinedErr error 15 | var results sync.Map 16 | for _, p := range ps { 17 | wg.Add(1) 18 | go func(p plugins.Plugin) { 19 | defer wg.Done() 20 | 21 | result, err := f(p) 22 | if err != nil { 23 | log.WithFunc("resource.cobalt.call").Errorf(ctx, err, "failed to call plugin %+v", p.Name()) 24 | results.Store(p, err) 25 | return 26 | } 27 | results.Store(p, result) 28 | }(p) 29 | } 30 | wg.Wait() 31 | ans := make(map[plugins.Plugin]T) 32 | for _, p := range ps { 33 | value, _ := results.Load(p) 34 | switch vt := value.(type) { 35 | case error: 36 | combinedErr = errors.CombineErrors(combinedErr, vt) 37 | case T: 38 | ans[p] = vt 39 | } 40 | } 41 | return ans, combinedErr 42 | } 43 | -------------------------------------------------------------------------------- /resource/plugins/cpumem/cpumem.go: -------------------------------------------------------------------------------- 1 | package cpumem 2 | 3 | import ( 4 | "context" 5 | "testing" 6 | 7 | "github.com/projecteru2/core/log" 8 | "github.com/projecteru2/core/store/etcdv3/meta" 9 | coretypes "github.com/projecteru2/core/types" 10 | ) 11 | 12 | const ( 13 | name = "cpumem" 14 | rate = 8 15 | nodeResourceInfoKey = "/resource/cpumem/%s" 16 | priority = 100 17 | ) 18 | 19 | // Plugin 20 | type Plugin struct { 21 | name string 22 | config coretypes.Config 23 | store meta.KV 24 | } 25 | 26 | // NewPlugin . 27 | func NewPlugin(ctx context.Context, config coretypes.Config, t *testing.T) (*Plugin, error) { 28 | if t == nil && len(config.Etcd.Machines) < 1 { 29 | return nil, coretypes.ErrConfigInvaild 30 | } 31 | var err error 32 | plugin := &Plugin{name: name, config: config} 33 | if plugin.store, err = meta.NewETCD(config.Etcd, t); err != nil { 34 | log.WithFunc("resource.cpumem.NewPlugin").Error(ctx, err) 35 | return nil, err 36 | } 37 | return plugin, nil 38 | } 39 | 40 | // Name . 
41 | func (p Plugin) Name() string { 42 | return p.name 43 | } 44 | -------------------------------------------------------------------------------- /store/redis/deploy_test.go: -------------------------------------------------------------------------------- 1 | package redis 2 | 3 | import ( 4 | "context" 5 | "path/filepath" 6 | 7 | "github.com/projecteru2/core/types" 8 | ) 9 | 10 | func (s *RediaronTestSuite) TestDeploy() { 11 | ctx := context.Background() 12 | opts := &types.DeployOptions{ 13 | Name: "app", 14 | Entrypoint: &types.Entrypoint{Name: "entry"}, 15 | ProcessIdent: "abc", 16 | NodeFilter: &types.NodeFilter{}, 17 | } 18 | 19 | // no workload deployed 20 | nodeCount, err := s.rediaron.GetDeployStatus(ctx, opts.Name, opts.Entrypoint.Name) 21 | s.NoError(err) 22 | s.Equal(len(nodeCount), 0) 23 | // have workloads 24 | key := filepath.Join(workloadDeployPrefix, opts.Name, opts.Entrypoint.Name, "node", "id1") 25 | _, err = s.rediaron.cli.Set(ctx, key, "", 0).Result() 26 | s.NoError(err) 27 | key = filepath.Join(workloadDeployPrefix, opts.Name, opts.Entrypoint.Name, "node", "id2") 28 | s.NoError(err) 29 | _, err = s.rediaron.cli.Set(ctx, key, "", 0).Result() 30 | s.NoError(err) 31 | nodeCount, err = s.rediaron.GetDeployStatus(ctx, opts.Name, opts.Entrypoint.Name) 32 | s.NoError(err) 33 | s.Equal(nodeCount["node"], 2) 34 | } 35 | -------------------------------------------------------------------------------- /wal/kv/mocked_test.go: -------------------------------------------------------------------------------- 1 | package kv 2 | 3 | import ( 4 | "testing" 5 | "time" 6 | 7 | "github.com/stretchr/testify/require" 8 | ) 9 | 10 | func TestMockedKV(t *testing.T) { 11 | m := NewMockedKV() 12 | require.NoError(t, m.Open("/tmp/wal", 0777, time.Second)) 13 | 14 | a := []byte("/a") 15 | b := []byte("/b") 16 | expValue := []byte("v") 17 | require.NoError(t, m.Put(a, expValue)) 18 | require.NoError(t, m.Put(b, expValue)) 19 | require.NoError(t, m.Put([]byte("out-of-scan"), expValue)) 20 | 21 | ch, abort := m.Scan([]byte("/")) 22 | elem := <-ch 23 | ent, ok := elem.(MockedScanEntry) 24 | require.True(t, ok) 25 | require.NoError(t, ent.Err) 26 | abort() 27 | 28 | _, abort = m.Scan([]byte("/")) 29 | abort() 30 | 31 | realValue, err := m.Get(a) 32 | require.NoError(t, err) 33 | require.Equal(t, expValue, realValue) 34 | 35 | realValue, err = m.Get(b) 36 | require.NoError(t, err) 37 | require.Equal(t, expValue, realValue) 38 | 39 | require.NoError(t, m.Delete(b)) 40 | realValue, err = m.Get(b) 41 | require.Error(t, err) 42 | 43 | require.NoError(t, m.Close()) 44 | 45 | realValue, err = m.Get(a) 46 | require.Error(t, err) 47 | } 48 | -------------------------------------------------------------------------------- /wal/event.go: -------------------------------------------------------------------------------- 1 | package wal 2 | 3 | import ( 4 | "encoding/json" 5 | "fmt" 6 | "path/filepath" 7 | "strconv" 8 | "strings" 9 | ) 10 | 11 | // HydroEvent indicates a log event. 12 | type HydroEvent struct { 13 | // A global unique identifier. 14 | ID uint64 `json:"ID"` 15 | 16 | // Registered event type name. 17 | Type string `json:"type"` 18 | 19 | // The encoded log item. 20 | Item []byte `json:"item"` 21 | } 22 | 23 | // NewHydroEvent initializes a new HydroEvent instance. 
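// The ID is normally taken from the underlying kv.Sequencer, typ from a registered
// event handler and item from that handler's Encode. A small sketch (the event
// type name and payload are made up):
//
//	ev := NewHydroEvent(1, "create-workload", []byte(`{"id":"abc"}`))
//	buf, _ := ev.Encode() // indented JSON of the whole event
//	key := ev.Key()       // "/events/0000000000000001"
//	_, _ = buf, key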
24 | func NewHydroEvent(ID uint64, typ string, item []byte) *HydroEvent { 25 | return &HydroEvent{ID: ID, Type: typ, Item: item} 26 | } 27 | 28 | // Encode this event 29 | func (e HydroEvent) Encode() ([]byte, error) { 30 | return json.MarshalIndent(e, "", "\t") 31 | } 32 | 33 | // Key returns this event's key path. 34 | func (e HydroEvent) Key() []byte { 35 | return []byte(filepath.Join(eventPrefix, fmt.Sprintf("%016x", e.ID))) 36 | } 37 | 38 | func parseHydroEventID(key []byte) (uint64, error) { 39 | // Trims the EventPrefix, then trims the padding 0. 40 | ID := strings.TrimLeft(strings.TrimPrefix(string(key), eventPrefix), "0") 41 | return strconv.ParseUint(ID, 16, 64) 42 | } 43 | -------------------------------------------------------------------------------- /wal/kv/kv.go: -------------------------------------------------------------------------------- 1 | package kv 2 | 3 | import ( 4 | "os" 5 | "time" 6 | ) 7 | 8 | // KV is the interface that groups the Simpler and Scanner interfaces. 9 | type KV interface { 10 | OpenCloser 11 | Simpler 12 | Scanner 13 | Sequencer 14 | } 15 | 16 | // Simpler is the interface that groups the basic Put, Get and Delete methods. 17 | type Simpler interface { 18 | Put([]byte, []byte) error 19 | Get([]byte) ([]byte, error) 20 | Delete([]byte) error 21 | } 22 | 23 | // Scanner is the interface that wraps the basic Scan method. 24 | type Scanner interface { 25 | Scan([]byte) (<-chan ScanEntry, func()) 26 | } 27 | 28 | // Sequencer is the interface that wraps the basic NextSequence method. 29 | type Sequencer interface { 30 | NextSequence() (ID uint64, err error) 31 | } 32 | 33 | // OpenCloser is the interface that groups the basic Open and Close methods. 34 | type OpenCloser interface { 35 | Open(path string, mode os.FileMode, timeout time.Duration) error 36 | Close() error 37 | } 38 | 39 | // ScanEntry is the interface that groups the basic Pair and Error methods. 
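// A typical consumer drains the channel returned by Scanner.Scan and stops at the
// first failed entry, e.g. (sketch only; kv and handle are placeholders):
//
//	ch, abort := kv.Scan([]byte("/events/"))
//	defer abort()
//	for ent := range ch {
//		if err := ent.Error(); err != nil {
//			return err
//		}
//		key, value := ent.Pair()
//		handle(key, value)
//	}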
40 | type ScanEntry interface { 41 | Pair() (key []byte, value []byte) 42 | Error() error 43 | } 44 | -------------------------------------------------------------------------------- /engine/docker/helper_test.go: -------------------------------------------------------------------------------- 1 | package docker 2 | 3 | import ( 4 | "bytes" 5 | "context" 6 | "io/ioutil" 7 | "os" 8 | "strings" 9 | "testing" 10 | 11 | "github.com/stretchr/testify/assert" 12 | 13 | coreutils "github.com/projecteru2/core/utils" 14 | ) 15 | 16 | func TestCreateTarStream(t *testing.T) { 17 | buff := bytes.NewBufferString("test") 18 | rc := ioutil.NopCloser(buff) 19 | fname, err := coreutils.TempFile(rc) 20 | assert.NoError(t, err) 21 | _, err = CreateTarStream(fname) 22 | assert.NoError(t, err) 23 | } 24 | 25 | func TestWithDumpFiles(t *testing.T) { 26 | data := map[string][]byte{ 27 | "/tmp/test-1": []byte("1"), 28 | "/tmp/test-2": []byte("2"), 29 | } 30 | fp := []string{} 31 | for target, content := range data { 32 | withTarfileDump(context.Background(), target, content, 0, 0, int64(0), func(target, tarfile string) error { 33 | assert.True(t, strings.HasPrefix(target, "/tmp/test")) 34 | fp = append(fp, tarfile) 35 | _, err := os.Stat(tarfile) 36 | assert.Nil(t, err) 37 | return nil 38 | }) 39 | } 40 | for _, path := range fp { 41 | _, err := os.Stat(path) 42 | assert.True(t, os.IsNotExist(err)) 43 | } 44 | } 45 | -------------------------------------------------------------------------------- /engine/systemd/systemd.go: -------------------------------------------------------------------------------- 1 | package systemd 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/projecteru2/core/engine" 7 | "github.com/projecteru2/core/engine/docker" 8 | enginetypes "github.com/projecteru2/core/engine/types" 9 | coretypes "github.com/projecteru2/core/types" 10 | ) 11 | 12 | // TCPPrefix is engine endpoint prefix 13 | const TCPPrefix = "systemd://" 14 | 15 | // Engine is engine for systemd 16 | type Engine struct { 17 | engine.API 18 | config coretypes.Config 19 | ep *enginetypes.Params 20 | } 21 | 22 | // MakeClient make systemd cli 23 | func MakeClient(ctx context.Context, config coretypes.Config, nodename, endpoint, ca, cert, key string) (engine.API, error) { 24 | api, err := docker.MakeClient(ctx, config, nodename, endpoint, ca, cert, key) 25 | if err != nil { 26 | return nil, err 27 | } 28 | ep := &enginetypes.Params{ 29 | Nodename: nodename, 30 | Endpoint: endpoint, 31 | CA: ca, 32 | Cert: cert, 33 | Key: key, 34 | } 35 | return &Engine{ 36 | API: api, 37 | config: config, 38 | ep: ep, 39 | }, nil 40 | } 41 | 42 | func (e *Engine) GetParams() *enginetypes.Params { 43 | return e.ep 44 | } 45 | -------------------------------------------------------------------------------- /engine/docker/container_test.go: -------------------------------------------------------------------------------- 1 | package docker 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/stretchr/testify/assert" 7 | ) 8 | 9 | func TestRawArgs(t *testing.T) { 10 | assert := assert.New(t) 11 | 12 | r1, err := loadRawArgs([]byte(``)) 13 | assert.NoError(err) 14 | assert.NotEqual(r1.StorageOpt, nil) 15 | assert.Equal(len(r1.StorageOpt), 0) 16 | assert.NotEqual(r1.CapAdd, nil) 17 | assert.Equal(len(r1.CapAdd), 0) 18 | assert.NotEqual(r1.CapDrop, nil) 19 | assert.Equal(len(r1.CapDrop), 0) 20 | assert.NotEqual(r1.Ulimits, nil) 21 | assert.Equal(len(r1.Ulimits), 0) 22 | 23 | r2, err := loadRawArgs([]byte(`{"storage_opt": null, "cap_add": null, "cap_drop": 
null, "ulimits": null}`)) 24 | assert.NoError(err) 25 | assert.NotEqual(r2.StorageOpt, nil) 26 | assert.Equal(len(r2.StorageOpt), 0) 27 | assert.NotEqual(r2.CapAdd, nil) 28 | assert.Equal(len(r2.CapAdd), 0) 29 | assert.NotEqual(r2.CapDrop, nil) 30 | assert.Equal(len(r2.CapDrop), 0) 31 | assert.NotEqual(r2.Ulimits, nil) 32 | assert.Equal(len(r2.Ulimits), 0) 33 | 34 | _, err = loadRawArgs([]byte(`{"storage_opt": null, "cap_add": null, "cap_drop": null, "ulimits"}`)) 35 | assert.Error(err) 36 | } 37 | -------------------------------------------------------------------------------- /metrics/handler.go: -------------------------------------------------------------------------------- 1 | package metrics 2 | 3 | import ( 4 | "context" 5 | "net/http" 6 | 7 | "github.com/projecteru2/core/cluster" 8 | "github.com/projecteru2/core/log" 9 | "github.com/projecteru2/core/types" 10 | ) 11 | 12 | // ResourceMiddleware to make sure update resource correct 13 | func (m *Metrics) ResourceMiddleware(cluster cluster.Cluster) func(http.Handler) http.Handler { 14 | logger := log.WithFunc("metrics.ResourceMiddleware") 15 | return func(h http.Handler) http.Handler { 16 | return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { 17 | ctx, cancel := context.WithTimeout(r.Context(), m.Config.GlobalTimeout) 18 | defer cancel() 19 | nodes, err := cluster.ListPodNodes(ctx, &types.ListNodesOptions{All: true}) 20 | if err != nil { 21 | logger.Error(ctx, err, "Get all nodes err") 22 | } 23 | for node := range nodes { 24 | m.SendPodNodeStatus(ctx, node) 25 | metrics, err := m.rmgr.GetNodeMetrics(ctx, node) 26 | if err != nil { 27 | logger.Error(ctx, err, "Get metrics failed") 28 | continue 29 | } 30 | m.SendMetrics(ctx, metrics...) 31 | } 32 | 33 | h.ServeHTTP(w, r) 34 | }) 35 | } 36 | } 37 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License 2 | 3 | Copyright (c) 2010-2017 Google, Inc. http://angularjs.org 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in 13 | all copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 21 | THE SOFTWARE. 
22 | 23 | -------------------------------------------------------------------------------- /store/etcdv3/deploy_test.go: -------------------------------------------------------------------------------- 1 | package etcdv3 2 | 3 | import ( 4 | "context" 5 | "path/filepath" 6 | "testing" 7 | 8 | "github.com/projecteru2/core/types" 9 | 10 | "github.com/stretchr/testify/assert" 11 | ) 12 | 13 | func TestDeploy(t *testing.T) { 14 | m := NewMercury(t) 15 | ctx := context.Background() 16 | opts := &types.DeployOptions{ 17 | Name: "app", 18 | Entrypoint: &types.Entrypoint{Name: "entry"}, 19 | ProcessIdent: "abc", 20 | NodeFilter: &types.NodeFilter{}, 21 | } 22 | 23 | // no workload deployed 24 | nodeCount, err := m.GetDeployStatus(ctx, opts.Name, opts.Entrypoint.Name) 25 | assert.NoError(t, err) 26 | assert.Equal(t, len(nodeCount), 0) 27 | // have workloads 28 | key := filepath.Join(workloadDeployPrefix, opts.Name, opts.Entrypoint.Name, "node", "id1") 29 | _, err = m.Put(ctx, key, "") 30 | assert.NoError(t, err) 31 | key = filepath.Join(workloadDeployPrefix, opts.Name, opts.Entrypoint.Name, "node", "id2") 32 | _, err = m.Put(ctx, key, "") 33 | assert.NoError(t, err) 34 | nodeCount, err = m.GetDeployStatus(ctx, opts.Name, opts.Entrypoint.Name) 35 | assert.NoError(t, err) 36 | assert.Equal(t, nodeCount["node"], 2) 37 | } 38 | -------------------------------------------------------------------------------- /cluster/calcium/metrics.go: -------------------------------------------------------------------------------- 1 | package calcium 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/sanity-io/litter" 7 | 8 | "github.com/projecteru2/core/log" 9 | "github.com/projecteru2/core/metrics" 10 | "github.com/projecteru2/core/types" 11 | ) 12 | 13 | // InitMetrics . 14 | func (c *Calcium) InitMetrics(ctx context.Context) { 15 | logger := log.WithFunc("calcium.InitMetrics") 16 | metricsDescriptions, err := c.rmgr.GetMetricsDescription(ctx) 17 | if err != nil { 18 | logger.Error(ctx, err, "failed to get metrics description") 19 | return 20 | } 21 | if err = metrics.InitMetrics(ctx, c.config, metricsDescriptions); err != nil { 22 | logger.Error(ctx, err, "failed to init metrics") 23 | return 24 | } 25 | logger.Infof(ctx, "init metrics success \n%+v", litter.Sdump(metricsDescriptions)) 26 | } 27 | 28 | func (c *Calcium) doSendNodeMetrics(ctx context.Context, node *types.Node) { 29 | nodeMetrics, err := c.rmgr.GetNodeMetrics(ctx, node) 30 | if err != nil { 31 | log.WithFunc("calcium.doSendNodeMetrics").Errorf(ctx, err, "convert node %s resource info to metrics failed", node.Name) 32 | return 33 | } 34 | metrics.Client.SendMetrics(ctx, nodeMetrics...) 35 | } 36 | -------------------------------------------------------------------------------- /strategy/drained.go: -------------------------------------------------------------------------------- 1 | package strategy 2 | 3 | import ( 4 | "context" 5 | "sort" 6 | 7 | "github.com/projecteru2/core/types" 8 | 9 | "github.com/cockroachdb/errors" 10 | ) 11 | 12 | // DrainedPlan deploys to the node with the smallest Capacity first, squeezing each node as dry as possible before moving on to the next one.
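// For example (numbers are made up): with node A at capacity 2, node B at capacity 5,
// need=4 and total=7, A is drained first and the remainder lands on B:
//
//	deploy, _ := DrainedPlan(ctx, []Info{{Nodename: "A", Capacity: 2}, {Nodename: "B", Capacity: 5}}, 4, 7, 0)
//	// deploy == map[string]int{"A": 2, "B": 2}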
13 | func DrainedPlan(_ context.Context, infos []Info, need, total, _ int) (map[string]int, error) { 14 | if total < need { 15 | return nil, errors.Wrapf(types.ErrInsufficientResource, "need: %d, available: %d", need, total) 16 | } 17 | 18 | deploy := map[string]int{} 19 | 20 | infosCopy := make([]Info, len(infos)) 21 | copy(infosCopy, infos) 22 | sort.Slice(infosCopy, func(i, j int) bool { 23 | if infosCopy[i].Capacity < infosCopy[j].Capacity { 24 | return true 25 | } 26 | return infosCopy[i].Usage > infosCopy[j].Usage 27 | }) 28 | 29 | for idx := 0; idx < len(infosCopy); idx++ { 30 | info := &infosCopy[idx] 31 | if need < info.Capacity { 32 | deploy[info.Nodename] = need 33 | need = 0 34 | } else { 35 | deploy[info.Nodename] = info.Capacity 36 | need -= info.Capacity 37 | } 38 | if need == 0 { 39 | return deploy, nil 40 | } 41 | } 42 | return nil, errors.Wrapf(types.ErrInsufficientResource, "BUG: never reach here") 43 | } 44 | -------------------------------------------------------------------------------- /store/etcdv3/pod_test.go: -------------------------------------------------------------------------------- 1 | package etcdv3 2 | 3 | import ( 4 | "context" 5 | "testing" 6 | 7 | "github.com/projecteru2/core/types" 8 | 9 | "github.com/stretchr/testify/assert" 10 | ) 11 | 12 | func TestPod(t *testing.T) { 13 | m := NewMercury(t) 14 | ctx := context.Background() 15 | podname := "testv3" 16 | 17 | pod, err := m.AddPod(ctx, podname, "CPU") 18 | assert.NoError(t, err) 19 | assert.Equal(t, pod.Name, podname) 20 | 21 | _, err = m.AddPod(ctx, podname, "CPU") 22 | assert.Equal(t, err, types.ErrKeyExists) 23 | 24 | pod2, err := m.GetPod(ctx, podname) 25 | assert.NoError(t, err) 26 | assert.Equal(t, pod2.Name, podname) 27 | 28 | pods, err := m.GetAllPods(ctx) 29 | assert.NoError(t, err) 30 | assert.Equal(t, len(pods), 1) 31 | assert.Equal(t, pods[0].Name, podname) 32 | 33 | _, err = m.AddNode(ctx, &types.AddNodeOptions{Nodename: "test", Endpoint: "mock://", Podname: podname}) 34 | assert.NoError(t, err) 35 | err = m.RemovePod(ctx, podname) 36 | assert.Error(t, err) 37 | err = m.RemoveNode(ctx, &types.Node{NodeMeta: types.NodeMeta{Podname: podname, Name: "test", Endpoint: "mock://"}}) 38 | assert.NoError(t, err) 39 | err = m.RemovePod(ctx, podname) 40 | assert.NoError(t, err) 41 | } 42 | -------------------------------------------------------------------------------- /wal/wal.go: -------------------------------------------------------------------------------- 1 | package wal 2 | 3 | import ( 4 | "context" 5 | ) 6 | 7 | const ( 8 | eventPrefix = "/events/" 9 | ) 10 | 11 | // WAL is the interface that groups the Register and Recover interfaces. 12 | type WAL interface { 13 | Registry 14 | Recoverer 15 | Logger 16 | Closer 17 | } 18 | 19 | // Recoverer is the interface that wraps the basic Recover method. 20 | type Recoverer interface { 21 | Recover(context.Context) 22 | } 23 | 24 | // Registry is the interface that wraps the basic Register method. 25 | type Registry interface { 26 | Register(EventHandler) 27 | } 28 | 29 | // Logger is the interface that wraps the basic Log method. 30 | type Logger interface { 31 | Log(string, any) (Commit, error) 32 | } 33 | 34 | // Closer is the interface that groups the Close methods. 35 | type Closer interface { 36 | Close() error 37 | } 38 | 39 | // EventHandler is the interface that groups a few methods. 
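// A minimal implementation sketch using encoding/json (the type and event name are
// illustrative only):
//
//	type createWorkloadHandler struct{}
//
//	func (h createWorkloadHandler) Typ() string { return "create-workload" }
//
//	func (h createWorkloadHandler) Check(context.Context, any) (bool, error) { return true, nil }
//
//	func (h createWorkloadHandler) Encode(item any) ([]byte, error) { return json.Marshal(item) }
//
//	func (h createWorkloadHandler) Decode(b []byte) (any, error) {
//		item := map[string]any{}
//		err := json.Unmarshal(b, &item)
//		return item, err
//	}
//
//	func (h createWorkloadHandler) Handle(context.Context, any) error { return nil }
//
// Handlers are attached through Registry.Register so that Recover can replay
// unfinished events on startup.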
40 | type EventHandler interface { 41 | Typ() string 42 | Check(context.Context, any) (need bool, err error) 43 | Encode(any) ([]byte, error) 44 | Decode([]byte) (any, error) 45 | Handle(context.Context, any) error 46 | } 47 | 48 | // Commit is a function for committing an event log 49 | type Commit func() error 50 | -------------------------------------------------------------------------------- /store/redis/helper.go: -------------------------------------------------------------------------------- 1 | package redis 2 | 3 | import ( 4 | "context" 5 | "strings" 6 | ) 7 | 8 | // extracts node name from key 9 | // /nodestatus/nodename -> nodename 10 | func extractNodename(s string) string { 11 | ps := strings.Split(s, "/") 12 | return ps[len(ps)-1] 13 | } 14 | 15 | func parseStatusKey(key string) (string, string, string, string) { 16 | parts := strings.Split(key, "/") 17 | l := len(parts) 18 | return parts[l-4], parts[l-3], parts[l-2], parts[l-1] 19 | } 20 | 21 | // getByKeyPattern gets key-value pairs that key matches pattern 22 | func (r *Rediaron) getByKeyPattern(ctx context.Context, pattern string, limit int64) (map[string]string, error) { 23 | var ( 24 | cursor uint64 25 | result []string 26 | err error 27 | count int64 28 | keys = []string{} 29 | ) 30 | for { 31 | result, cursor, err = r.cli.Scan(ctx, cursor, pattern, 0).Result() 32 | if err != nil { 33 | return nil, err 34 | } 35 | 36 | keys = append(keys, result...) 37 | count += int64(len(result)) 38 | if cursor == 0 || (limit > 0 && count >= limit) { 39 | break 40 | } 41 | } 42 | if limit > 0 && int64(len(keys)) >= limit { 43 | keys = keys[:limit] 44 | } 45 | return r.GetMulti(ctx, keys) 46 | } 47 | -------------------------------------------------------------------------------- /source/common/helper.go: -------------------------------------------------------------------------------- 1 | package common 2 | 3 | import ( 4 | "archive/zip" 5 | "bytes" 6 | "io" 7 | "os" 8 | "path/filepath" 9 | ) 10 | 11 | // unzipFile unzip a file(from resp.Body) to the spec path 12 | func unzipFile(body io.Reader, path string) error { 13 | content, err := io.ReadAll(body) 14 | if err != nil { 15 | return err 16 | } 17 | 18 | reader, err := zip.NewReader(bytes.NewReader(content), int64(len(content))) 19 | if err != nil { 20 | return err 21 | } 22 | 23 | // extract files from zipfile 24 | for _, f := range reader.File { 25 | zipped, err := f.Open() 26 | if err != nil { 27 | return err 28 | } 29 | 30 | defer zipped.Close() 31 | 32 | // G305: File traversal when extracting zip archive 33 | p := filepath.Join(path, f.Name) //nolint 34 | 35 | if f.FileInfo().IsDir() { 36 | _ = os.MkdirAll(p, f.Mode()) 37 | continue 38 | } 39 | 40 | writer, err := os.OpenFile(p, os.O_WRONLY|os.O_CREATE, f.Mode()) 41 | if err != nil { 42 | return err 43 | } 44 | 45 | defer writer.Close() 46 | if _, err = io.Copy(writer, zipped); err != nil { //nolint 47 | // G110: Potential DoS vulnerability via decompression bomb 48 | return err 49 | } 50 | } 51 | return nil 52 | } 53 | -------------------------------------------------------------------------------- /.golangci.yml: -------------------------------------------------------------------------------- 1 | run: 2 | timeout: 5m 3 | tests: false 4 | skip-dirs: 5 | - vendor 6 | - tools 7 | - 3rdmocks 8 | modules-download-mode: readonly 9 | go: 1.20 10 | 11 | linters-settings: 12 | maligned: 13 | suggest-new: true 14 | gocritic: 15 | disabled-checks: 16 | - captLocal 17 | 18 | linters: 19 | disable-all: true 20 | enable: 21 | - 
bodyclose 22 | # - depguard 23 | - dogsled 24 | - gochecknoinits 25 | - goconst 26 | - gocyclo 27 | - gofmt 28 | - goimports 29 | - revive 30 | - goprintffuncname 31 | - gosec 32 | - gosimple 33 | - govet 34 | - ineffassign 35 | - misspell 36 | - nakedret 37 | - rowserrcheck 38 | - exportloopref 39 | - staticcheck 40 | - typecheck 41 | - unconvert 42 | - unparam 43 | - unused 44 | - asciicheck 45 | - nestif 46 | - errcheck 47 | - gocritic 48 | #Consider this 49 | # - godox 50 | # - funlen 51 | # - lll 52 | # - gochecknoglobals 53 | # don't enable: 54 | # - whitespace 55 | # - goerr113 56 | # - godot 57 | # - maligned 58 | # - prealloc 59 | # - testpackage 60 | # - wsl 61 | # - sylecheck 62 | -------------------------------------------------------------------------------- /utils/file.go: -------------------------------------------------------------------------------- 1 | package utils 2 | 3 | import ( 4 | "io/fs" 5 | "path/filepath" 6 | ) 7 | 8 | const executablePerm = 0111 9 | 10 | // ListAllExecutableFiles returns all the executable files in the given path 11 | func ListAllExecutableFiles(basedir string) ([]string, error) { 12 | files := []string{} 13 | err := filepath.Walk(basedir, func(path string, info fs.FileInfo, err error) error { 14 | if err != nil { 15 | return err 16 | } 17 | if info.IsDir() && path != basedir { 18 | return filepath.SkipDir 19 | } 20 | if !info.IsDir() && isExecutable(info.Mode().Perm()) { 21 | files = append(files, path) 22 | } 23 | return nil 24 | }) 25 | 26 | return files, err 27 | } 28 | 29 | func ListAllSharedLibFiles(basedir string) ([]string, error) { 30 | files := []string{} 31 | err := filepath.Walk(basedir, func(path string, info fs.FileInfo, err error) error { 32 | if err != nil { 33 | return err 34 | } 35 | if info.IsDir() && path != basedir { 36 | return filepath.SkipDir 37 | } 38 | if !info.IsDir() && filepath.Ext(path) == ".so" { 39 | files = append(files, path) 40 | } 41 | return nil 42 | }) 43 | 44 | return files, err 45 | } 46 | 47 | func isExecutable(perm fs.FileMode) bool { 48 | return perm&executablePerm == executablePerm 49 | } 50 | -------------------------------------------------------------------------------- /engine/docker/tarfile.go: -------------------------------------------------------------------------------- 1 | package docker 2 | 3 | import ( 4 | "archive/tar" 5 | "context" 6 | "os" 7 | "path/filepath" 8 | 9 | "github.com/projecteru2/core/log" 10 | ) 11 | 12 | func withTarfileDump(ctx context.Context, target string, content []byte, uid, gid int, mode int64, f func(target, tarfile string) error) error { 13 | tarfile, err := tempTarFile(target, content, uid, gid, mode) 14 | 15 | defer func(tarfile string) { 16 | if err := os.RemoveAll(tarfile); err != nil { 17 | log.WithFunc("engine.docker.withTarfileDump").Warnf(ctx, "clean dump files failed: %+v", err) 18 | } 19 | }(tarfile) 20 | 21 | if err != nil { 22 | return err 23 | } 24 | return f(target, tarfile) 25 | } 26 | 27 | func tempTarFile(path string, data []byte, uid, gid int, mode int64) (string, error) { 28 | filename := filepath.Base(path) 29 | f, err := os.CreateTemp(os.TempDir(), filename) 30 | if err != nil { 31 | return "", err 32 | } 33 | name := f.Name() 34 | defer f.Close() 35 | 36 | tw := tar.NewWriter(f) 37 | defer tw.Close() 38 | hdr := &tar.Header{ 39 | Name: filename, 40 | Size: int64(len(data)), 41 | Mode: mode, 42 | Uid: uid, 43 | Gid: gid, 44 | } 45 | if err := tw.WriteHeader(hdr); err != nil { 46 | return name, err 47 | } 48 | _, err = tw.Write(data) 49 | return 
name, err 50 | } 51 | -------------------------------------------------------------------------------- /resource/plugins/binary/call.go: -------------------------------------------------------------------------------- 1 | package binary 2 | 3 | import ( 4 | "bytes" 5 | "context" 6 | "encoding/json" 7 | "os/exec" 8 | 9 | "github.com/projecteru2/core/log" 10 | ) 11 | 12 | // calls the plugin and gets json response 13 | func (p Plugin) call(ctx context.Context, cmd string, req any, resp any) error { 14 | ctx, cancel := context.WithTimeout(ctx, p.config.ResourcePlugin.CallTimeout) 15 | defer cancel() 16 | logger := log.WithFunc("resource.binary.call") 17 | 18 | command := exec.CommandContext(ctx, p.path, cmd) // nolint 19 | command.Dir = p.config.ResourcePlugin.Dir 20 | 21 | out, err := p.execCommand(ctx, command, req) 22 | if err != nil { 23 | logger.Error(ctx, err, string(out)) 24 | return err 25 | } 26 | 27 | if len(out) == 0 { 28 | return nil 29 | } 30 | return json.Unmarshal(out, resp) 31 | } 32 | 33 | func (p Plugin) execCommand(ctx context.Context, cmd *exec.Cmd, req any) ([]byte, error) { 34 | logger := log.WithFunc("resource.binary.execCommand") 35 | b, err := json.Marshal(req) 36 | if err != nil { 37 | return nil, err 38 | } 39 | if len(cmd.Args) < 2 || cmd.Args[1] != GetMetricsCommand { 40 | logger.WithField("in", string(b)).WithField("cmd", cmd.String()).Info(ctx, "call params") 41 | } 42 | cmd.Stdin = bytes.NewBuffer(b) 43 | return cmd.CombinedOutput() 44 | } 45 | -------------------------------------------------------------------------------- /utils/transaction_test.go: -------------------------------------------------------------------------------- 1 | package utils 2 | 3 | import ( 4 | "context" 5 | "errors" 6 | "os" 7 | "testing" 8 | "time" 9 | 10 | "github.com/stretchr/testify/assert" 11 | ) 12 | 13 | func TestTxn(t *testing.T) { 14 | err1 := errors.New("err1") 15 | err := Txn( 16 | context.Background(), 17 | func(context.Context) error { 18 | return err1 19 | }, 20 | nil, 21 | func(context.Context, bool) error { 22 | return errors.New("error 2") 23 | }, 24 | 10*time.Second, 25 | ) 26 | assert.Contains(t, err.Error(), err1.Error()) 27 | err = Txn( 28 | context.Background(), 29 | func(context.Context) error { 30 | return nil 31 | }, 32 | func(context.Context) error { 33 | return err1 34 | }, 35 | nil, 36 | 10*time.Second, 37 | ) 38 | assert.Contains(t, err.Error(), err1.Error()) 39 | err = Txn( 40 | context.Background(), 41 | func(context.Context) error { 42 | return nil 43 | }, 44 | nil, 45 | nil, 46 | 10*time.Second, 47 | ) 48 | assert.NoError(t, err) 49 | } 50 | 51 | func TestPCR(t *testing.T) { 52 | prepare := func(context.Context) error { 53 | return os.ErrClosed 54 | } 55 | commit := func(context.Context) error { 56 | return os.ErrClosed 57 | } 58 | 59 | ctx := context.Background() 60 | assert.Error(t, PCR(ctx, prepare, commit, commit, time.Second)) 61 | } 62 | -------------------------------------------------------------------------------- /resource/plugins/binary/types/calculate.go: -------------------------------------------------------------------------------- 1 | package types 2 | 3 | import ( 4 | plugintypes "github.com/projecteru2/core/resource/plugins/types" 5 | ) 6 | 7 | // CalculateDeployRequest . 
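// CalculateDeployRequest mirrors the JSON document the core writes to the plugin
// binary's stdin when asking it to calculate a deployment. An illustrative payload
// (the fields inside workload_resource_request are plugin-specific and made up here):
//
//	{
//	  "nodename": "node-1",
//	  "deploy_count": 3,
//	  "workload_resource_request": {"cpu": 1, "memory": 536870912}
//	}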
8 | type CalculateDeployRequest struct { 9 | Nodename string `json:"nodename" mapstructure:"nodename"` 10 | DeployCount int `json:"deploy_count" mapstructure:"deploy_count"` 11 | WorkloadResourceRequest plugintypes.WorkloadResourceRequest `json:"workload_resource_request" mapstructure:"workload_resource_request"` 12 | } 13 | 14 | // CalculateReallocRequest . 15 | type CalculateReallocRequest struct { 16 | Nodename string `json:"nodename" mapstructure:"nodename"` 17 | WorkloadResource plugintypes.WorkloadResource `json:"workload_resource" mapstructure:"workload_resource"` 18 | WorkloadResourceRequest plugintypes.WorkloadResourceRequest `json:"workload_resource_request" mapstructure:"workload_resource_request"` 19 | } 20 | 21 | // CalculateRemapRequest . 22 | type CalculateRemapRequest struct { 23 | Nodename string `json:"nodename" mapstructure:"nodename"` 24 | WorkloadsResource map[string]plugintypes.WorkloadResource `json:"workloads_resource" mapstructure:"workloads_resource"` 25 | } 26 | -------------------------------------------------------------------------------- /cluster/calcium/log.go: -------------------------------------------------------------------------------- 1 | package calcium 2 | 3 | import ( 4 | "bufio" 5 | "context" 6 | 7 | enginetypes "github.com/projecteru2/core/engine/types" 8 | "github.com/projecteru2/core/log" 9 | "github.com/projecteru2/core/types" 10 | ) 11 | 12 | // LogStream log stream for one workload 13 | func (c *Calcium) LogStream(ctx context.Context, opts *types.LogStreamOptions) (chan *types.LogStreamMessage, error) { 14 | logger := log.WithFunc("calcium.LogStream").WithField("opts", opts) 15 | ch := make(chan *types.LogStreamMessage) 16 | _ = c.pool.Invoke(func() { 17 | defer close(ch) 18 | workload, err := c.GetWorkload(ctx, opts.ID) 19 | if err != nil { 20 | logger.Error(ctx, err) 21 | ch <- &types.LogStreamMessage{ID: opts.ID, Error: err} 22 | return 23 | } 24 | 25 | stdout, stderr, err := workload.Engine.VirtualizationLogs(ctx, &enginetypes.VirtualizationLogStreamOptions{ 26 | ID: opts.ID, 27 | Tail: opts.Tail, 28 | Since: opts.Since, 29 | Until: opts.Until, 30 | Follow: opts.Follow, 31 | Stdout: true, 32 | Stderr: true, 33 | }) 34 | logger.Error(ctx, err) 35 | if err != nil { 36 | ch <- &types.LogStreamMessage{ID: opts.ID, Error: err} 37 | return 38 | } 39 | 40 | for m := range c.processStdStream(ctx, stdout, stderr, bufio.ScanLines, byte('\n')) { 41 | ch <- &types.LogStreamMessage{ID: opts.ID, Data: m.Data, StdStreamType: m.StdStreamType} 42 | } 43 | }) 44 | 45 | return ch, nil 46 | } 47 | -------------------------------------------------------------------------------- /store/etcdv3/service_test.go: -------------------------------------------------------------------------------- 1 | package etcdv3 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "sort" 7 | "testing" 8 | "time" 9 | 10 | "github.com/stretchr/testify/assert" 11 | ) 12 | 13 | func TestRegisterServiceWithDeregister(t *testing.T) { 14 | m := NewMercury(t) 15 | 16 | ctx := context.Background() 17 | svc := "svc" 18 | path := fmt.Sprintf(serviceStatusKey, svc) 19 | _, deregister, err := m.RegisterService(ctx, svc, time.Minute) 20 | assert.NoError(t, err) 21 | 22 | kv, err := m.GetOne(ctx, path) 23 | assert.NoError(t, err) 24 | assert.Equal(t, path, string(kv.Key)) 25 | 26 | deregister() 27 | //time.Sleep(time.Second) 28 | kv, err = m.GetOne(ctx, path) 29 | assert.Error(t, err) 30 | assert.Nil(t, kv) 31 | } 32 | 33 | func TestServiceStatusStream(t *testing.T) { 34 | m := NewMercury(t) 35 | ctx, 
cancel := context.WithCancel(context.Background()) 36 | defer cancel() 37 | _, unregisterService1, err := m.RegisterService(ctx, "127.0.0.1:5001", time.Second) 38 | assert.NoError(t, err) 39 | ch, err := m.ServiceStatusStream(ctx) 40 | assert.NoError(t, err) 41 | assert.Equal(t, <-ch, []string{"127.0.0.1:5001"}) 42 | _, _, err = m.RegisterService(ctx, "127.0.0.1:5002", time.Second) 43 | assert.NoError(t, err) 44 | endpoints := <-ch 45 | sort.Strings(endpoints) 46 | assert.Equal(t, endpoints, []string{"127.0.0.1:5001", "127.0.0.1:5002"}) 47 | unregisterService1() 48 | assert.Equal(t, <-ch, []string{"127.0.0.1:5002"}) 49 | } 50 | -------------------------------------------------------------------------------- /rpc/counter.go: -------------------------------------------------------------------------------- 1 | package rpc 2 | 3 | import ( 4 | "github.com/projecteru2/core/log" 5 | "github.com/projecteru2/core/types" 6 | "github.com/projecteru2/core/utils" 7 | 8 | "golang.org/x/net/context" 9 | ) 10 | 11 | type task struct { 12 | v *Vibranium 13 | name string 14 | verbose bool 15 | context context.Context 16 | cancel context.CancelFunc 17 | } 18 | 19 | // gRPC上全局的计数器 20 | // 只有在任务数为0的时候才给停止 21 | // 为啥会加在gRPC server上呢? 22 | // 因为一个入口给一个最简单了... 23 | 24 | // 增加一个任务, 在任务调用之前要调用一次. 25 | // 否则任务不被追踪, 不保证任务能够正常完成. 26 | func (v *Vibranium) newTask(ctx context.Context, name string, verbose bool) *task { 27 | if ctx != nil { 28 | ctx = context.WithValue(ctx, types.TracingID, utils.RandomString(8)) 29 | } 30 | ctx, cancel := context.WithCancel(ctx) 31 | if verbose { 32 | log.WithFunc("vibranium.newTask").WithField("name", name).Debug(ctx, "task added") 33 | } 34 | v.counter.Add(1) 35 | v.TaskNum++ 36 | return &task{ 37 | v: v, 38 | name: name, 39 | verbose: verbose, 40 | context: ctx, 41 | cancel: cancel, 42 | } 43 | } 44 | 45 | // 完成一个任务, 在任务执行完之后调用一次. 46 | // 否则计数器用完不会为0, 你也别想退出这个进程了. 47 | func (t *task) done() { 48 | if t.verbose { 49 | log.WithFunc("vibranium.done").WithField("name", t.name).Debug(t.context, "task done") 50 | } 51 | t.v.counter.Done() 52 | t.v.TaskNum-- 53 | } 54 | 55 | // Wait for all tasks done 56 | // 会在外面graceful之后调用. 57 | // 不完成不给退出进程. 58 | func (v *Vibranium) Wait() { 59 | v.counter.Wait() 60 | } 61 | -------------------------------------------------------------------------------- /strategy/strategy.go: -------------------------------------------------------------------------------- 1 | package strategy 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/projecteru2/core/log" 7 | "github.com/projecteru2/core/types" 8 | ) 9 | 10 | const ( 11 | // Auto . 12 | Auto = "AUTO" 13 | // Fill . 14 | Fill = "FILL" 15 | // Each . 16 | Each = "EACH" 17 | // Global . 18 | Global = "GLOBAL" 19 | // Drained . 20 | Drained = "DRAINED" 21 | // Dummy for calculate capacity 22 | Dummy = "DUMMY" 23 | ) 24 | 25 | // Plans . 26 | var Plans = map[string]strategyFunc{ 27 | Auto: CommunismPlan, 28 | Fill: FillPlan, 29 | Each: AveragePlan, 30 | Global: GlobalPlan, 31 | Drained: DrainedPlan, 32 | } 33 | 34 | type strategyFunc = func(_ context.Context, _ []Info, need, total, limit int) (map[string]int, error) 35 | 36 | // Deploy . 
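// Deploy looks up the plan registered for strategy in Plans and hands it the node
// infos. A usage sketch (the infos slice and the numbers are illustrative):
//
//	deploy, err := Deploy(ctx, Drained, 3, 0, infos, 10)
//	if err != nil {
//		return nil, err
//	}
//	// deploy maps nodename -> how many new instances to start there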
37 | func Deploy(ctx context.Context, strategy string, count, nodesLimit int, strategyInfos []Info, total int) (map[string]int, error) { 38 | deployMethod, ok := Plans[strategy] 39 | if !ok { 40 | return nil, types.ErrInvaildDeployStrategy 41 | } 42 | if count <= 0 { 43 | return nil, types.ErrInvaildDeployCount 44 | } 45 | 46 | log.WithFunc("strategy.Deploy").Debugf(ctx, "strategy %s, infos %+v, need %d, total %d, limit %d", strategy, strategyInfos, count, total, nodesLimit) 47 | return deployMethod(ctx, strategyInfos, count, total, nodesLimit) 48 | } 49 | 50 | // Info . 51 | type Info struct { 52 | Nodename string 53 | 54 | Usage float64 55 | Rate float64 56 | 57 | Capacity int 58 | Count int 59 | } 60 | -------------------------------------------------------------------------------- /engine/types/image.go: -------------------------------------------------------------------------------- 1 | package types 2 | 3 | // BuildMethod . 4 | type BuildMethod int 5 | 6 | // Image contains image metadata 7 | type Image struct { 8 | ID string 9 | Tags []string 10 | } 11 | 12 | // BuildContentOptions . 13 | type BuildContentOptions struct { 14 | User string 15 | UID int 16 | *Builds 17 | } 18 | 19 | // BuildRefOptions . 20 | type BuildRefOptions struct { 21 | Name string 22 | Tags []string 23 | User string 24 | } 25 | 26 | // Builds defines builds 27 | type Builds struct { 28 | Stages []string `yaml:"stages,omitempty,flow"` 29 | Builds map[string]*Build `yaml:"builds,omitempty,flow"` 30 | } 31 | 32 | // Build defines a build 33 | type Build struct { 34 | Base string `yaml:"base,omitempty"` 35 | Repo string `yaml:"repo,omitempty"` 36 | Version string `yaml:"version,omitempty"` 37 | Dir string `yaml:"dir,omitempty"` 38 | Submodule bool `yaml:"submodule,omitempty"` 39 | Security bool `yaml:"security,omitempty"` 40 | Commands []string `yaml:"commands,omitempty,flow"` 41 | Envs map[string]string `yaml:"envs,omitempty,flow"` 42 | Args map[string]string `yaml:"args,omitempty,flow"` 43 | Labels map[string]string `yaml:"labels,omitempty,flow"` 44 | Artifacts map[string]string `yaml:"artifacts,omitempty,flow"` 45 | Cache map[string]string `yaml:"cache,omitempty,flow"` 46 | StopSignal string `yaml:"stop_signal,omitempty,flow"` 47 | } 48 | -------------------------------------------------------------------------------- /strategy/average.go: -------------------------------------------------------------------------------- 1 | package strategy 2 | 3 | import ( 4 | "context" 5 | "sort" 6 | 7 | "github.com/cockroachdb/errors" 8 | "github.com/projecteru2/core/log" 9 | "github.com/projecteru2/core/types" 10 | ) 11 | 12 | // AveragePlan deploys N workloads to each node 13 | // every node with enough capacity gets N more deployed on it 14 | // need is the amount required on each node, limit caps the number of nodes, guaranteeing that need*limit instances are added in this round 15 | // limit = 0 means deploy to all nodes 16 | func AveragePlan(ctx context.Context, infos []Info, need, _, limit int) (map[string]int, error) { 17 | log.WithFunc("strategy.AveragePlan").Debugf(ctx, "need %d limit %d infos %+v", need, limit, infos) 18 | scheduleInfosLength := len(infos) 19 | if limit == 0 { 20 | limit = scheduleInfosLength 21 | } 22 | if scheduleInfosLength < limit { 23 | return nil, errors.Wrapf(types.ErrInsufficientResource, "node len %d < limit, cannot alloc an average node plan", scheduleInfosLength) 24 | } 25 | sort.Slice(infos, func(i, j int) bool { return infos[i].Capacity > infos[j].Capacity }) 26 | p := sort.Search(scheduleInfosLength, func(i int) bool { return infos[i].Capacity < need }) 27 | if p == 0 { 28 | return nil, errors.Wrap(types.ErrInsufficientCapacity, 
"insufficient nodes, at least 1 needed") 29 | } 30 | if p < limit { 31 | return nil, errors.Wrapf(types.ErrInsufficientResource, "not enough nodes with capacity of %d, require %d nodes", need, limit) 32 | } 33 | deployMap := map[string]int{} 34 | for _, strategyInfo := range infos[:limit] { 35 | deployMap[strategyInfo.Nodename] += need 36 | } 37 | 38 | return deployMap, nil 39 | } 40 | -------------------------------------------------------------------------------- /engine/systemd/virtualization.go: -------------------------------------------------------------------------------- 1 | package systemd 2 | 3 | import ( 4 | "context" 5 | "io" 6 | 7 | enginetypes "github.com/projecteru2/core/engine/types" 8 | resourcetypes "github.com/projecteru2/core/resource/types" 9 | "github.com/projecteru2/core/types" 10 | ) 11 | 12 | // VirtualizationLogs fetches service logs 13 | func (e *Engine) VirtualizationLogs(_ context.Context, _ *enginetypes.VirtualizationLogStreamOptions) (stdout io.ReadCloser, stderr io.ReadCloser, err error) { 14 | err = types.ErrEngineNotImplemented 15 | return 16 | } 17 | 18 | // VirtualizationAttach attaches a service's stdio 19 | func (e *Engine) VirtualizationAttach(_ context.Context, _ string, _, _ bool) (stdout, stderr io.ReadCloser, writer io.WriteCloser, err error) { 20 | err = types.ErrEngineNotImplemented 21 | return 22 | } 23 | 24 | // VirtualizationResize resizes a terminal window 25 | func (e *Engine) VirtualizationResize(_ context.Context, _ string, _, _ uint) (err error) { 26 | err = types.ErrEngineNotImplemented 27 | return 28 | } 29 | 30 | // VirtualizationWait waits for service finishing 31 | func (e *Engine) VirtualizationWait(_ context.Context, _, _ string) (res *enginetypes.VirtualizationWaitResult, err error) { 32 | err = types.ErrEngineNotImplemented 33 | return 34 | } 35 | 36 | // VirtualizationUpdateResource updates service resource limits 37 | func (e *Engine) VirtualizationUpdateResource(context.Context, string, resourcetypes.Resources) (err error) { 38 | err = types.ErrEngineNotImplemented 39 | return 40 | } 41 | -------------------------------------------------------------------------------- /store/redis/deploy.go: -------------------------------------------------------------------------------- 1 | package redis 2 | 3 | import ( 4 | "context" 5 | "path/filepath" 6 | "strings" 7 | 8 | "github.com/projecteru2/core/log" 9 | ) 10 | 11 | // GetDeployStatus . 
12 | func (r *Rediaron) GetDeployStatus(ctx context.Context, appname, entryname string) (map[string]int, error) { 13 | // 手动加 / 防止不精确 14 | key := filepath.Join(workloadDeployPrefix, appname, entryname) + "/*" 15 | data, err := r.getByKeyPattern(ctx, key, 0) 16 | if err != nil { 17 | return nil, err 18 | } 19 | if len(data) == 0 { 20 | log.WithFunc("store.redis.GetDeployStatus").Warnf(ctx, "Deploy status not found %s.%s", appname, entryname) 21 | } 22 | 23 | deployCount := r.doGetDeployStatus(ctx, data) 24 | 25 | processingCount, err := r.doLoadProcessing(ctx, appname, entryname) 26 | if err != nil { 27 | return nil, err 28 | } 29 | 30 | // node count: deploy count + processing count 31 | nodeCount := map[string]int{} 32 | for node, count := range deployCount { 33 | nodeCount[node] = count 34 | } 35 | for node, count := range processingCount { 36 | nodeCount[node] += count 37 | } 38 | 39 | return nodeCount, nil 40 | } 41 | 42 | // doGetDeployStatus returns how many workload have been deployed on each node 43 | func (r *Rediaron) doGetDeployStatus(_ context.Context, data map[string]string) map[string]int { 44 | nodesCount := map[string]int{} 45 | for key := range data { 46 | parts := strings.Split(key, "/") 47 | nodename := parts[len(parts)-2] 48 | nodesCount[nodename]++ 49 | } 50 | 51 | return nodesCount 52 | } 53 | -------------------------------------------------------------------------------- /auth/simple/simple.go: -------------------------------------------------------------------------------- 1 | package simple 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/projecteru2/core/types" 7 | 8 | "google.golang.org/grpc" 9 | "google.golang.org/grpc/metadata" 10 | ) 11 | 12 | // BasicAuth use token to auth grcp request 13 | type BasicAuth struct { 14 | username string 15 | password string 16 | } 17 | 18 | // NewBasicAuth return a basicauth obj 19 | func NewBasicAuth(username, password string) *BasicAuth { 20 | return &BasicAuth{username, password} 21 | } 22 | 23 | // StreamInterceptor define stream interceptor 24 | func (b *BasicAuth) StreamInterceptor(srv any, stream grpc.ServerStream, _ *grpc.StreamServerInfo, handler grpc.StreamHandler) error { 25 | ctx := stream.Context() 26 | if err := b.doAuth(ctx); err != nil { 27 | return err 28 | } 29 | return handler(srv, stream) 30 | } 31 | 32 | // UnaryInterceptor define unary interceptor 33 | func (b *BasicAuth) UnaryInterceptor(ctx context.Context, req any, _ *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (any, error) { 34 | if err := b.doAuth(ctx); err != nil { 35 | return nil, err 36 | } 37 | return handler(ctx, req) 38 | } 39 | 40 | func (b *BasicAuth) doAuth(ctx context.Context) error { 41 | meta, ok := metadata.FromIncomingContext(ctx) 42 | if !ok { 43 | return types.ErrInvaildGRPCRequestMeta 44 | } 45 | passwords, ok := meta[b.username] 46 | if !ok { 47 | return types.ErrInvaildGRPCUsername 48 | } 49 | if len(passwords) < 1 || passwords[0] != b.password { 50 | return types.ErrInvaildGRPCPassword 51 | } 52 | return nil 53 | } 54 | -------------------------------------------------------------------------------- /engine/transform.go: -------------------------------------------------------------------------------- 1 | package engine 2 | 3 | import ( 4 | resourcetypes "github.com/projecteru2/core/resource/types" 5 | ) 6 | 7 | // TODO 可以考虑进一步简化,每个 engine 自行处理 8 | 9 | // VirtualizationResource define resources 10 | type VirtualizationResource struct { 11 | CPU map[string]int64 `json:"cpu_map" mapstructure:"cpu_map"` // for cpu binding 12 | 
Quota float64 `json:"cpu" mapstructure:"cpu"` // for cpu quota 13 | Memory int64 `json:"memory" mapstructure:"memory"` // for memory binding 14 | Storage int64 `json:"storage" mapstructure:"storage"` 15 | NUMANode string `json:"numa_node" mapstructure:"numa_node"` // numa node 16 | Volumes []string `json:"volumes" mapstructure:"volumes"` 17 | VolumePlan map[string]map[string]int64 `json:"volume_plan" mapstructure:"volume_plan"` // literal VolumePlan 18 | VolumeChanged bool `json:"volume_changed" mapstructure:"volume_changed"` // indicates whether the realloc request contains new volumes 19 | IOPSOptions map[string]string `json:"iops_options" mapstructure:"IOPS_options"` // format: {device_name: "read-IOPS:write-IOPS:read-bps:write-bps"} 20 | Remap bool `json:"remap" mapstructure:"remap"` 21 | } 22 | 23 | // MakeVirtualizationResource . 24 | func MakeVirtualizationResource[T any](engineParams resourcetypes.Resources, dst T, f func(resourcetypes.Resources, T) error) error { 25 | return f(engineParams, dst) 26 | } 27 | -------------------------------------------------------------------------------- /strategy/average_test.go: -------------------------------------------------------------------------------- 1 | package strategy 2 | 3 | import ( 4 | "context" 5 | "sort" 6 | "testing" 7 | 8 | "github.com/cockroachdb/errors" 9 | "github.com/projecteru2/core/types" 10 | "github.com/stretchr/testify/assert" 11 | ) 12 | 13 | func TestAveragePlan(t *testing.T) { 14 | // normal case 15 | nodes := deployedNodes() 16 | r, err := AveragePlan(context.Background(), nodes, 1, 0, 0) 17 | assert.NoError(t, err) 18 | finalCounts := []int{} 19 | for _, node := range nodes { 20 | finalCounts = append(finalCounts, node.Count+r[node.Nodename]) 21 | } 22 | sort.Ints(finalCounts) 23 | assert.ElementsMatch(t, []int{3, 4, 6, 8}, finalCounts) 24 | 25 | // nodes len < limit 26 | nodes = deployedNodes() 27 | _, err = AveragePlan(context.Background(), nodes, 100, 0, 5) 28 | assert.Error(t, err) 29 | // exceeds capacity 30 | nodes = deployedNodes() 31 | _, err = AveragePlan(context.Background(), nodes, 100, 0, 0) 32 | assert.Error(t, err) 33 | assert.True(t, errors.Is(err, types.ErrInsufficientCapacity)) 34 | // normal limit 35 | nodes = deployedNodes() 36 | _, err = AveragePlan(context.Background(), nodes, 1, 1, 1) 37 | assert.NoError(t, err) 38 | 39 | nodes = genNodesByCapCount([]int{1, 2, 3, 4, 5}, []int{3, 3, 3, 3, 3}) 40 | _, err = AveragePlan(context.Background(), nodes, 4, 100, 4) 41 | assert.Contains(t, err.Error(), "not enough nodes with capacity of 4, require 4 nodes") 42 | 43 | nodes = genNodesByCapCount([]int{1, 2, 3, 4, 5}, []int{3, 3, 3, 3, 3}) 44 | _, err = AveragePlan(context.Background(), nodes, 2, 100, 0) 45 | assert.Contains(t, err.Error(), "not enough nodes with capacity of 2, require 5 nodes") 46 | } 47 | -------------------------------------------------------------------------------- /lock/redis/lock_test.go: -------------------------------------------------------------------------------- 1 | package redislock 2 | 3 | import ( 4 | "context" 5 | "testing" 6 | "time" 7 | 8 | "github.com/alicebob/miniredis/v2" 9 | "github.com/go-redis/redis/v8" 10 | "github.com/stretchr/testify/suite" 11 | ) 12 | 13 | type RedisLockTestSuite struct { 14 | suite.Suite 15 | 16 | cli *redis.Client 17 | } 18 | 19 | func (s *RedisLockTestSuite) SetupTest() { 20 | s.cli.FlushAll(context.Background()) 21 | } 22 | 23 | func (s *RedisLockTestSuite) TearDownTest() { 24 | s.cli.FlushAll(context.Background()) 25 | } 26 | 27 | func (s *RedisLockTestSuite)
TestMutex() { 28 | _, err := New(s.cli, "", time.Second, time.Second) 29 | s.Error(err) 30 | l, err := New(s.cli, "test", time.Second, time.Second) 31 | s.NoError(err) 32 | 33 | ctx := context.Background() 34 | ctx, err = l.Lock(ctx) 35 | s.Nil(ctx.Err()) 36 | s.NoError(err) 37 | 38 | err = l.Unlock(ctx) 39 | s.NoError(err) 40 | } 41 | 42 | func (s *RedisLockTestSuite) TestTryLock() { 43 | l1, err := New(s.cli, "test", time.Second, time.Second) 44 | s.NoError(err) 45 | l2, err := New(s.cli, "test", time.Second, time.Second) 46 | s.NoError(err) 47 | 48 | ctx1, err := l1.Lock(context.Background()) 49 | s.Nil(ctx1.Err()) 50 | s.NoError(err) 51 | 52 | ctx2, err := l2.TryLock(context.Background()) 53 | s.Nil(ctx2) 54 | s.Error(err) 55 | } 56 | 57 | func TestRedisLock(t *testing.T) { 58 | s, err := miniredis.Run() 59 | if err != nil { 60 | t.Fail() 61 | } 62 | defer s.Close() 63 | 64 | cli := redis.NewClient(&redis.Options{ 65 | Addr: s.Addr(), 66 | DB: 0, 67 | }) 68 | defer cli.Close() 69 | suite.Run(t, &RedisLockTestSuite{ 70 | cli: cli, 71 | }) 72 | } 73 | -------------------------------------------------------------------------------- /engine/types/virtualization.go: -------------------------------------------------------------------------------- 1 | package types 2 | 3 | import ( 4 | resourcetypes "github.com/projecteru2/core/resource/types" 5 | ) 6 | 7 | // VirtualizationCreateOptions is used to create a virtualization target 8 | type VirtualizationCreateOptions struct { 9 | EngineParams resourcetypes.Resources 10 | Name string 11 | User string 12 | Image string 13 | WorkingDir string 14 | Stdin bool 15 | Privileged bool 16 | Cmd []string 17 | Env []string 18 | DNS []string 19 | Hosts []string 20 | Publish []string 21 | Sysctl map[string]string 22 | Labels map[string]string 23 | 24 | Debug bool 25 | Restart string 26 | 27 | Networks map[string]string 28 | 29 | LogType string 30 | LogConfig map[string]string 31 | 32 | RawArgs []byte 33 | Lambda bool 34 | 35 | AncestorWorkloadID string 36 | } 37 | 38 | // VirtualizationCreated is used to store name and ID 39 | type VirtualizationCreated struct { 40 | ID string 41 | Name string 42 | Labels map[string]string 43 | } 44 | 45 | // VirtualizationInfo stores virtualization info 46 | type VirtualizationInfo struct { 47 | ID string 48 | User string 49 | Image string 50 | Running bool 51 | Env []string 52 | Labels map[string]string 53 | Networks map[string]string 54 | // TODO other information like cpu memory 55 | } 56 | 57 | // VirtualizationWaitResult stores the exit result 58 | type VirtualizationWaitResult struct { 59 | Message string 60 | Code int64 61 | } 62 | 63 | // SendMessage is returned from the engine 64 | type SendMessage struct { 65 | ID string 66 | Path string 67 | Error error 68 | } 69 | -------------------------------------------------------------------------------- /strategy/fill.go: -------------------------------------------------------------------------------- 1 | package strategy 2 | 3 | import ( 4 | "context" 5 | "sort" 6 | 7 | "github.com/cockroachdb/errors" 8 | "github.com/projecteru2/core/log" 9 | "github.com/projecteru2/core/types" 10 | "github.com/projecteru2/core/utils" 11 | ) 12 | 13 | // FillPlan fills each node with workloads 14 | // According to the previous deployment, every node is topped up to N instances; nodes that already exceed N are treated as satisfied 15 | // need is the per-node upper bound, limit caps the number of nodes, guaranteeing the final state holds at least limit*need instances 16 | // limit = 0 means all nodes are filled 17 | func FillPlan(ctx context.Context, infos []Info, need, _, limit int) (_ map[string]int, err error) { 18 | log.WithFunc("strategy.FillPlan").Debugf(ctx, "need %d limit %d infos %+v", need,
limit, infos) 19 | scheduleInfosLength := len(infos) 20 | if limit == 0 { 21 | limit = scheduleInfosLength 22 | } 23 | if scheduleInfosLength < limit { 24 | return nil, errors.Wrapf(types.ErrInsufficientResource, "node len %d cannot alloc a fill node plan", scheduleInfosLength) 25 | } 26 | sort.Slice(infos, func(i, j int) bool { 27 | if infos[i].Count == infos[j].Count { 28 | return infos[i].Capacity > infos[j].Capacity 29 | } 30 | return infos[i].Count > infos[j].Count 31 | }) 32 | deployMap, toDeploy := make(map[string]int), 0 33 | for _, info := range infos { 34 | if info.Count+info.Capacity >= need { 35 | deployMap[info.Nodename] += utils.Max(need-info.Count, 0) 36 | toDeploy += deployMap[info.Nodename] 37 | limit-- 38 | if limit == 0 { 39 | if toDeploy == 0 { 40 | err = types.ErrAlreadyFilled 41 | } 42 | return deployMap, err 43 | } 44 | } 45 | } 46 | return nil, errors.Wrapf(types.ErrInsufficientResource, "not enough nodes that can fill up to %d instances, require %d nodes", need, limit) 47 | } 48 | -------------------------------------------------------------------------------- /utils/config_test.go: -------------------------------------------------------------------------------- 1 | package utils 2 | 3 | import ( 4 | "bytes" 5 | "io" 6 | "os" 7 | "testing" 8 | "time" 9 | 10 | "github.com/stretchr/testify/assert" 11 | ) 12 | 13 | func TestLoadConfig(t *testing.T) { 14 | f1 := "test" 15 | buffer := bytes.NewBufferString(f1) 16 | fname, err := TempFile(io.NopCloser(buffer)) 17 | assert.NoError(t, err) 18 | _, err = LoadConfig(fname) 19 | assert.Error(t, err) 20 | os.Remove(fname) 21 | 22 | f1 = `log_level: "DEBUG" 23 | bind: ":5001" 24 | statsd: "127.0.0.1:8125" 25 | profile: ":12346" 26 | global_timeout: 300s 27 | 28 | auth: 29 | username: admin 30 | password: password 31 | etcd: 32 | machines: 33 | - "http://127.0.0.1:2379" 34 | lock_prefix: "core/_lock" 35 | git: 36 | public_key: "***REMOVED***" 37 | private_key: "***REMOVED***" 38 | token: "***REMOVED***" 39 | scm_type: "github" 40 | docker: 41 | network_mode: "bridge" 42 | cert_path: "/etc/eru/tls" 43 | hub: "hub.docker.com" 44 | namespace: "projecteru2" 45 | build_pod: "eru-test" 46 | local_dns: true 47 | ` 48 | 49 | buffer = bytes.NewBufferString(f1) 50 | fname, err = TempFile(io.NopCloser(buffer)) 51 | assert.NoError(t, err) 52 | config, err := LoadConfig(fname) 53 | assert.NoError(t, err) 54 | assert.Equal(t, config.LockTimeout, time.Duration(time.Second*30)) 55 | assert.Equal(t, config.GlobalTimeout, time.Duration(time.Second*300)) 56 | assert.Equal(t, config.Etcd.Prefix, "/eru") 57 | assert.Equal(t, config.Docker.Log.Type, "journald") 58 | assert.Equal(t, config.Docker.APIVersion, "1.32") 59 | assert.Equal(t, config.Scheduler.MaxShare, -1) 60 | assert.Equal(t, config.Scheduler.ShareBase, 100) 61 | os.Remove(fname) 62 | } 63 | -------------------------------------------------------------------------------- /utils/file_test.go: -------------------------------------------------------------------------------- 1 | package utils 2 | 3 | import ( 4 | "os" 5 | "path/filepath" 6 | "testing" 7 | 8 | "github.com/stretchr/testify/assert" 9 | ) 10 | 11 | func TestListAllExecutableFiles(t *testing.T) { 12 | dir, err := os.MkdirTemp(os.TempDir(), "test*") 13 | assert.NoError(t, err) 14 | defer os.RemoveAll(dir) 15 | 16 | file, err := os.CreateTemp(dir, "abc") 17 | assert.NoError(t, err) 18 | 19 | subdir, err := os.MkdirTemp(dir, "def") 20 | assert.NoError(t, err) 21 | 22 | assert.NotNil(t, file) 23 | assert.NotNil(t, subdir) 24 | 25 | 
fInfo, err := os.Stat(file.Name()) 26 | assert.NoError(t, err) 27 | assert.NotNil(t, fInfo) 28 | 29 | assert.False(t, isExecutable(fInfo.Mode().Perm())) 30 | 31 | os.Chmod(file.Name(), 0777) 32 | fInfo, _ = os.Stat(file.Name()) 33 | assert.True(t, isExecutable(fInfo.Mode().Perm())) 34 | 35 | fs, err := ListAllExecutableFiles(dir) 36 | assert.NoError(t, err) 37 | assert.Len(t, fs, 1) 38 | } 39 | 40 | func TestListAllShareLibFiles(t *testing.T) { 41 | dir, err := os.MkdirTemp(os.TempDir(), "test*") 42 | assert.NoError(t, err) 43 | defer os.RemoveAll(dir) 44 | 45 | _, err = os.Create(filepath.Join(dir, "abc")) 46 | assert.NoError(t, err) 47 | 48 | _, err = os.Create(filepath.Join(dir, "bcd.so")) 49 | assert.NoError(t, err) 50 | 51 | subdir, err := os.MkdirTemp(dir, "def") 52 | assert.NoError(t, err) 53 | 54 | _, err = os.Create(filepath.Join(subdir, "abc1")) 55 | assert.NoError(t, err) 56 | 57 | _, err = os.Create(filepath.Join(subdir, "bcd1.so")) 58 | assert.NoError(t, err) 59 | 60 | fs, err := ListAllSharedLibFiles(dir) 61 | assert.NoError(t, err) 62 | assert.Len(t, fs, 1) 63 | assert.Equal(t, filepath.Join(dir, "bcd.so"), fs[0]) 64 | } 65 | -------------------------------------------------------------------------------- /cluster/calcium/log_test.go: -------------------------------------------------------------------------------- 1 | package calcium 2 | 3 | import ( 4 | "bytes" 5 | "context" 6 | "io" 7 | "testing" 8 | 9 | enginemocks "github.com/projecteru2/core/engine/mocks" 10 | storemocks "github.com/projecteru2/core/store/mocks" 11 | "github.com/projecteru2/core/types" 12 | 13 | "github.com/stretchr/testify/assert" 14 | "github.com/stretchr/testify/mock" 15 | ) 16 | 17 | func TestLogStream(t *testing.T) { 18 | c := NewTestCluster() 19 | store := c.store.(*storemocks.Store) 20 | engine := &enginemocks.API{} 21 | ID := "test" 22 | workload := &types.Workload{ 23 | ID: ID, 24 | Engine: engine, 25 | } 26 | ctx := context.Background() 27 | opts := &types.LogStreamOptions{ID: ID} 28 | // failed by GetWorkload 29 | store.On("GetWorkload", mock.Anything, mock.Anything).Return(nil, types.ErrMockError).Once() 30 | ch, err := c.LogStream(ctx, opts) 31 | assert.NoError(t, err) 32 | for c := range ch { 33 | assert.Equal(t, c.ID, ID) 34 | assert.Empty(t, c.Data) 35 | } 36 | store.On("GetWorkload", mock.Anything, mock.Anything).Return(workload, nil) 37 | // failed by VirtualizationLogs 38 | engine.On("VirtualizationLogs", mock.Anything, mock.Anything).Return(nil, nil, types.ErrMockError).Once() 39 | ch, err = c.LogStream(ctx, opts) 40 | assert.NoError(t, err) 41 | for c := range ch { 42 | assert.Equal(t, c.ID, ID) 43 | assert.Empty(t, c.Data) 44 | } 45 | reader := bytes.NewBufferString("aaaa\nbbbb\n") 46 | engine.On("VirtualizationLogs", mock.Anything, mock.Anything).Return(io.NopCloser(reader), nil, nil) 47 | // success 48 | ch, err = c.LogStream(ctx, opts) 49 | assert.NoError(t, err) 50 | for c := range ch { 51 | assert.Equal(t, c.ID, ID) 52 | assert.NotEmpty(t, c.Data) 53 | } 54 | } 55 | -------------------------------------------------------------------------------- /auth/mocks/Auth.go: -------------------------------------------------------------------------------- 1 | // Code generated by mockery v1.0.0. DO NOT EDIT. 
2 | 3 | package mocks 4 | 5 | import context "context" 6 | import grpc "google.golang.org/grpc" 7 | import mock "github.com/stretchr/testify/mock" 8 | 9 | // Auth is an autogenerated mock type for the Auth type 10 | type Auth struct { 11 | mock.Mock 12 | } 13 | 14 | // StreamInterceptor provides a mock function with given fields: srv, stream, info, handler 15 | func (_m *Auth) StreamInterceptor(srv interface{}, stream grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { 16 | ret := _m.Called(srv, stream, info, handler) 17 | 18 | var r0 error 19 | if rf, ok := ret.Get(0).(func(interface{}, grpc.ServerStream, *grpc.StreamServerInfo, grpc.StreamHandler) error); ok { 20 | r0 = rf(srv, stream, info, handler) 21 | } else { 22 | r0 = ret.Error(0) 23 | } 24 | 25 | return r0 26 | } 27 | 28 | // UnaryInterceptor provides a mock function with given fields: ctx, req, info, handler 29 | func (_m *Auth) UnaryInterceptor(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { 30 | ret := _m.Called(ctx, req, info, handler) 31 | 32 | var r0 interface{} 33 | if rf, ok := ret.Get(0).(func(context.Context, interface{}, *grpc.UnaryServerInfo, grpc.UnaryHandler) interface{}); ok { 34 | r0 = rf(ctx, req, info, handler) 35 | } else { 36 | if ret.Get(0) != nil { 37 | r0 = ret.Get(0).(interface{}) 38 | } 39 | } 40 | 41 | var r1 error 42 | if rf, ok := ret.Get(1).(func(context.Context, interface{}, *grpc.UnaryServerInfo, grpc.UnaryHandler) error); ok { 43 | r1 = rf(ctx, req, info, handler) 44 | } else { 45 | r1 = ret.Error(1) 46 | } 47 | 48 | return r0, r1 49 | } 50 | -------------------------------------------------------------------------------- /store/etcdv3/deploy.go: -------------------------------------------------------------------------------- 1 | package etcdv3 2 | 3 | import ( 4 | "context" 5 | "path/filepath" 6 | "strings" 7 | 8 | clientv3 "go.etcd.io/etcd/client/v3" 9 | 10 | "github.com/projecteru2/core/log" 11 | ) 12 | 13 | // GetDeployStatus gets the deploy status from the store 14 | func (m *Mercury) GetDeployStatus(ctx context.Context, appname, entryname string) (map[string]int, error) { 15 | // add the "/" manually to avoid imprecise prefix matching 16 | key := filepath.Join(workloadDeployPrefix, appname, entryname) + "/" 17 | resp, err := m.Get(ctx, key, clientv3.WithPrefix(), clientv3.WithKeysOnly()) 18 | if err != nil { 19 | return nil, err 20 | } 21 | if resp.Count == 0 { 22 | log.WithFunc("store.etcdv3.GetDeployStatus").Warnf(ctx, "Deploy status not found %s.%s", appname, entryname) 23 | } 24 | 25 | deployCount := m.doGetDeployStatus(ctx, resp) 26 | processingCount, err := m.doLoadProcessing(ctx, appname, entryname) 27 | if err != nil { 28 | return nil, err 29 | } 30 | 31 | // node count: deploy count + processing count 32 | nodeCount := map[string]int{} 33 | for node, count := range deployCount { 34 | nodeCount[node] = count 35 | } 36 | for node, count := range processingCount { 37 | nodeCount[node] += count 38 | } 39 | 40 | return nodeCount, nil 41 | } 42 | 43 | // doGetDeployStatus returns how many workloads have been deployed on each node 44 | func (m *Mercury) doGetDeployStatus(_ context.Context, resp *clientv3.GetResponse) map[string]int { 45 | nodesCount := map[string]int{} 46 | for _, ev := range resp.Kvs { 47 | key := string(ev.Key) 48 | parts := strings.Split(key, "/") 49 | nodename := parts[len(parts)-2] 50 | if _, ok := nodesCount[nodename]; !ok { 51 | nodesCount[nodename] = 1 52 | continue 53 | } 54 | nodesCount[nodename]++ 55 | } 56 | 57 |
return nodesCount 58 | } 59 | -------------------------------------------------------------------------------- /resource/plugins/binary/calculate.go: -------------------------------------------------------------------------------- 1 | package binary 2 | 3 | import ( 4 | "context" 5 | 6 | binarytypes "github.com/projecteru2/core/resource/plugins/binary/types" 7 | plugintypes "github.com/projecteru2/core/resource/plugins/types" 8 | ) 9 | 10 | // CalculateDeploy . 11 | func (p Plugin) CalculateDeploy(ctx context.Context, nodename string, deployCount int, resourceRequest plugintypes.WorkloadResourceRequest) (*plugintypes.CalculateDeployResponse, error) { 12 | req := &binarytypes.CalculateDeployRequest{ 13 | Nodename: nodename, 14 | DeployCount: deployCount, 15 | WorkloadResourceRequest: resourceRequest, 16 | } 17 | resp := &plugintypes.CalculateDeployResponse{} 18 | return resp, p.call(ctx, CalculateDeployCommand, req, resp) 19 | } 20 | 21 | // CalculateRealloc . 22 | func (p Plugin) CalculateRealloc(ctx context.Context, nodename string, resource plugintypes.WorkloadResource, resourceRequest plugintypes.WorkloadResourceRequest) (*plugintypes.CalculateReallocResponse, error) { 23 | req := &binarytypes.CalculateReallocRequest{ 24 | Nodename: nodename, 25 | WorkloadResource: resource, 26 | WorkloadResourceRequest: resourceRequest, 27 | } 28 | resp := &plugintypes.CalculateReallocResponse{} 29 | return resp, p.call(ctx, CalculateReallocCommand, req, resp) 30 | } 31 | 32 | // CalculateRemap . 33 | func (p Plugin) CalculateRemap(ctx context.Context, nodename string, workloadsResource map[string]plugintypes.WorkloadResource) (*plugintypes.CalculateRemapResponse, error) { 34 | req := &binarytypes.CalculateRemapRequest{ 35 | Nodename: nodename, 36 | WorkloadsResource: workloadsResource, 37 | } 38 | resp := &plugintypes.CalculateRemapResponse{} 39 | return resp, p.call(ctx, CalculateRemapCommand, req, resp) 40 | } 41 | -------------------------------------------------------------------------------- /resource/cobalt/metrics.go: -------------------------------------------------------------------------------- 1 | package cobalt 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/projecteru2/core/log" 7 | "github.com/projecteru2/core/resource/plugins" 8 | plugintypes "github.com/projecteru2/core/resource/plugins/types" 9 | "github.com/projecteru2/core/types" 10 | ) 11 | 12 | // GetMetricsDescription . 13 | func (m Manager) GetMetricsDescription(ctx context.Context) ([]*plugintypes.MetricsDescription, error) { 14 | var metricsDescriptions []*plugintypes.MetricsDescription 15 | resps, err := call(ctx, m.plugins, func(plugin plugins.Plugin) (*plugintypes.GetMetricsDescriptionResponse, error) { 16 | resp, err := plugin.GetMetricsDescription(ctx) 17 | return resp, err 18 | }) 19 | 20 | if err != nil { 21 | return nil, err 22 | } 23 | 24 | for _, resp := range resps { 25 | metricsDescriptions = append(metricsDescriptions, *resp...) 26 | } 27 | 28 | return metricsDescriptions, nil 29 | } 30 | 31 | // GetNodeMetrics . 
32 | func (m Manager) GetNodeMetrics(ctx context.Context, node *types.Node) ([]*plugintypes.Metrics, error) { 33 | logger := log.WithFunc("resource.cobalt.GetNodeMetrics").WithField("node", node.Name) 34 | 35 | var metrics []*plugintypes.Metrics 36 | resps, err := call(ctx, m.plugins, func(plugin plugins.Plugin) (*plugintypes.GetMetricsResponse, error) { 37 | resp, err := plugin.GetMetrics(ctx, node.Podname, node.Name) 38 | if err != nil { 39 | logger.Errorf(ctx, err, "plugin %+v failed to convert node resource info to metrics", plugin.Name()) 40 | } 41 | return resp, err 42 | }) 43 | 44 | if err != nil { 45 | logger.Error(ctx, err, "failed to convert node resource info to metrics") 46 | return nil, err 47 | } 48 | 49 | for _, resp := range resps { 50 | metrics = append(metrics, *resp...) 51 | } 52 | 53 | return metrics, nil 54 | } 55 | -------------------------------------------------------------------------------- /log/sentry.go: -------------------------------------------------------------------------------- 1 | package log 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "runtime/debug" 7 | "strings" 8 | "time" 9 | 10 | "github.com/cockroachdb/errors" 11 | "github.com/getsentry/sentry-go" 12 | "github.com/projecteru2/core/types" 13 | "google.golang.org/grpc/peer" 14 | ) 15 | 16 | // SentryDefer . 17 | func SentryDefer() { 18 | if sentryDSN == "" { 19 | return 20 | } 21 | defer sentry.Flush(2 * time.Second) 22 | if r := recover(); r != nil { 23 | sentry.CaptureMessage(fmt.Sprintf("%+v: %s", r, debug.Stack())) 24 | panic(r) 25 | } 26 | } 27 | 28 | func genGRPCTracingInfo(ctx context.Context) (tracingInfo string) { 29 | if ctx == nil { 30 | return "" 31 | } 32 | 33 | tracing := []string{} 34 | if p, ok := peer.FromContext(ctx); ok { 35 | tracing = append(tracing, p.Addr.String()) 36 | } 37 | 38 | if traceID := ctx.Value(types.TracingID); traceID != nil { 39 | if tid, ok := traceID.(string); ok { 40 | tracing = append(tracing, tid) 41 | } 42 | } 43 | tracingInfo = strings.Join(tracing, "-") 44 | return 45 | } 46 | 47 | func reportToSentry(ctx context.Context, level sentry.Level, err error, format string, args ...any) { //nolint 48 | if sentryDSN == "" { 49 | return 50 | } 51 | defer sentry.Flush(2 * time.Second) 52 | event, extraDetails := errors.BuildSentryReport(err) 53 | for k, v := range extraDetails { 54 | event.Extra[k] = v 55 | } 56 | event.Level = level 57 | 58 | if msg := fmt.Sprintf(format, args...); msg != "" { 59 | event.Tags["message"] = msg 60 | } 61 | 62 | if tracingInfo := genGRPCTracingInfo(ctx); tracingInfo != "" { 63 | event.Tags["tracing"] = tracingInfo 64 | } 65 | 66 | if res := string(*sentry.CaptureEvent(event)); res != "" { 67 | WithFunc("log.reportToSentry").WithField("ID", res).Info(ctx, "Report to Sentry") 68 | } 69 | } 70 | -------------------------------------------------------------------------------- /log/inner.go: -------------------------------------------------------------------------------- 1 | package log 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/alphadose/haxmap" 7 | "github.com/getsentry/sentry-go" 8 | "github.com/rs/zerolog" 9 | ) 10 | 11 | func fatalf(ctx context.Context, err error, format string, fields *haxmap.Map[string, any], args ...any) { 12 | args = argsValidate(args) 13 | reportToSentry(ctx, sentry.LevelFatal, err, format, args...) 14 | f := globalLogger.Fatal() 15 | wrap(f, fields).Err(err).Msgf(format, args...) 
16 | } 17 | 18 | func warnf(_ context.Context, format string, fields *haxmap.Map[string, any], args ...any) { 19 | args = argsValidate(args) 20 | f := globalLogger.Warn() 21 | wrap(f, fields).Msgf(format, args...) 22 | } 23 | 24 | func infof(_ context.Context, format string, fields *haxmap.Map[string, any], args ...any) { 25 | args = argsValidate(args) 26 | f := globalLogger.Info() 27 | wrap(f, fields).Msgf(format, args...) 28 | } 29 | 30 | func debugf(_ context.Context, format string, fields *haxmap.Map[string, any], args ...any) { 31 | args = argsValidate(args) 32 | f := globalLogger.Debug() 33 | wrap(f, fields).Msgf(format, args...) 34 | } 35 | 36 | func errorf(ctx context.Context, err error, format string, fields *haxmap.Map[string, any], args ...any) { 37 | if err == nil { 38 | return 39 | } 40 | args = argsValidate(args) 41 | reportToSentry(ctx, sentry.LevelError, err, format, args...) 42 | f := globalLogger.Error() 43 | wrap(f, fields).Stack().Err(err).Msgf(format, args...) 44 | } 45 | 46 | func argsValidate(args []any) []any { 47 | if len(args) > 0 { 48 | return args 49 | } 50 | return []any{""} 51 | } 52 | 53 | func wrap(f *zerolog.Event, kv *haxmap.Map[string, any]) *zerolog.Event { 54 | if kv == nil { 55 | return f 56 | } 57 | kv.ForEach(func(k string, v any) bool { 58 | f = f.Interface(k, v) 59 | return true 60 | }) 61 | return f 62 | } 63 | -------------------------------------------------------------------------------- /types/specs.go: -------------------------------------------------------------------------------- 1 | package types 2 | 3 | import ( 4 | "strings" 5 | ) 6 | 7 | // Hook define hooks 8 | type Hook struct { 9 | AfterStart []string `yaml:"after_start,omitempty"` 10 | BeforeStop []string `yaml:"before_stop,omitempty"` 11 | AfterResume []string `yaml:"after_resume,omitempty"` 12 | BeforeSuspend []string `yaml:"before_suspend,omitempty"` 13 | Force bool `yaml:"force,omitempty"` 14 | } 15 | 16 | // HealthCheck define healthcheck 17 | type HealthCheck struct { 18 | TCPPorts []string `yaml:"tcp_ports,omitempty,flow"` 19 | HTTPPort string `yaml:"http_port"` 20 | HTTPURL string `yaml:"url,omitempty"` 21 | HTTPCode int `yaml:"code,omitempty"` 22 | } 23 | 24 | // Entrypoint is a single entrypoint 25 | type Entrypoint struct { 26 | Name string `yaml:"name,omitempty"` 27 | Commands []string `yaml:"commands,omitempty"` 28 | Privileged bool `yaml:"privileged,omitempty"` 29 | Dir string `yaml:"dir,omitempty"` 30 | Log *LogConfig `yaml:"log,omitempty"` 31 | Publish []string `yaml:"publish,omitempty,flow"` 32 | HealthCheck *HealthCheck `yaml:"healthcheck,omitempty,flow"` 33 | Hook *Hook `yaml:"hook,omitempty,flow"` 34 | Restart string `yaml:"restart,omitempty"` 35 | Sysctls map[string]string `yaml:"sysctls,omitempty,flow"` 36 | } 37 | 38 | // Validate checks entrypoint's name 39 | func (e *Entrypoint) Validate() error { 40 | if e.Name == "" { 41 | return ErrEmptyEntrypointName 42 | } 43 | if strings.Contains(e.Name, "_") { 44 | return ErrUnderlineInEntrypointName 45 | } 46 | return nil 47 | } 48 | 49 | // Bind define a single bind 50 | type Bind struct { 51 | InWorkloadPath string `yaml:"bind,omitempty"` 52 | ReadOnly bool `yaml:"ro,omitempty"` 53 | } 54 | -------------------------------------------------------------------------------- /strategy/drained_test.go: -------------------------------------------------------------------------------- 1 | package strategy 2 | 3 | import ( 4 | "context" 5 | "testing" 6 | 7 | "github.com/stretchr/testify/assert" 8 | ) 9 | 10 | func 
TestDrainedPlan(t *testing.T) { 11 | nodes := []Info{ 12 | { 13 | Nodename: "n1", 14 | Capacity: 10, 15 | Count: 2, 16 | }, 17 | { 18 | Nodename: "n2", 19 | Capacity: 9, 20 | Count: 3, 21 | }, 22 | { 23 | Nodename: "n3", 24 | Capacity: 10, 25 | Count: 5, 26 | }, 27 | { 28 | Nodename: "n4", 29 | Capacity: 8, 30 | Count: 7, 31 | }, 32 | } 33 | 34 | r, err := DrainedPlan(context.Background(), nodes, 1, 100, 0) 35 | assert.NoError(t, err) 36 | assert.ElementsMatch(t, []int{2, 3, 5, 8}, getFinalStatus(r, nodes)) 37 | 38 | r, err = DrainedPlan(context.Background(), nodes, 2, 1, 0) 39 | assert.Error(t, err) 40 | 41 | r, err = DrainedPlan(context.Background(), nodes, 2, 100, 0) 42 | assert.NoError(t, err) 43 | assert.ElementsMatch(t, []int{2, 3, 5, 9}, getFinalStatus(r, nodes)) 44 | 45 | r, err = DrainedPlan(context.Background(), nodes, 3, 100, 0) 46 | assert.ElementsMatch(t, []int{2, 3, 5, 10}, getFinalStatus(r, nodes)) 47 | 48 | r, err = DrainedPlan(context.Background(), nodes, 10, 100, 0) 49 | assert.NoError(t, err) 50 | assert.ElementsMatch(t, []int{2, 5, 5, 15}, getFinalStatus(r, nodes)) 51 | 52 | r, err = DrainedPlan(context.Background(), nodes, 25, 100, 0) 53 | assert.NoError(t, err) 54 | assert.ElementsMatch(t, []int{10, 12, 5, 15}, getFinalStatus(r, nodes)) 55 | 56 | r, err = DrainedPlan(context.Background(), nodes, 29, 100, 0) 57 | assert.NoError(t, err) 58 | assert.ElementsMatch(t, []int{12, 12, 7, 15}, getFinalStatus(r, nodes)) 59 | 60 | r, err = DrainedPlan(context.Background(), nodes, 37, 100, 0) 61 | assert.NoError(t, err) 62 | assert.ElementsMatch(t, []int{12, 12, 15, 15}, getFinalStatus(r, nodes)) 63 | } 64 | -------------------------------------------------------------------------------- /cluster/calcium/copy.go: -------------------------------------------------------------------------------- 1 | package calcium 2 | 3 | import ( 4 | "context" 5 | "sync" 6 | 7 | "github.com/projecteru2/core/log" 8 | "github.com/projecteru2/core/types" 9 | ) 10 | 11 | // Copy uses VirtualizationCopyFrom cp to copy specified things and send to remote 12 | func (c *Calcium) Copy(ctx context.Context, opts *types.CopyOptions) (chan *types.CopyMessage, error) { 13 | logger := log.WithFunc("calcium.Copy").WithField("opts", opts) 14 | if err := opts.Validate(); err != nil { 15 | logger.Error(ctx, err) 16 | return nil, err 17 | } 18 | 19 | ch := make(chan *types.CopyMessage) 20 | _ = c.pool.Invoke(func() { 21 | defer close(ch) 22 | 23 | wg := sync.WaitGroup{} 24 | wg.Add(len(opts.Targets)) 25 | defer wg.Wait() 26 | logger.Infof(ctx, "Copy %d workloads files", len(opts.Targets)) 27 | 28 | // workload one by one 29 | for ID, paths := range opts.Targets { 30 | _ = c.pool.Invoke(func(ID string, paths []string) func() { 31 | return func() { 32 | defer wg.Done() 33 | 34 | workload, err := c.GetWorkload(ctx, ID) 35 | if err != nil { 36 | for _, path := range paths { 37 | logger.Error(ctx, err) 38 | ch <- &types.CopyMessage{ 39 | ID: ID, 40 | Path: path, 41 | Error: err, 42 | } 43 | } 44 | return 45 | } 46 | 47 | for _, path := range paths { 48 | content, uid, gid, mode, err := workload.Engine.VirtualizationCopyFrom(ctx, workload.ID, path) 49 | ch <- &types.CopyMessage{ 50 | ID: ID, 51 | Path: path, 52 | Error: err, 53 | LinuxFile: types.LinuxFile{ 54 | Filename: path, 55 | Content: content, 56 | UID: uid, 57 | GID: gid, 58 | Mode: mode, 59 | }, 60 | } 61 | } 62 | } 63 | }(ID, paths)) 64 | } 65 | }) 66 | return ch, nil 67 | } 68 | -------------------------------------------------------------------------------- 
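A minimal usage sketch (illustrative only, not a file in this repository) for the Copy API in /cluster/calcium/copy.go above. It assumes it sits in the calcium package with an existing *Calcium value; the workload ID and path inside Targets are placeholders.

func exampleCopy(ctx context.Context, c *Calcium) error {
	// Targets maps a workload ID to the paths to copy out of it.
	ch, err := c.Copy(ctx, &types.CopyOptions{
		Targets: map[string][]string{"workload-id": {"/etc/hosts"}}, // hypothetical ID and path
	})
	if err != nil {
		return err
	}
	// One CopyMessage streams back per requested path; Error is set per message on failure.
	for msg := range ch {
		if msg.Error != nil {
			log.WithFunc("exampleCopy").Errorf(ctx, msg.Error, "copy %s:%s failed", msg.ID, msg.Path)
			continue
		}
		_ = msg.LinuxFile.Content // bytes copied out of the workload
	}
	return nil
}
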
/store/etcdv3/mercury.go: -------------------------------------------------------------------------------- 1 | package etcdv3 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/panjf2000/ants/v2" 7 | "github.com/projecteru2/core/store/etcdv3/meta" 8 | "github.com/projecteru2/core/types" 9 | "github.com/projecteru2/core/utils" 10 | ) 11 | 12 | const ( 13 | podInfoKey = "/pod/info/%s" // /pod/info/{podname} 14 | serviceStatusKey = "/services/%s" // /service/{ipv4:port} 15 | 16 | nodeInfoKey = "/node/%s" // /node/{nodename} 17 | nodePodKey = "/node/%s:pod/%s" // /node/{podname}:pod/{nodename} 18 | nodeCaKey = "/node/%s:ca" // /node/{nodename}:ca 19 | nodeCertKey = "/node/%s:cert" // /node/{nodename}:cert 20 | nodeKeyKey = "/node/%s:key" // /node/{nodename}:key 21 | nodeStatusPrefix = "/status:node/" // /status:node/{nodename} -> node status key 22 | nodeWorkloadsKey = "/node/%s:workloads/%s" // /node/{nodename}:workloads/{workloadID} 23 | 24 | workloadInfoKey = "/workloads/%s" // /workloads/{workloadID} 25 | workloadDeployPrefix = "/deploy" // /deploy/{appname}/{entrypoint}/{nodename}/{workloadID} 26 | workloadStatusPrefix = "/status" // /status/{appname}/{entrypoint}/{nodename}/{workloadID} value -> something by agent 27 | workloadProcessingPrefix = "/processing" // /processing/{appname}/{entrypoint}/{nodename}/{opsIdent} value -> count 28 | ) 29 | 30 | // Mercury means store with etcdv3 31 | type Mercury struct { 32 | meta.KV 33 | config types.Config 34 | pool *ants.PoolWithFunc 35 | } 36 | 37 | // New for create a Mercury instance 38 | func New(config types.Config, t *testing.T) (m *Mercury, err error) { 39 | pool, err := utils.NewPool(config.MaxConcurrency) 40 | if err != nil { 41 | return nil, err 42 | } 43 | m = &Mercury{config: config, pool: pool} 44 | m.KV, err = meta.NewETCD(config.Etcd, t) 45 | return 46 | } 47 | -------------------------------------------------------------------------------- /cluster/calcium/remap.go: -------------------------------------------------------------------------------- 1 | package calcium 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/projecteru2/core/log" 7 | "github.com/projecteru2/core/types" 8 | "github.com/projecteru2/core/utils" 9 | ) 10 | 11 | type remapMsg struct { 12 | ID string 13 | err error 14 | } 15 | 16 | // RemapResourceAndLog called on changes of resource binding, such as cpu binding 17 | // as an internal api, remap doesn't lock node, the responsibility of that should be taken on by caller 18 | func (c *Calcium) RemapResourceAndLog(ctx context.Context, logger *log.Fields, node *types.Node) { 19 | ctx, cancel := context.WithTimeout(utils.NewInheritCtx(ctx), c.config.GlobalTimeout) 20 | defer cancel() 21 | 22 | err := c.withNodeOperationLocked(ctx, node.Name, func(ctx context.Context, node *types.Node) error { 23 | if ch, err := c.doRemapResource(ctx, node); err == nil { 24 | for msg := range ch { 25 | logger.Infof(ctx, "remap workload ID %+v", msg.ID) 26 | if msg.err != nil { 27 | logger.Error(ctx, msg.err) 28 | } 29 | } 30 | } 31 | return nil 32 | }) 33 | 34 | if err != nil { 35 | logger.Error(ctx, err, "remap node failed") 36 | } 37 | } 38 | 39 | func (c *Calcium) doRemapResource(ctx context.Context, node *types.Node) (ch chan *remapMsg, err error) { 40 | workloads, err := c.store.ListNodeWorkloads(ctx, node.Name, nil) 41 | if err != nil { 42 | return 43 | } 44 | 45 | engineParamsMap, err := c.rmgr.Remap(ctx, node.Name, workloads) 46 | if err != nil { 47 | return nil, err 48 | } 49 | 50 | ch = make(chan *remapMsg, 
len(engineParamsMap)) 51 | _ = c.pool.Invoke(func() { 52 | defer close(ch) 53 | for workloadID, engineParams := range engineParamsMap { 54 | ch <- &remapMsg{ 55 | ID: workloadID, 56 | err: node.Engine.VirtualizationUpdateResource(ctx, workloadID, engineParams), 57 | } 58 | } 59 | }) 60 | 61 | return ch, nil 62 | } 63 | -------------------------------------------------------------------------------- /cluster/calcium/send.go: -------------------------------------------------------------------------------- 1 | package calcium 2 | 3 | import ( 4 | "bytes" 5 | "context" 6 | "sync" 7 | 8 | "github.com/projecteru2/core/engine" 9 | "github.com/projecteru2/core/log" 10 | "github.com/projecteru2/core/types" 11 | ) 12 | 13 | // Send sends files to workloads 14 | func (c *Calcium) Send(ctx context.Context, opts *types.SendOptions) (chan *types.SendMessage, error) { 15 | logger := log.WithFunc("calcium.Send").WithField("opts", opts) 16 | if err := opts.Validate(); err != nil { 17 | logger.Error(ctx, err) 18 | return nil, err 19 | } 20 | ch := make(chan *types.SendMessage) 21 | _ = c.pool.Invoke(func() { 22 | defer close(ch) 23 | wg := &sync.WaitGroup{} 24 | wg.Add(len(opts.IDs)) 25 | 26 | for _, ID := range opts.IDs { 27 | logger.Infof(ctx, "Send files to %s", ID) 28 | _ = c.pool.Invoke(func(ID string) func() { 29 | return func() { 30 | defer wg.Done() 31 | if err := c.withWorkloadLocked(ctx, ID, false, func(ctx context.Context, workload *types.Workload) error { 32 | for _, file := range opts.Files { 33 | err := c.doSendFileToWorkload(ctx, workload.Engine, workload.ID, file) 34 | logger.Error(ctx, err) 35 | ch <- &types.SendMessage{ID: ID, Path: file.Filename, Error: err} 36 | } 37 | return nil 38 | }); err != nil { 39 | logger.Error(ctx, err) 40 | ch <- &types.SendMessage{ID: ID, Error: err} 41 | } 42 | } 43 | }(ID)) 44 | } 45 | wg.Wait() 46 | }) 47 | return ch, nil 48 | } 49 | 50 | func (c *Calcium) doSendFileToWorkload(ctx context.Context, engine engine.API, ID string, file types.LinuxFile) error { 51 | log.WithFunc("calcium.doSendFileToWorkload").Infof(ctx, "Send file to %s:%s", ID, file.Filename) 52 | return engine.VirtualizationCopyChunkTo(ctx, ID, file.Filename, int64(len(file.Content)), bytes.NewReader(file.Clone().Content), file.UID, file.GID, file.Mode) 53 | } 54 | -------------------------------------------------------------------------------- /store/redis/ephemeral.go: -------------------------------------------------------------------------------- 1 | package redis 2 | 3 | import ( 4 | "context" 5 | "sync" 6 | "time" 7 | 8 | "github.com/projecteru2/core/log" 9 | "github.com/projecteru2/core/types" 10 | 11 | "github.com/cockroachdb/errors" 12 | ) 13 | 14 | var ephemeralValue = "__aaron__" 15 | 16 | // StartEphemeral starts an ephemeral kv pair.
17 | func (r *Rediaron) StartEphemeral(ctx context.Context, path string, heartbeat time.Duration) (<-chan struct{}, func(), error) { 18 | set, err := r.cli.SetNX(ctx, path, ephemeralValue, heartbeat).Result() 19 | if err != nil { 20 | return nil, nil, err 21 | } 22 | if !set { 23 | return nil, nil, errors.Wrap(types.ErrKeyExists, path) 24 | } 25 | 26 | ctx, cancel := context.WithCancel(ctx) 27 | expiry := make(chan struct{}) 28 | 29 | var wg sync.WaitGroup 30 | wg.Add(1) 31 | _ = r.pool.Invoke(func() { 32 | defer wg.Done() 33 | defer close(expiry) 34 | 35 | tick := time.NewTicker(heartbeat / 3) 36 | defer tick.Stop() 37 | 38 | for { 39 | select { 40 | case <-tick.C: 41 | if err := r.refreshEphemeral(ctx, path, heartbeat); err != nil { 42 | r.revokeEphemeral(path) 43 | return 44 | } 45 | case <-ctx.Done(): 46 | r.revokeEphemeral(path) 47 | return 48 | } 49 | } 50 | }) 51 | 52 | return expiry, func() { 53 | cancel() 54 | wg.Wait() 55 | }, nil 56 | } 57 | 58 | func (r *Rediaron) revokeEphemeral(path string) { 59 | ctx, cancel := context.WithTimeout(context.TODO(), time.Second) 60 | defer cancel() 61 | if _, err := r.cli.Del(ctx, path).Result(); err != nil { 62 | log.Errorf(ctx, err, "revoke with %s failed", path) 63 | } 64 | } 65 | 66 | func (r *Rediaron) refreshEphemeral(ctx context.Context, path string, ttl time.Duration) error { 67 | ctx, cancel := context.WithTimeout(ctx, time.Second) 68 | defer cancel() 69 | _, err := r.cli.Expire(ctx, path, ttl).Result() 70 | return err 71 | } 72 | -------------------------------------------------------------------------------- /cluster/calcium/helper.go: -------------------------------------------------------------------------------- 1 | package calcium 2 | 3 | import ( 4 | "github.com/projecteru2/core/log" 5 | "github.com/projecteru2/core/types" 6 | "github.com/projecteru2/core/utils" 7 | 8 | "golang.org/x/net/context" 9 | ) 10 | 11 | func distributionInspect(ctx context.Context, node *types.Node, image string, digests []string) bool { 12 | logger := log.WithFunc("calcium.distributionInspect") 13 | remoteDigest, err := node.Engine.ImageRemoteDigest(ctx, image) 14 | if err != nil { 15 | logger.Error(ctx, err, "get manifest failed") 16 | return false 17 | } 18 | 19 | for _, digest := range digests { 20 | if digest == remoteDigest { 21 | logger.Debugf(ctx, "local digest %s", digest) 22 | logger.Debugf(ctx, "remote digest %s", remoteDigest) 23 | return true 24 | } 25 | } 26 | return false 27 | } 28 | 29 | // Pull an image 30 | func pullImage(ctx context.Context, node *types.Node, image string) error { 31 | logger := log.WithFunc("calcium.pullImage").WithField("node", node.Name).WithField("image", image) 32 | logger.Info(ctx, "Pulling image") 33 | if image == "" { 34 | return types.ErrNoImage 35 | } 36 | 37 | // check local 38 | exists := false 39 | digests, err := node.Engine.ImageLocalDigests(ctx, image) 40 | if err != nil { 41 | logger.Errorf(ctx, err, "Check image failed %+v", err) 42 | } else { 43 | logger.Debug(ctx, "Local Image exists") 44 | exists = true 45 | } 46 | 47 | if exists && distributionInspect(ctx, node, image, digests) { 48 | logger.Debug(ctx, "Image cached, skip pulling") 49 | return nil 50 | } 51 | 52 | logger.Info(ctx, "Image not cached, pulling") 53 | rc, err := node.Engine.ImagePull(ctx, image, false) 54 | defer utils.EnsureReaderClosed(ctx, rc) 55 | if err != nil { 56 | logger.Errorf(ctx, err, "Error during pulling image %s", image) 57 | return err 58 | } 59 | logger.Infof(ctx, "Done pulling image %s", image) 60 | return nil 61 | } 
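A hypothetical usage sketch (not a file in this repository) for Rediaron.StartEphemeral from /store/redis/ephemeral.go above; it assumes it sits in the same redis store package, and the key path and heartbeat values are arbitrary.

func exampleEphemeral(ctx context.Context, r *Rediaron) error {
	// Register an ephemeral key; StartEphemeral refreshes it every heartbeat/3.
	expiry, stop, err := r.StartEphemeral(ctx, "/eru/example/heartbeat", 10*time.Second)
	if err != nil {
		return err
	}
	defer stop() // cancels the refresh loop and waits for it to exit
	select {
	case <-expiry:
		// a refresh failed and the key has been revoked
	case <-ctx.Done():
	}
	return nil
}
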
62 | -------------------------------------------------------------------------------- /cluster/calcium/remap_test.go: -------------------------------------------------------------------------------- 1 | package calcium 2 | 3 | import ( 4 | "context" 5 | "testing" 6 | 7 | enginemocks "github.com/projecteru2/core/engine/mocks" 8 | lockmocks "github.com/projecteru2/core/lock/mocks" 9 | "github.com/projecteru2/core/log" 10 | resourcemocks "github.com/projecteru2/core/resource/mocks" 11 | resourcetypes "github.com/projecteru2/core/resource/types" 12 | storemocks "github.com/projecteru2/core/store/mocks" 13 | "github.com/projecteru2/core/types" 14 | "github.com/stretchr/testify/assert" 15 | "github.com/stretchr/testify/mock" 16 | ) 17 | 18 | func TestRemapResource(t *testing.T) { 19 | c := NewTestCluster() 20 | store := c.store.(*storemocks.Store) 21 | rmgr := c.rmgr.(*resourcemocks.Manager) 22 | rmgr.On("GetNodeResourceInfo", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return( 23 | resourcetypes.Resources{"test": {"abc": 123}}, 24 | resourcetypes.Resources{"test": {"abc": 123}}, 25 | []string{types.ErrMockError.Error()}, 26 | nil) 27 | rmgr.On("Remap", mock.Anything, mock.Anything, mock.Anything).Return( 28 | map[string]resourcetypes.Resources{}, 29 | nil, 30 | ) 31 | engine := &enginemocks.API{} 32 | node := &types.Node{Engine: engine} 33 | 34 | workload := &types.Workload{ 35 | Resources: resourcetypes.Resources{}, 36 | } 37 | store.On("ListNodeWorkloads", mock.Anything, mock.Anything, mock.Anything).Return([]*types.Workload{workload}, nil) 38 | _, err := c.doRemapResource(context.Background(), node) 39 | assert.Nil(t, err) 40 | 41 | store.On("GetNode", mock.Anything, mock.Anything).Return(node, nil) 42 | lock := &lockmocks.DistributedLock{} 43 | lock.On("Lock", mock.Anything).Return(context.Background(), nil) 44 | lock.On("Unlock", mock.Anything).Return(nil) 45 | store.On("CreateLock", mock.Anything, mock.Anything).Return(lock, nil) 46 | c.RemapResourceAndLog(context.Background(), log.WithField("test", "zc"), node) 47 | } 48 | -------------------------------------------------------------------------------- /cluster/calcium/pod.go: -------------------------------------------------------------------------------- 1 | package calcium 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/projecteru2/core/log" 7 | "github.com/projecteru2/core/types" 8 | ) 9 | 10 | // AddPod add pod 11 | func (c *Calcium) AddPod(ctx context.Context, podname, desc string) (*types.Pod, error) { 12 | logger := log.WithFunc("calcium.AddPod").WithField("podname", podname) 13 | if podname == "" { 14 | logger.Error(ctx, types.ErrEmptyPodName) 15 | return nil, types.ErrEmptyPodName 16 | } 17 | pod, err := c.store.AddPod(ctx, podname, desc) 18 | logger.Error(ctx, err) 19 | return pod, err 20 | } 21 | 22 | // RemovePod remove pod 23 | func (c *Calcium) RemovePod(ctx context.Context, podname string) error { 24 | logger := log.WithFunc("calcium.RemovePod").WithField("podname", podname) 25 | if podname == "" { 26 | logger.Error(ctx, types.ErrEmptyPodName) 27 | return types.ErrEmptyPodName 28 | } 29 | 30 | nodeFilter := &types.NodeFilter{ 31 | Podname: podname, 32 | All: true, 33 | } 34 | return c.withNodesPodLocked(ctx, nodeFilter, func(ctx context.Context, _ map[string]*types.Node) error { 35 | // TODO dissociate workload to node 36 | // TODO should remove node first 37 | err := c.store.RemovePod(ctx, podname) 38 | logger.Error(ctx, err) 39 | return err 40 | }) 41 | } 42 | 43 | // GetPod get one pod 44 | func (c 
*Calcium) GetPod(ctx context.Context, podname string) (*types.Pod, error) { 45 | logger := log.WithFunc("calcium.GetPod").WithField("podname", podname) 46 | if podname == "" { 47 | logger.Error(ctx, types.ErrEmptyPodName) 48 | return nil, types.ErrEmptyPodName 49 | } 50 | pod, err := c.store.GetPod(ctx, podname) 51 | logger.Error(ctx, err) 52 | return pod, err 53 | } 54 | 55 | // ListPods show pods 56 | func (c *Calcium) ListPods(ctx context.Context) ([]*types.Pod, error) { 57 | pods, err := c.store.GetAllPods(ctx) 58 | log.WithFunc("calcium.ListPods").Error(ctx, err) 59 | return pods, err 60 | } 61 | -------------------------------------------------------------------------------- /cluster/calcium/raw_engine_test.go: -------------------------------------------------------------------------------- 1 | package calcium 2 | 3 | import ( 4 | "context" 5 | "testing" 6 | 7 | enginemocks "github.com/projecteru2/core/engine/mocks" 8 | enginetypes "github.com/projecteru2/core/engine/types" 9 | lockmocks "github.com/projecteru2/core/lock/mocks" 10 | storemocks "github.com/projecteru2/core/store/mocks" 11 | "github.com/projecteru2/core/types" 12 | "github.com/stretchr/testify/assert" 13 | "github.com/stretchr/testify/mock" 14 | ) 15 | 16 | func TestRawEngine(t *testing.T) { 17 | c := NewTestCluster() 18 | ctx := context.Background() 19 | store := c.store.(*storemocks.Store) 20 | lock := &lockmocks.DistributedLock{} 21 | lock.On("Lock", mock.Anything).Return(ctx, nil) 22 | lock.On("Unlock", mock.Anything).Return(nil) 23 | store.On("CreateLock", mock.Anything, mock.Anything).Return(lock, nil) 24 | workload := &types.Workload{ 25 | ID: "id1", 26 | Privileged: true, 27 | } 28 | engine := &enginemocks.API{} 29 | workload.Engine = engine 30 | store.On("GetWorkloads", mock.Anything, mock.Anything).Return([]*types.Workload{workload}, nil) 31 | engine.On("RawEngine", mock.Anything, mock.Anything).Return(&enginetypes.RawEngineResult{}, nil).Once() 32 | _, err := c.RawEngine(ctx, &types.RawEngineOptions{ID: "id1", Op: "xxxx"}) 33 | assert.NoError(t, err) 34 | } 35 | 36 | func TestRawEngineIgnoreLock(t *testing.T) { 37 | c := NewTestCluster() 38 | ctx := context.Background() 39 | store := c.store.(*storemocks.Store) 40 | workload := &types.Workload{ 41 | ID: "id1", 42 | Privileged: true, 43 | } 44 | engine := &enginemocks.API{} 45 | workload.Engine = engine 46 | store.On("GetWorkload", mock.Anything, mock.Anything).Return(workload, nil) 47 | engine.On("RawEngine", mock.Anything, mock.Anything).Return(&enginetypes.RawEngineResult{}, nil).Once() 48 | _, err := c.RawEngine(ctx, &types.RawEngineOptions{ID: "id1", Op: "xxxx", IgnoreLock: true}) 49 | assert.NoError(t, err) 50 | } 51 | -------------------------------------------------------------------------------- /cluster/calcium/capacity.go: -------------------------------------------------------------------------------- 1 | package calcium 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/projecteru2/core/log" 7 | "github.com/projecteru2/core/strategy" 8 | "github.com/projecteru2/core/types" 9 | "github.com/sanity-io/litter" 10 | "golang.org/x/exp/maps" 11 | 12 | "github.com/cockroachdb/errors" 13 | plugintypes "github.com/projecteru2/core/resource/plugins/types" 14 | ) 15 | 16 | // CalculateCapacity calculates capacity 17 | func (c *Calcium) CalculateCapacity(ctx context.Context, opts *types.DeployOptions) (*types.CapacityMessage, error) { 18 | logger := log.WithFunc("calcium.CalculateCapacity").WithField("opts", opts) 19 | logger.Infof(ctx, "Calculate capacity 
with options:\n%s", litter.Options{Compact: true}.Sdump(opts)) 20 | var err error 21 | msg := &types.CapacityMessage{ 22 | Total: 0, 23 | NodeCapacities: map[string]int{}, 24 | } 25 | 26 | return msg, c.withNodesPodLocked(ctx, opts.NodeFilter, func(ctx context.Context, nodeMap map[string]*types.Node) error { 27 | nodenames := maps.Keys(nodeMap) 28 | 29 | if opts.DeployStrategy != strategy.Dummy { 30 | if msg.NodeCapacities, err = c.doGetDeployStrategy(ctx, nodenames, opts); err != nil { 31 | logger.Error(ctx, err, "doGetDeployMap failed") 32 | return err 33 | } 34 | 35 | for _, capacity := range msg.NodeCapacities { 36 | msg.Total += capacity 37 | } 38 | return nil 39 | } 40 | 41 | var infos map[string]*plugintypes.NodeDeployCapacity 42 | infos, msg.Total, err = c.rmgr.GetNodesDeployCapacity(ctx, nodenames, opts.Resources) 43 | if err != nil { 44 | logger.Error(ctx, err, "failed to get nodes capacity") 45 | return err 46 | } 47 | if msg.Total <= 0 { 48 | return errors.Wrap(types.ErrInsufficientResource, "no node meets all the resource requirements at the same time") 49 | } 50 | for node, info := range infos { 51 | msg.NodeCapacities[node] = info.Capacity 52 | } 53 | return nil 54 | }) 55 | } 56 | -------------------------------------------------------------------------------- /make-release: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | ROOT="`pwd`/build" 4 | mkdir -p $ROOT 5 | 6 | after_install_script="$ROOT/after_install.sh" 7 | RPMROOT="$ROOT/rpmbuild" 8 | DEBROOT="$ROOT/debbuild" 9 | 10 | cat > $after_install_script << EOF 11 | systemctl daemon-reload 12 | EOF 13 | 14 | VERSION=$(cat VERSION) 15 | 16 | BIN="$RPMROOT/usr/bin" 17 | CONF="$RPMROOT/etc/eru" 18 | SERVICE="$RPMROOT/usr/lib/systemd/system" 19 | 20 | mkdir -p $BIN 21 | mkdir -p $CONF 22 | mkdir -p $SERVICE 23 | 24 | # calc iteration number 25 | ITRATION_FILE=.${VERSION}.itr 26 | if [ ! 
-f $ITRATION_FILE ]; then 27 | echo 0 > $ITRATION_FILE 28 | fi 29 | ITRATION_NUMBER=$(cat $ITRATION_FILE) 30 | if [ $ITRATION_NUMBER -lt 0 ]; then 31 | ITRATION_NUMBER=0 32 | fi 33 | # increase every calling 34 | ITRATION_NUMBER=$(expr $ITRATION_NUMBER + 1) 35 | echo $ITRATION_NUMBER > $ITRATION_FILE 36 | 37 | cp eru-core $BIN 38 | cp core.yaml.sample $CONF 39 | cp eru-core.service $SERVICE 40 | 41 | echo $VERSION rpm build begin 42 | 43 | fpm -f -s dir -t rpm -n eru-core --epoch 0 -v $VERSION --iteration ${ITRATION_NUMBER}.el7 -C $RPMROOT -p $PWD --verbose --no-rpm-auto-add-directories --category 'Development/App' --description 'eru core' --after-install $after_install_script --url 'https://github.com/projecteru2/core' --license 'MIT' -a all --no-rpm-sign usr etc 44 | 45 | BIN="$DEBROOT/usr/bin" 46 | CONF="$DEBROOT/etc/eru" 47 | SERVICE="$DEBROOT/lib/systemd/system" 48 | 49 | mkdir -p $BIN 50 | mkdir -p $CONF 51 | mkdir -p $SERVICE 52 | 53 | cp eru-core $BIN 54 | cp core.yaml.sample $CONF 55 | cp eru-core.service $SERVICE 56 | 57 | echo $VERSION deb build begin 58 | 59 | fpm -f -s dir -t deb -n eru-core -v $VERSION --iteration ${ITRATION_NUMBER}.noarch -C $DEBROOT -p $PWD --verbose --category 'Development/App' --description 'eru core' --url 'https://github.com/projecteru2/core' --license 'MIT' --after-install $after_install_script --deb-no-default-config-files usr etc lib 60 | 61 | rm -rf $ROOT 62 | -------------------------------------------------------------------------------- /resource/plugins/cpumem/cpumem_test.go: -------------------------------------------------------------------------------- 1 | package cpumem 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "testing" 7 | 8 | enginetypes "github.com/projecteru2/core/engine/types" 9 | plugintypes "github.com/projecteru2/core/resource/plugins/types" 10 | coretypes "github.com/projecteru2/core/types" 11 | "github.com/stretchr/testify/assert" 12 | ) 13 | 14 | func TestName(t *testing.T) { 15 | cm := initCPUMEM(context.Background(), t) 16 | assert.Equal(t, cm.name, cm.Name()) 17 | } 18 | 19 | func initCPUMEM(ctx context.Context, t *testing.T) *Plugin { 20 | config := coretypes.Config{ 21 | Etcd: coretypes.EtcdConfig{ 22 | Prefix: "/cpumem", 23 | }, 24 | Scheduler: coretypes.SchedulerConfig{ 25 | MaxShare: -1, 26 | ShareBase: 100, 27 | }, 28 | } 29 | 30 | cm, err := NewPlugin(ctx, config, t) 31 | assert.NoError(t, err) 32 | return cm 33 | } 34 | 35 | func generateNodes( 36 | ctx context.Context, t *testing.T, cm *Plugin, 37 | nums int, cores int, memory int64, shares, index int, 38 | ) []string { 39 | reqs := generateNodeResourceRequests(t, nums, cores, memory, shares, index) 40 | info := &enginetypes.Info{NCPU: 8, MemTotal: 2048} 41 | names := []string{} 42 | for name, req := range reqs { 43 | _, err := cm.AddNode(ctx, name, req, info) 44 | assert.NoError(t, err) 45 | names = append(names, name) 46 | } 47 | t.Cleanup(func() { 48 | for name := range reqs { 49 | cm.RemoveNode(ctx, name) 50 | } 51 | }) 52 | return names 53 | } 54 | 55 | func generateNodeResourceRequests(t *testing.T, nums int, cores int, memory int64, shares, index int) map[string]plugintypes.NodeResourceRequest { 56 | infos := map[string]plugintypes.NodeResourceRequest{} 57 | for i := index; i < index+nums; i++ { 58 | info := plugintypes.NodeResourceRequest{ 59 | "cpu": cores, 60 | "share": shares, 61 | "memory": fmt.Sprintf("%v", memory), 62 | } 63 | infos[fmt.Sprintf("test%v", i)] = info 64 | } 65 | return infos 66 | } 67 | 
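A hypothetical test sketch (not a file in this repository) exercising the generateNodeResourceRequests helper above; the node count and resource numbers are arbitrary.

func TestGenerateNodeResourceRequestsSketch(t *testing.T) {
	// Two fake nodes named test0 and test1, 4 cores, a memory value of 1024, 100 shares, starting at index 0.
	reqs := generateNodeResourceRequests(t, 2, 4, 1024, 100, 0)
	assert.Len(t, reqs, 2)
	assert.Equal(t, 4, reqs["test0"]["cpu"]) // "cpu" carries the core count passed in
}
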
-------------------------------------------------------------------------------- /store/redis/service_test.go: -------------------------------------------------------------------------------- 1 | package redis 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "sort" 7 | "time" 8 | ) 9 | 10 | func (s *RediaronTestSuite) TestRegisterServiceWithDeregister() { 11 | m := s.rediaron 12 | ctx := context.Background() 13 | svc := "svc" 14 | path := fmt.Sprintf(serviceStatusKey, svc) 15 | _, deregister, err := m.RegisterService(ctx, svc, time.Minute) 16 | s.NoError(err) 17 | 18 | v, err := m.GetOne(ctx, path) 19 | s.NoError(err) 20 | s.Equal(ephemeralValue, v) 21 | 22 | deregister() 23 | //time.Sleep(time.Second) 24 | v, err = m.GetOne(ctx, path) 25 | s.Error(err) 26 | s.Empty(v) 27 | } 28 | 29 | func (s *RediaronTestSuite) TestServiceStatusStream() { 30 | m := s.rediaron 31 | ctx, cancel := context.WithCancel(context.Background()) 32 | 33 | go func() { 34 | time.Sleep(3 * time.Second) 35 | cancel() 36 | }() 37 | 38 | _, unregisterService1, err := m.RegisterService(ctx, "127.0.0.1:5001", time.Second) 39 | s.NoError(err) 40 | 41 | ch, err := m.ServiceStatusStream(ctx) 42 | s.NoError(err) 43 | 44 | s.Equal(<-ch, []string{"127.0.0.1:5001"}) 45 | 46 | // register and triggers set manually 47 | _, _, err = m.RegisterService(ctx, "127.0.0.1:5002", time.Second) 48 | s.NoError(err) 49 | time.Sleep(500 * time.Millisecond) 50 | triggerMockedKeyspaceNotification(s.rediaron.cli, fmt.Sprintf(serviceStatusKey, "127.0.0.1:5002"), actionSet) 51 | 52 | endpoints := <-ch 53 | sort.Strings(endpoints) 54 | s.Equal(endpoints, []string{"127.0.0.1:5001", "127.0.0.1:5002"}) 55 | 56 | // unregister and triggers del manually 57 | _, _, err = m.RegisterService(ctx, "127.0.0.1:5002", time.Second) 58 | unregisterService1() 59 | time.Sleep(500 * time.Millisecond) 60 | triggerMockedKeyspaceNotification(s.rediaron.cli, fmt.Sprintf(serviceStatusKey, "127.0.0.1:5001"), actionDel) 61 | 62 | // trigger fastforward to clean the value 63 | s.rediserver.FastForward(time.Second) 64 | s.Equal(<-ch, []string{"127.0.0.1:5002"}) 65 | } 66 | -------------------------------------------------------------------------------- /resource/types/resource_test.go: -------------------------------------------------------------------------------- 1 | package types 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/stretchr/testify/assert" 7 | ) 8 | 9 | func TestRawParams(t *testing.T) { 10 | var r RawParams 11 | 12 | r = RawParams{ 13 | "cde": 1, 14 | "bef": []interface{}{1, 2, 3, "1"}, 15 | "efg": []string{}, 16 | } 17 | assert.Equal(t, r.Float64("abc"), 0.0) 18 | assert.Equal(t, r.Int64("abc"), int64(0)) 19 | assert.Equal(t, r.String("abc"), "") 20 | assert.Equal(t, r.String("cde"), "") 21 | assert.Len(t, r.StringSlice("bef"), 1) 22 | assert.Nil(t, r.OneOfStringSlice("efg")) 23 | assert.Nil(t, r.RawParams("fgd")) 24 | 25 | r = RawParams{ 26 | "int64": 1, 27 | "str-int": "1", 28 | "float-int": 1.999999999999999999999, 29 | "float64": 1.999999999999999999999, 30 | "string": "string", 31 | "string-slice": []string{"string", "string"}, 32 | "bool": nil, 33 | "raw-params": map[string]interface{}{ 34 | "int64": 1, 35 | "str-int": "1", 36 | "float-int": 1.999999999999999999999, 37 | "float64": 1.999999999999999999999, 38 | "string": "string", 39 | "string-slice": []string{"string", "string"}, 40 | "bool": nil, 41 | }, 42 | "slice-raw-params": []map[string]interface{}{ 43 | {"int": 1}, 44 | {"float": 1}, 45 | }, 46 | } 47 | 48 | assert.Equal(t, r.Int64("int64"), 
int64(1)) 49 | assert.Equal(t, r.Int64("str-int"), int64(1)) 50 | assert.Equal(t, r.Int64("float-int"), int64(2)) 51 | assert.Equal(t, r.Float64("float64"), 1.999999999999999999999) 52 | assert.Equal(t, r.String("string"), "string") 53 | assert.Equal(t, r.StringSlice("string-slice"), []string{"string", "string"}) 54 | assert.Equal(t, r.OneOfStringSlice("?", "string-slice"), []string{"string", "string"}) 55 | assert.Equal(t, r.Bool("bool"), true) 56 | assert.Equal(t, r.RawParams("raw-params")["int64"], 1) 57 | assert.Equal(t, r.SliceRawParams("slice-raw-params")[0]["int"], 1) 58 | assert.Equal(t, r.IsSet("?"), false) 59 | } 60 | -------------------------------------------------------------------------------- /store/etcdv3/meta/ephemeral.go: -------------------------------------------------------------------------------- 1 | package meta 2 | 3 | import ( 4 | "context" 5 | "sync" 6 | "time" 7 | 8 | "github.com/projecteru2/core/log" 9 | "github.com/projecteru2/core/types" 10 | 11 | "github.com/cockroachdb/errors" 12 | clientv3 "go.etcd.io/etcd/client/v3" 13 | ) 14 | 15 | // StartEphemeral starts an empheral kv pair. 16 | func (e *ETCD) StartEphemeral(ctx context.Context, path string, heartbeat time.Duration) (<-chan struct{}, func(), error) { 17 | lease, err := e.cliv3.Grant(ctx, int64(heartbeat/time.Second)) 18 | if err != nil { 19 | return nil, nil, err 20 | } 21 | 22 | switch tx, err := e.cliv3.Txn(ctx). 23 | If(clientv3.Compare(clientv3.Version(path), "=", 0)). 24 | Then(clientv3.OpPut(path, "", clientv3.WithLease(lease.ID))). 25 | Commit(); { 26 | case err != nil: 27 | return nil, nil, err 28 | case !tx.Succeeded: 29 | return nil, nil, errors.Wrap(types.ErrKeyExists, path) 30 | } 31 | 32 | ctx, cancel := context.WithCancel(ctx) 33 | expiry := make(chan struct{}) 34 | logger := log.WithFunc("store.etcdv3.meta.StartEphemeral") 35 | 36 | var wg sync.WaitGroup 37 | wg.Add(1) 38 | go func() { 39 | defer wg.Done() 40 | defer close(expiry) 41 | 42 | tick := time.NewTicker(heartbeat / 3) 43 | defer tick.Stop() 44 | 45 | // Revokes the lease. 46 | defer func() { 47 | // It shouldn't be inheriting from the ctx. 48 | ctx, cancel := context.WithTimeout(context.TODO(), time.Minute) 49 | defer cancel() 50 | if _, err := e.cliv3.Revoke(ctx, lease.ID); err != nil { 51 | logger.Errorf(ctx, err, "revoke %d with %s failed", lease.ID, path) 52 | } 53 | }() 54 | 55 | for { 56 | select { 57 | case <-tick.C: 58 | if _, err := e.cliv3.KeepAliveOnce(ctx, lease.ID); err != nil { 59 | logger.Errorf(ctx, err, "keepalive %d with %s failed", lease.ID, path) 60 | return 61 | } 62 | case <-ctx.Done(): 63 | return 64 | } 65 | } 66 | }() 67 | 68 | return expiry, func() { 69 | cancel() 70 | wg.Wait() 71 | }, nil 72 | } 73 | -------------------------------------------------------------------------------- /store/etcdv3/meta/meta.go: -------------------------------------------------------------------------------- 1 | package meta 2 | 3 | import ( 4 | "context" 5 | "time" 6 | 7 | "github.com/projecteru2/core/lock" 8 | 9 | "go.etcd.io/etcd/api/v3/mvccpb" 10 | clientv3 "go.etcd.io/etcd/client/v3" 11 | ) 12 | 13 | // KV . 
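// Usage sketch (an illustrative assumption, not part of this interface's
// contract): create a key, then consume its watch channel. The key and the
// wrapper function below are hypothetical.
//
//	func watchKey(ctx context.Context, kv KV) error {
//		if _, err := kv.Create(ctx, "/demo/key", "value"); err != nil {
//			return err
//		}
//		for resp := range kv.Watch(ctx, "/demo/key") {
//			_ = resp.Events // react to each change batch
//		}
//		return nil
//	}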
14 | type KV interface { 15 | Grant(ctx context.Context, ttl int64) (*clientv3.LeaseGrantResponse, error) 16 | BindStatus(ctx context.Context, entityKey, statusKey, statusValue string, ttl int64) error 17 | 18 | Get(ctx context.Context, key string, opts ...clientv3.OpOption) (*clientv3.GetResponse, error) 19 | GetOne(ctx context.Context, key string, opts ...clientv3.OpOption) (*mvccpb.KeyValue, error) 20 | GetMulti(ctx context.Context, keys []string, opts ...clientv3.OpOption) (kvs []*mvccpb.KeyValue, err error) 21 | Watch(ctx context.Context, key string, opts ...clientv3.OpOption) clientv3.WatchChan 22 | 23 | Create(ctx context.Context, key, val string, opts ...clientv3.OpOption) (*clientv3.TxnResponse, error) 24 | Put(ctx context.Context, key, val string, opts ...clientv3.OpOption) (*clientv3.PutResponse, error) 25 | Update(ctx context.Context, key, val string, opts ...clientv3.OpOption) (*clientv3.TxnResponse, error) 26 | Delete(ctx context.Context, key string, opts ...clientv3.OpOption) (*clientv3.DeleteResponse, error) 27 | 28 | BatchCreateAndDecr(ctx context.Context, data map[string]string, decrKey string) error 29 | 30 | BatchCreate(ctx context.Context, data map[string]string, opts ...clientv3.OpOption) (*clientv3.TxnResponse, error) 31 | BatchUpdate(ctx context.Context, data map[string]string, opts ...clientv3.OpOption) (*clientv3.TxnResponse, error) 32 | BatchDelete(ctx context.Context, keys []string, opts ...clientv3.OpOption) (*clientv3.TxnResponse, error) 33 | BatchPut(ctx context.Context, data map[string]string, opts ...clientv3.OpOption) (*clientv3.TxnResponse, error) 34 | 35 | StartEphemeral(ctx context.Context, path string, heartbeat time.Duration) (<-chan struct{}, func(), error) 36 | CreateLock(key string, ttl time.Duration) (lock.DistributedLock, error) 37 | } 38 | -------------------------------------------------------------------------------- /resource/manager.go: -------------------------------------------------------------------------------- 1 | package resource 2 | 3 | import ( 4 | "context" 5 | 6 | enginetypes "github.com/projecteru2/core/engine/types" 7 | plugintypes "github.com/projecteru2/core/resource/plugins/types" 8 | resourcetypes "github.com/projecteru2/core/resource/types" 9 | "github.com/projecteru2/core/types" 10 | ) 11 | 12 | // Manager indicate manages 13 | // coretypes --> manager to rawparams --> plugins types 14 | type Manager interface { 15 | AddNode(context.Context, string, resourcetypes.Resources, *enginetypes.Info) (resourcetypes.Resources, error) 16 | RemoveNode(context.Context, string) error 17 | GetNodesDeployCapacity(context.Context, []string, resourcetypes.Resources) (map[string]*plugintypes.NodeDeployCapacity, int, error) 18 | SetNodeResourceCapacity(context.Context, string, resourcetypes.Resources, resourcetypes.Resources, bool, bool) (resourcetypes.Resources, resourcetypes.Resources, error) 19 | SetNodeResourceUsage(context.Context, string, resourcetypes.Resources, resourcetypes.Resources, []resourcetypes.Resources, bool, bool) (resourcetypes.Resources, resourcetypes.Resources, error) 20 | GetNodeResourceInfo(context.Context, string, []*types.Workload, bool) (resourcetypes.Resources, resourcetypes.Resources, []string, error) 21 | GetMostIdleNode(context.Context, []string) (string, error) 22 | 23 | Alloc(context.Context, string, int, resourcetypes.Resources) ([]resourcetypes.Resources, []resourcetypes.Resources, error) 24 | RollbackAlloc(context.Context, string, []resourcetypes.Resources) error 25 | Realloc(context.Context, string, 
resourcetypes.Resources, resourcetypes.Resources) (resourcetypes.Resources, resourcetypes.Resources, resourcetypes.Resources, error) 26 | RollbackRealloc(context.Context, string, resourcetypes.Resources) error 27 | Remap(context.Context, string, []*types.Workload) (map[string]resourcetypes.Resources, error) 28 | 29 | GetNodeMetrics(context.Context, *types.Node) ([]*plugintypes.Metrics, error) 30 | GetMetricsDescription(context.Context) ([]*plugintypes.MetricsDescription, error) 31 | } 32 | -------------------------------------------------------------------------------- /store/redis/pod.go: -------------------------------------------------------------------------------- 1 | package redis 2 | 3 | import ( 4 | "context" 5 | "encoding/json" 6 | "fmt" 7 | 8 | "github.com/cockroachdb/errors" 9 | "github.com/projecteru2/core/types" 10 | ) 11 | 12 | // AddPod adds a pod to core 13 | func (r *Rediaron) AddPod(ctx context.Context, name, desc string) (*types.Pod, error) { 14 | key := fmt.Sprintf(podInfoKey, name) 15 | pod := &types.Pod{Name: name, Desc: desc} 16 | 17 | bytes, err := json.Marshal(pod) 18 | if err != nil { 19 | return nil, err 20 | } 21 | err = r.BatchCreate(ctx, map[string]string{key: string(bytes)}) 22 | return pod, err 23 | } 24 | 25 | // RemovePod removes a pod by name 26 | func (r *Rediaron) RemovePod(ctx context.Context, podname string) error { 27 | key := fmt.Sprintf(podInfoKey, podname) 28 | 29 | ns, err := r.GetNodesByPod(ctx, &types.NodeFilter{Podname: podname, All: true}) 30 | if err != nil { 31 | return err 32 | } 33 | 34 | if l := len(ns); l != 0 { 35 | return errors.Wrapf(types.ErrPodHasNodes, "pod %s still has %d nodes, delete them first", podname, l) 36 | } 37 | 38 | _, err = r.cli.Del(ctx, key).Result() 39 | return err 40 | } 41 | 42 | // GetPod gets a pod by name 43 | func (r *Rediaron) GetPod(ctx context.Context, name string) (*types.Pod, error) { 44 | key := fmt.Sprintf(podInfoKey, name) 45 | 46 | data, err := r.cli.Get(ctx, key).Result() 47 | if err != nil { 48 | return nil, err 49 | } 50 | 51 | pod := &types.Pod{} 52 | if err = json.Unmarshal([]byte(data), pod); err != nil { 53 | return nil, err 54 | } 55 | return pod, err 56 | } 57 | 58 | // GetAllPods list all pods in core 59 | func (r *Rediaron) GetAllPods(ctx context.Context) ([]*types.Pod, error) { 60 | data, err := r.getByKeyPattern(ctx, fmt.Sprintf(podInfoKey, "*"), 0) 61 | if err != nil { 62 | return nil, err 63 | } 64 | 65 | pods := []*types.Pod{} 66 | for _, value := range data { 67 | pod := &types.Pod{} 68 | if err := json.Unmarshal([]byte(value), pod); err != nil { 69 | return nil, err 70 | } 71 | pods = append(pods, pod) 72 | } 73 | return pods, nil 74 | } 75 | -------------------------------------------------------------------------------- /source/mocks/Source.go: -------------------------------------------------------------------------------- 1 | // Code generated by mockery v2.31.4. DO NOT EDIT. 
2 | 3 | package mocks 4 | 5 | import ( 6 | context "context" 7 | 8 | mock "github.com/stretchr/testify/mock" 9 | ) 10 | 11 | // Source is an autogenerated mock type for the Source type 12 | type Source struct { 13 | mock.Mock 14 | } 15 | 16 | // Artifact provides a mock function with given fields: ctx, artifact, path 17 | func (_m *Source) Artifact(ctx context.Context, artifact string, path string) error { 18 | ret := _m.Called(ctx, artifact, path) 19 | 20 | var r0 error 21 | if rf, ok := ret.Get(0).(func(context.Context, string, string) error); ok { 22 | r0 = rf(ctx, artifact, path) 23 | } else { 24 | r0 = ret.Error(0) 25 | } 26 | 27 | return r0 28 | } 29 | 30 | // Security provides a mock function with given fields: path 31 | func (_m *Source) Security(path string) error { 32 | ret := _m.Called(path) 33 | 34 | var r0 error 35 | if rf, ok := ret.Get(0).(func(string) error); ok { 36 | r0 = rf(path) 37 | } else { 38 | r0 = ret.Error(0) 39 | } 40 | 41 | return r0 42 | } 43 | 44 | // SourceCode provides a mock function with given fields: ctx, repository, path, revision, submodule 45 | func (_m *Source) SourceCode(ctx context.Context, repository string, path string, revision string, submodule bool) error { 46 | ret := _m.Called(ctx, repository, path, revision, submodule) 47 | 48 | var r0 error 49 | if rf, ok := ret.Get(0).(func(context.Context, string, string, string, bool) error); ok { 50 | r0 = rf(ctx, repository, path, revision, submodule) 51 | } else { 52 | r0 = ret.Error(0) 53 | } 54 | 55 | return r0 56 | } 57 | 58 | // NewSource creates a new instance of Source. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 59 | // The first argument is typically a *testing.T value. 60 | func NewSource(t interface { 61 | mock.TestingT 62 | Cleanup(func()) 63 | }) *Source { 64 | mock := &Source{} 65 | mock.Mock.Test(t) 66 | 67 | t.Cleanup(func() { mock.AssertExpectations(t) }) 68 | 69 | return mock 70 | } 71 | -------------------------------------------------------------------------------- /wal/mocks/WAL.go: -------------------------------------------------------------------------------- 1 | // Code generated by mockery v2.31.4. DO NOT EDIT. 
2 | 3 | package mocks 4 | 5 | import ( 6 | context "context" 7 | 8 | wal "github.com/projecteru2/core/wal" 9 | mock "github.com/stretchr/testify/mock" 10 | ) 11 | 12 | // WAL is an autogenerated mock type for the WAL type 13 | type WAL struct { 14 | mock.Mock 15 | } 16 | 17 | // Close provides a mock function with given fields: 18 | func (_m *WAL) Close() error { 19 | ret := _m.Called() 20 | 21 | var r0 error 22 | if rf, ok := ret.Get(0).(func() error); ok { 23 | r0 = rf() 24 | } else { 25 | r0 = ret.Error(0) 26 | } 27 | 28 | return r0 29 | } 30 | 31 | // Log provides a mock function with given fields: _a0, _a1 32 | func (_m *WAL) Log(_a0 string, _a1 interface{}) (wal.Commit, error) { 33 | ret := _m.Called(_a0, _a1) 34 | 35 | var r0 wal.Commit 36 | var r1 error 37 | if rf, ok := ret.Get(0).(func(string, interface{}) (wal.Commit, error)); ok { 38 | return rf(_a0, _a1) 39 | } 40 | if rf, ok := ret.Get(0).(func(string, interface{}) wal.Commit); ok { 41 | r0 = rf(_a0, _a1) 42 | } else { 43 | if ret.Get(0) != nil { 44 | r0 = ret.Get(0).(wal.Commit) 45 | } 46 | } 47 | 48 | if rf, ok := ret.Get(1).(func(string, interface{}) error); ok { 49 | r1 = rf(_a0, _a1) 50 | } else { 51 | r1 = ret.Error(1) 52 | } 53 | 54 | return r0, r1 55 | } 56 | 57 | // Recover provides a mock function with given fields: _a0 58 | func (_m *WAL) Recover(_a0 context.Context) { 59 | _m.Called(_a0) 60 | } 61 | 62 | // Register provides a mock function with given fields: _a0 63 | func (_m *WAL) Register(_a0 wal.EventHandler) { 64 | _m.Called(_a0) 65 | } 66 | 67 | // NewWAL creates a new instance of WAL. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 68 | // The first argument is typically a *testing.T value. 69 | func NewWAL(t interface { 70 | mock.TestingT 71 | Cleanup(func()) 72 | }) *WAL { 73 | mock := &WAL{} 74 | mock.Mock.Test(t) 75 | 76 | t.Cleanup(func() { mock.AssertExpectations(t) }) 77 | 78 | return mock 79 | } 80 | -------------------------------------------------------------------------------- /store/etcdv3/processing.go: -------------------------------------------------------------------------------- 1 | package etcdv3 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "path/filepath" 7 | "strconv" 8 | "strings" 9 | 10 | "github.com/projecteru2/core/log" 11 | "github.com/projecteru2/core/types" 12 | 13 | clientv3 "go.etcd.io/etcd/client/v3" 14 | ) 15 | 16 | func (m *Mercury) getProcessingKey(processing *types.Processing) string { 17 | return filepath.Join(workloadProcessingPrefix, processing.Appname, processing.Entryname, processing.Nodename, processing.Ident) 18 | } 19 | 20 | // CreateProcessing save processing status in etcd 21 | func (m *Mercury) CreateProcessing(ctx context.Context, processing *types.Processing, count int) error { 22 | _, err := m.Create(ctx, m.getProcessingKey(processing), fmt.Sprintf("%d", count)) 23 | return err 24 | } 25 | 26 | // DeleteProcessing delete processing status in etcd 27 | func (m *Mercury) DeleteProcessing(ctx context.Context, processing *types.Processing) error { 28 | _, err := m.Delete(ctx, m.getProcessingKey(processing)) 29 | return err 30 | } 31 | 32 | func (m *Mercury) doLoadProcessing(ctx context.Context, appname, entryname string) (map[string]int, error) { 33 | nodesCount := map[string]int{} 34 | // 显式地加 / 保证 prefix 一致性 35 | processingKey := filepath.Join(workloadProcessingPrefix, appname, entryname) + "/" 36 | resp, err := m.Get(ctx, processingKey, clientv3.WithPrefix()) 37 | if err != nil { 38 | return nil, err 39 
| } 40 | if resp.Count == 0 { 41 | return nodesCount, nil 42 | } 43 | logger := log.WithFunc("store.etcdv3.doLoadProcessing") 44 | 45 | for _, ev := range resp.Kvs { 46 | key := string(ev.Key) 47 | parts := strings.Split(key, "/") 48 | nodename := parts[len(parts)-2] 49 | count, err := strconv.Atoi(string(ev.Value)) 50 | if err != nil { 51 | logger.Error(ctx, err, "Load processing status failed") 52 | continue 53 | } 54 | if _, ok := nodesCount[nodename]; !ok { 55 | nodesCount[nodename] = count 56 | continue 57 | } 58 | nodesCount[nodename] += count 59 | } 60 | logger.Debugf(ctx, "Processing result: %+v", nodesCount) 61 | return nodesCount, nil 62 | } 63 | -------------------------------------------------------------------------------- /store/redis/processing.go: -------------------------------------------------------------------------------- 1 | package redis 2 | 3 | import ( 4 | "context" 5 | "path/filepath" 6 | "strconv" 7 | "strings" 8 | 9 | "github.com/projecteru2/core/log" 10 | "github.com/projecteru2/core/types" 11 | ) 12 | 13 | func (r *Rediaron) getProcessingKey(processing *types.Processing) string { 14 | return filepath.Join(workloadProcessingPrefix, processing.Appname, processing.Entryname, processing.Nodename, processing.Ident) 15 | } 16 | 17 | // CreateProcessing save processing status in etcd 18 | func (r *Rediaron) CreateProcessing(ctx context.Context, processing *types.Processing, count int) error { 19 | processingKey := r.getProcessingKey(processing) 20 | return r.BatchCreate(ctx, map[string]string{processingKey: strconv.Itoa(count)}) 21 | } 22 | 23 | // DeleteProcessing delete processing status in etcd 24 | func (r *Rediaron) DeleteProcessing(ctx context.Context, processing *types.Processing) error { 25 | return r.BatchDelete(ctx, []string{r.getProcessingKey(processing)}) 26 | } 27 | 28 | // doLoadProcessing returns how many workloads are `processing` on each node 29 | func (r *Rediaron) doLoadProcessing(ctx context.Context, appname, entryname string) (map[string]int, error) { 30 | nodesCount := map[string]int{} 31 | // 显式地加 / 保证 prefix 一致性 32 | processingKey := filepath.Join(workloadProcessingPrefix, appname, entryname) + "/*" 33 | data, err := r.getByKeyPattern(ctx, processingKey, 0) 34 | if err != nil { 35 | return nil, err 36 | } 37 | 38 | if len(data) == 0 { 39 | return nodesCount, nil 40 | } 41 | logger := log.WithFunc("store.redis.doLoadProcessing") 42 | 43 | for k, v := range data { 44 | parts := strings.Split(k, "/") 45 | nodename := parts[len(parts)-2] 46 | count, err := strconv.Atoi(v) 47 | if err != nil { 48 | logger.Error(ctx, err, "Load processing status failed") 49 | continue 50 | } 51 | if _, ok := nodesCount[nodename]; !ok { 52 | nodesCount[nodename] = count 53 | continue 54 | } 55 | nodesCount[nodename] += count 56 | } 57 | 58 | logger.Debug(ctx, "Processing result: %+v", nodesCount) 59 | return nodesCount, nil 60 | } 61 | -------------------------------------------------------------------------------- /cluster/calcium/copy_test.go: -------------------------------------------------------------------------------- 1 | package calcium 2 | 3 | import ( 4 | "context" 5 | "testing" 6 | 7 | enginemocks "github.com/projecteru2/core/engine/mocks" 8 | lockmocks "github.com/projecteru2/core/lock/mocks" 9 | storemocks "github.com/projecteru2/core/store/mocks" 10 | "github.com/projecteru2/core/types" 11 | 12 | "github.com/stretchr/testify/assert" 13 | "github.com/stretchr/testify/mock" 14 | ) 15 | 16 | func TestCopy(t *testing.T) { 17 | c := NewTestCluster() 18 | ctx 
:= context.Background() 19 | 20 | // failed by target 21 | _, err := c.Copy(ctx, &types.CopyOptions{ 22 | Targets: map[string][]string{}, 23 | }) 24 | assert.Error(t, err) 25 | 26 | opts := &types.CopyOptions{ 27 | Targets: map[string][]string{ 28 | "cid": { 29 | "path1", 30 | "path2", 31 | }, 32 | }, 33 | } 34 | store := c.store.(*storemocks.Store) 35 | lock := &lockmocks.DistributedLock{} 36 | lock.On("Lock", mock.Anything).Return(ctx, nil) 37 | lock.On("Unlock", mock.Anything).Return(nil) 38 | store.On("CreateLock", mock.Anything, mock.Anything).Return(lock, nil) 39 | // failed by GetWorkload 40 | store.On("GetWorkload", mock.Anything, mock.Anything).Return(nil, types.ErrMockError).Once() 41 | ch, err := c.Copy(ctx, opts) 42 | assert.NoError(t, err) 43 | for r := range ch { 44 | assert.Error(t, r.Error) 45 | } 46 | workload := &types.Workload{ID: "cid"} 47 | engine := &enginemocks.API{} 48 | workload.Engine = engine 49 | store.On("GetWorkload", mock.Anything, mock.Anything).Return(workload, nil) 50 | // failed by VirtualizationCopyFrom 51 | engine.On("VirtualizationCopyFrom", mock.Anything, mock.Anything, mock.Anything).Return(nil, 0, 0, int64(0), types.ErrMockError).Twice() 52 | ch, err = c.Copy(ctx, opts) 53 | assert.NoError(t, err) 54 | for r := range ch { 55 | assert.Error(t, r.Error) 56 | } 57 | engine.On("VirtualizationCopyFrom", mock.Anything, mock.Anything, mock.Anything).Return([]byte("omg"), 0, 0, int64(0), nil) 58 | // success 59 | ch, err = c.Copy(ctx, opts) 60 | assert.NoError(t, err) 61 | for r := range ch { 62 | assert.NoError(t, r.Error) 63 | } 64 | } 65 | -------------------------------------------------------------------------------- /log/field.go: -------------------------------------------------------------------------------- 1 | package log 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/alphadose/haxmap" 7 | ) 8 | 9 | // Fields is a wrapper for zerolog.Entry 10 | // we need to insert some sentry captures here 11 | type Fields struct { 12 | kv *haxmap.Map[string, any] 13 | } 14 | 15 | // WithFunc is short for WithField 16 | func WithFunc(fname string) *Fields { 17 | return WithField("func", fname) 18 | } 19 | 20 | // WithField add kv into log entry 21 | func WithField(key string, value any) *Fields { 22 | r := haxmap.New[string, any]() 23 | r.Set(key, value) 24 | return &Fields{ 25 | kv: r, 26 | } 27 | } 28 | 29 | // WithField . 30 | func (f *Fields) WithField(key string, value any) *Fields { 31 | f.kv.Set(key, value) 32 | return f 33 | } 34 | 35 | // Fatalf forwards to sentry 36 | func (f Fields) Fatalf(ctx context.Context, err error, format string, args ...any) { 37 | fatalf(ctx, err, format, f.kv, args...) 38 | } 39 | 40 | // Warnf is Warnf 41 | func (f Fields) Warnf(ctx context.Context, format string, args ...any) { 42 | warnf(ctx, format, f.kv, args...) 43 | } 44 | 45 | // Warn is Warn 46 | func (f Fields) Warn(ctx context.Context, args ...any) { 47 | f.Warnf(ctx, "%+v", args...) 48 | } 49 | 50 | // Infof is Infof 51 | func (f Fields) Infof(ctx context.Context, format string, args ...any) { 52 | infof(ctx, format, f.kv, args...) 53 | } 54 | 55 | // Info is Info 56 | func (f Fields) Info(ctx context.Context, args ...any) { 57 | f.Infof(ctx, "%+v", args...) 58 | } 59 | 60 | // Debugf is Debugf 61 | func (f Fields) Debugf(ctx context.Context, format string, args ...any) { 62 | debugf(ctx, format, f.kv, args...) 63 | } 64 | 65 | // Debug is Debug 66 | func (f Fields) Debug(ctx context.Context, args ...any) { 67 | f.Debugf(ctx, "%+v", args...) 
68 | } 69 | 70 | // Errorf forwards to sentry 71 | func (f Fields) Errorf(ctx context.Context, err error, format string, args ...any) { 72 | errorf(ctx, err, format, f.kv, args...) 73 | } 74 | 75 | // Error forwards to sentry 76 | func (f Fields) Error(ctx context.Context, err error, args ...any) { 77 | f.Errorf(ctx, err, "%+v", args...) 78 | } 79 | -------------------------------------------------------------------------------- /discovery/helium/helium_test.go: -------------------------------------------------------------------------------- 1 | package helium 2 | 3 | import ( 4 | "context" 5 | "testing" 6 | "time" 7 | 8 | storemocks "github.com/projecteru2/core/store/mocks" 9 | "github.com/projecteru2/core/types" 10 | 11 | "github.com/stretchr/testify/assert" 12 | "github.com/stretchr/testify/mock" 13 | ) 14 | 15 | func TestHelium(t *testing.T) { 16 | chAddr := make(chan []string) 17 | 18 | store := &storemocks.Store{} 19 | store.On("ServiceStatusStream", mock.Anything).Return(chAddr, nil) 20 | 21 | grpcConfig := types.GRPCConfig{ 22 | ServiceDiscoveryPushInterval: time.Duration(1) * time.Second, 23 | } 24 | service := New(context.TODO(), grpcConfig, store) 25 | ctx, cancel := context.WithCancel(context.Background()) 26 | defer cancel() 27 | uuid, chStatus := service.Subscribe(ctx) 28 | 29 | addresses1 := []string{ 30 | "10.0.0.1", 31 | "10.0.0.2", 32 | } 33 | addresses2 := []string{ 34 | "10.0.0.1", 35 | } 36 | 37 | go func() { 38 | chAddr <- addresses1 39 | chAddr <- addresses2 40 | }() 41 | 42 | status1 := <-chStatus 43 | status2 := <-chStatus 44 | assert.Equal(t, addresses1, status1.Addresses) 45 | assert.Equal(t, addresses2, status2.Addresses) 46 | assert.NotEqual(t, status1.Addresses, status2.Addresses) 47 | 48 | service.Unsubscribe(uuid) 49 | close(chAddr) 50 | } 51 | 52 | func TestPanic(t *testing.T) { 53 | chAddr := make(chan []string) 54 | 55 | store := &storemocks.Store{} 56 | store.On("ServiceStatusStream", mock.Anything).Return(chAddr, nil) 57 | 58 | grpcConfig := types.GRPCConfig{ 59 | ServiceDiscoveryPushInterval: time.Duration(1) * time.Second, 60 | } 61 | service := New(context.TODO(), grpcConfig, store) 62 | ctx, cancel := context.WithCancel(context.Background()) 63 | defer cancel() 64 | 65 | for i := 0; i < 1000; i++ { 66 | go func() { 67 | uuid, _ := service.Subscribe(ctx) 68 | time.Sleep(time.Second) 69 | service.Unsubscribe(uuid) 70 | //close(chStatus) 71 | }() 72 | } 73 | 74 | go func() { 75 | for i := 0; i < 1000; i++ { 76 | chAddr <- []string{"hhh", "hhh2"} 77 | } 78 | }() 79 | 80 | time.Sleep(5 * time.Second) 81 | } 82 | -------------------------------------------------------------------------------- /client/client.go: -------------------------------------------------------------------------------- 1 | package client 2 | 3 | import ( 4 | "context" 5 | "time" 6 | 7 | "github.com/projecteru2/core/auth" 8 | "github.com/projecteru2/core/client/interceptor" 9 | _ "github.com/projecteru2/core/client/resolver/eru" // register grpc resolver: eru:// 10 | _ "github.com/projecteru2/core/client/resolver/static" // register grpc resolver: static:// 11 | pb "github.com/projecteru2/core/rpc/gen" 12 | "github.com/projecteru2/core/types" 13 | 14 | "google.golang.org/grpc" 15 | "google.golang.org/grpc/credentials/insecure" 16 | "google.golang.org/grpc/keepalive" 17 | ) 18 | 19 | // Client contain grpc conn 20 | type Client struct { 21 | addr string 22 | conn *grpc.ClientConn 23 | } 24 | 25 | // NewClient new a client 26 | func NewClient(ctx context.Context, addr string, authConfig 
types.AuthConfig) (*Client, error) { 27 | cc, err := dial(ctx, addr, authConfig) 28 | return &Client{ 29 | addr: addr, 30 | conn: cc, 31 | }, err 32 | } 33 | 34 | // GetConn return connection 35 | func (c *Client) GetConn() *grpc.ClientConn { 36 | return c.conn 37 | } 38 | 39 | // GetRPCClient return rpc client 40 | func (c *Client) GetRPCClient() pb.CoreRPCClient { 41 | return pb.NewCoreRPCClient(c.conn) 42 | } 43 | 44 | func dial(ctx context.Context, addr string, authConfig types.AuthConfig) (*grpc.ClientConn, error) { 45 | opts := []grpc.DialOption{ 46 | grpc.WithTransportCredentials(insecure.NewCredentials()), 47 | grpc.WithKeepaliveParams(keepalive.ClientParameters{Time: 6 * 60 * time.Second, Timeout: time.Second}), 48 | grpc.WithDefaultServiceConfig(`{"loadBalancingPolicy":"round_robin"}`), // This sets the initial balancing policy, see https://github.com/grpc/grpc-go/blob/v1.40.x/examples/features/load_balancing/client/main.go 49 | grpc.WithUnaryInterceptor(interceptor.NewUnaryRetry(interceptor.RetryOptions{Max: 0})), 50 | grpc.WithStreamInterceptor(interceptor.NewStreamRetry(interceptor.RetryOptions{Max: 0})), 51 | } 52 | if authConfig.Username != "" { 53 | opts = append(opts, grpc.WithPerRPCCredentials(auth.NewCredential(authConfig))) 54 | } 55 | 56 | return grpc.DialContext(ctx, addr, opts...) 57 | } 58 | -------------------------------------------------------------------------------- /strategy/communism.go: -------------------------------------------------------------------------------- 1 | package strategy 2 | 3 | import ( 4 | "container/heap" 5 | "context" 6 | 7 | "github.com/projecteru2/core/types" 8 | 9 | "github.com/cockroachdb/errors" 10 | ) 11 | 12 | type infoHeap struct { 13 | infos []Info 14 | limit int 15 | } 16 | 17 | func (h infoHeap) Len() int { 18 | return len(h.infos) 19 | } 20 | 21 | func (h infoHeap) Less(i, j int) bool { 22 | return h.infos[i].Count < h.infos[j].Count || (h.infos[i].Count == h.infos[j].Count && h.infos[i].Capacity > h.infos[j].Capacity) 23 | } 24 | 25 | func (h infoHeap) Swap(i, j int) { 26 | h.infos[i], h.infos[j] = h.infos[j], h.infos[i] 27 | } 28 | 29 | func (h *infoHeap) Push(x any) { 30 | info := x.(Info) 31 | if info.Capacity == 0 || (h.limit > 0 && info.Count >= h.limit) { 32 | return 33 | } 34 | h.infos = append(h.infos, info) 35 | } 36 | 37 | func (h *infoHeap) Pop() any { 38 | length := len(h.infos) 39 | x := h.infos[length-1] 40 | h.infos = h.infos[0 : length-1] 41 | return x 42 | } 43 | 44 | func newInfoHeap(infos []Info, limit int) heap.Interface { 45 | dup := infoHeap{ 46 | infos: []Info{}, 47 | limit: limit, 48 | } 49 | for _, info := range infos { 50 | if info.Capacity == 0 || (limit > 0 && info.Count >= limit) { 51 | continue 52 | } 53 | dup.infos = append(dup.infos, info) 54 | } 55 | return &dup 56 | } 57 | 58 | // CommunismPlan 吃我一记共产主义大锅饭 59 | // 部署完 N 个后全局尽可能平均 60 | func CommunismPlan(_ context.Context, infos []Info, need, total, limit int) (map[string]int, error) { 61 | if total < need { 62 | return nil, errors.Wrapf(types.ErrInsufficientResource, "need: %d, available: %d", need, total) 63 | } 64 | 65 | deploy := map[string]int{} 66 | iHeap := newInfoHeap(infos, limit) 67 | heap.Init(iHeap) 68 | for { 69 | if iHeap.Len() == 0 { 70 | return nil, errors.Wrapf(types.ErrInsufficientResource, "reached nodelimit, a node can host at most %d instances", limit) 71 | } 72 | info := heap.Pop(iHeap).(Info) 73 | deploy[info.Nodename]++ 74 | need-- 75 | if need == 0 { 76 | return deploy, nil 77 | } 78 | info.Count++ 79 | info.Capacity-- 
80 | heap.Push(iHeap, info) 81 | } 82 | } 83 | -------------------------------------------------------------------------------- /cluster/calcium/workload.go: -------------------------------------------------------------------------------- 1 | package calcium 2 | 3 | // All functions are just proxy to store, since I don't want store to be exported. 4 | // All these functions are meta data related. 5 | 6 | import ( 7 | "context" 8 | 9 | "github.com/projecteru2/core/log" 10 | "github.com/projecteru2/core/types" 11 | ) 12 | 13 | // GetWorkload get a workload 14 | func (c *Calcium) GetWorkload(ctx context.Context, ID string) (workload *types.Workload, err error) { 15 | logger := log.WithFunc("calcium.GetWorkload").WithField("ID", ID) 16 | if ID == "" { 17 | logger.Error(ctx, types.ErrEmptyWorkloadID) 18 | return workload, types.ErrEmptyWorkloadID 19 | } 20 | workload, err = c.store.GetWorkload(ctx, ID) 21 | logger.Error(ctx, err) 22 | return workload, err 23 | } 24 | 25 | // GetWorkloads get workloads 26 | func (c *Calcium) GetWorkloads(ctx context.Context, IDs []string) (workloads []*types.Workload, err error) { 27 | workloads, err = c.store.GetWorkloads(ctx, IDs) 28 | log.WithFunc("calcium.GetWorkloads").WithField("IDs", IDs).Error(ctx, err) 29 | return workloads, err 30 | } 31 | 32 | // ListWorkloads list workloads 33 | func (c *Calcium) ListWorkloads(ctx context.Context, opts *types.ListWorkloadsOptions) (workloads []*types.Workload, err error) { 34 | if workloads, err = c.store.ListWorkloads(ctx, opts.Appname, opts.Entrypoint, opts.Nodename, opts.Limit, opts.Labels); err != nil { 35 | log.WithFunc("calcium.ListWorkloads").WithField("opts", opts).Errorf(ctx, err, "Calcium.ListWorkloads] store list failed: %+v", err) 36 | } 37 | return workloads, err 38 | } 39 | 40 | // ListNodeWorkloads list workloads belong to one node 41 | func (c *Calcium) ListNodeWorkloads(ctx context.Context, nodename string, labels map[string]string) (workloads []*types.Workload, err error) { 42 | logger := log.WithFunc("calcium.ListNodeWorkloads").WithField("node", nodename).WithField("labels", labels) 43 | if nodename == "" { 44 | logger.Error(ctx, types.ErrEmptyNodeName) 45 | return workloads, types.ErrEmptyNodeName 46 | } 47 | workloads, err = c.store.ListNodeWorkloads(ctx, nodename, labels) 48 | logger.Error(ctx, err) 49 | return workloads, err 50 | } 51 | -------------------------------------------------------------------------------- /client/resolver/eru/resolver.go: -------------------------------------------------------------------------------- 1 | package eru 2 | 3 | import ( 4 | "context" 5 | "strings" 6 | 7 | "github.com/projecteru2/core/client/servicediscovery" 8 | "github.com/projecteru2/core/log" 9 | "github.com/projecteru2/core/types" 10 | 11 | "google.golang.org/grpc/resolver" 12 | ) 13 | 14 | // Resolver for target eru://{addr} 15 | type Resolver struct { 16 | cc resolver.ClientConn 17 | cancel context.CancelFunc 18 | discovery servicediscovery.ServiceDiscovery 19 | } 20 | 21 | // New Resolver 22 | func New(cc resolver.ClientConn, endpoint string, authority string) *Resolver { 23 | var username, password string 24 | if authority != "" { 25 | parts := strings.Split(authority, ":") 26 | username, password = strings.TrimLeft(parts[0], "@"), parts[1] 27 | } 28 | authConfig := types.AuthConfig{Username: username, Password: password} 29 | r := &Resolver{ 30 | cc: cc, 31 | discovery: servicediscovery.New(endpoint, authConfig), 32 | } 33 | cc.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: 
endpoint}}}) //nolint 34 | go r.sync(context.TODO()) 35 | return r 36 | } 37 | 38 | // ResolveNow for interface 39 | func (r *Resolver) ResolveNow(_ resolver.ResolveNowOptions) {} 40 | 41 | // Close for interface 42 | func (r *Resolver) Close() { 43 | r.cancel() 44 | } 45 | 46 | func (r *Resolver) sync(ctx context.Context) { 47 | ctx, r.cancel = context.WithCancel(ctx) 48 | defer r.cancel() 49 | logger := log.WithFunc("resolver.sync") 50 | logger.Debug(ctx, "start sync service discovery") 51 | 52 | ch, err := r.discovery.Watch(ctx) 53 | if err != nil { 54 | logger.Error(ctx, err, "failed to watch service status") 55 | return 56 | } 57 | for { 58 | select { 59 | case <-ctx.Done(): 60 | logger.Error(ctx, ctx.Err(), "watch interrupted") 61 | return 62 | case endpoints, ok := <-ch: 63 | if !ok { 64 | logger.Info(ctx, nil, "watch closed") 65 | return 66 | } 67 | 68 | var addresses []resolver.Address 69 | for _, ep := range endpoints { 70 | addresses = append(addresses, resolver.Address{Addr: ep}) 71 | } 72 | r.cc.UpdateState(resolver.State{Addresses: addresses}) //nolint 73 | } 74 | } 75 | 76 | } 77 | -------------------------------------------------------------------------------- /engine/transform_test.go: -------------------------------------------------------------------------------- 1 | package engine 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/mitchellh/mapstructure" 7 | resourcetypes "github.com/projecteru2/core/resource/types" 8 | "github.com/stretchr/testify/assert" 9 | ) 10 | 11 | type virtualizationResource struct { 12 | CPU map[string]int64 `json:"cpu_map" mapstructure:"cpu_map"` // for cpu binding 13 | Quota float64 `json:"cpu" mapstructure:"cpu"` // for cpu quota 14 | Memory int64 `json:"memory" mapstructure:"memory"` // for memory binding 15 | Storage int64 `json:"storage" mapstructure:"storage"` 16 | NUMANode string `json:"numa_node" mapstructure:"numa_node"` // numa node 17 | Volumes []string `json:"volumes" mapstructure:"volumes"` 18 | VolumePlan map[string]map[string]int64 `json:"volume_plan" mapstructure:"volume_plan"` // literal VolumePlan 19 | VolumeChanged bool `json:"volume_changed" mapstructure:"volume_changed"` // indicate whether new volumes contained in realloc request 20 | IOPSOptions map[string]string `json:"iops_options" mapstructure:"IOPS_options"` // format: {device_name: "read-IOPS:write-IOPS:read-bps:write-bps"} 21 | Remap bool `json:"remap" mapstructure:"remap"` 22 | } 23 | 24 | func TestMakeVirtualizationResource(t *testing.T) { 25 | engineParams := resourcetypes.Resources{ 26 | "cpumem": { 27 | "cpu_map": map[string]int64{"1": 100}, 28 | "cpu": 100.0, 29 | "memory": 10000, 30 | }, 31 | } 32 | 33 | dst := &virtualizationResource{} 34 | 35 | err := MakeVirtualizationResource(engineParams, dst, func(p resourcetypes.Resources, d *virtualizationResource) error { 36 | return mapstructure.Decode(p["cpumem"], d) 37 | }) 38 | assert.NoError(t, err) 39 | assert.Equal(t, dst.Quota, 100.0) 40 | assert.Len(t, dst.CPU, 1) 41 | err = MakeVirtualizationResource(engineParams, dst, func(p resourcetypes.Resources, d *virtualizationResource) error { 42 | return mapstructure.Decode(p["storage"], d) 43 | }) 44 | assert.NoError(t, err) 45 | } 46 | -------------------------------------------------------------------------------- /strategy/global.go: -------------------------------------------------------------------------------- 1 | package strategy 2 | 3 | import ( 4 | "container/heap" 5 | "context" 6 | 7 | "github.com/cockroachdb/errors" 8 | 
"github.com/projecteru2/core/log" 9 | "github.com/projecteru2/core/types" 10 | ) 11 | 12 | type infoHeapForGlobalStrategy []Info 13 | 14 | // Len . 15 | func (r infoHeapForGlobalStrategy) Len() int { 16 | return len(r) 17 | } 18 | 19 | // Less . 20 | func (r infoHeapForGlobalStrategy) Less(i, j int) bool { 21 | return (r[i].Usage + r[i].Rate) < (r[j].Usage + r[j].Rate) 22 | } 23 | 24 | // Swap . 25 | func (r infoHeapForGlobalStrategy) Swap(i, j int) { 26 | r[i], r[j] = r[j], r[i] 27 | } 28 | 29 | // Push . 30 | func (r *infoHeapForGlobalStrategy) Push(x any) { 31 | *r = append(*r, x.(Info)) 32 | } 33 | 34 | // Pop . 35 | func (r *infoHeapForGlobalStrategy) Pop() any { 36 | old := *r 37 | n := len(old) 38 | x := old[n-1] 39 | *r = old[:n-1] 40 | return x 41 | } 42 | 43 | // GlobalPlan 基于全局资源配额 44 | // 尽量使得资源消耗平均 45 | func GlobalPlan(ctx context.Context, infos []Info, need, total, _ int) (map[string]int, error) { 46 | if total < need { 47 | return nil, errors.Wrapf(types.ErrInsufficientResource, "need: %d, available: %d", need, total) 48 | } 49 | strategyInfos := make([]Info, len(infos)) 50 | copy(strategyInfos, infos) 51 | deployMap := map[string]int{} 52 | 53 | infoHeap := &infoHeapForGlobalStrategy{} 54 | for _, info := range strategyInfos { 55 | if info.Capacity > 0 { 56 | infoHeap.Push(info) 57 | } 58 | } 59 | heap.Init(infoHeap) 60 | 61 | for i := 0; i < need; i++ { 62 | if infoHeap.Len() == 0 { 63 | return nil, errors.Wrapf(types.ErrInsufficientResource, "need: %d, available: %d", need, i) 64 | } 65 | infoWithMinUsage := heap.Pop(infoHeap).(Info) 66 | deployMap[infoWithMinUsage.Nodename]++ 67 | infoWithMinUsage.Usage += infoWithMinUsage.Rate 68 | infoWithMinUsage.Capacity-- 69 | 70 | if infoWithMinUsage.Capacity > 0 { 71 | heap.Push(infoHeap, infoWithMinUsage) 72 | } 73 | } 74 | 75 | // 这里 need 一定会为 0 出来,因为 volTotal 保证了一定大于 need 76 | // 这里并不需要再次排序了,理论上的排序是基于资源使用率得到的 Deploy 最终方案 77 | log.WithFunc("strategy.GlobalPlan").Debugf(ctx, "strategyInfos: %+v", strategyInfos) 78 | return deployMap, nil 79 | } 80 | -------------------------------------------------------------------------------- /store/redis/service.go: -------------------------------------------------------------------------------- 1 | package redis 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "strings" 7 | "time" 8 | 9 | "github.com/projecteru2/core/log" 10 | ) 11 | 12 | type endpoints map[string]struct{} 13 | 14 | func (e *endpoints) Add(endpoint string) (changed bool) { 15 | if _, ok := (*e)[endpoint]; !ok { 16 | (*e)[endpoint] = struct{}{} 17 | changed = true 18 | } 19 | return 20 | } 21 | 22 | func (e *endpoints) Remove(endpoint string) (changed bool) { 23 | if _, ok := (*e)[endpoint]; ok { 24 | delete(*e, endpoint) 25 | changed = true 26 | } 27 | return 28 | } 29 | 30 | func (e endpoints) ToSlice() (eps []string) { 31 | for ep := range e { 32 | eps = append(eps, ep) 33 | } 34 | return 35 | } 36 | 37 | // ServiceStatusStream watches /services/ --prefix 38 | func (r *Rediaron) ServiceStatusStream(ctx context.Context) (chan []string, error) { 39 | key := fmt.Sprintf(serviceStatusKey, "*") 40 | ch := make(chan []string) 41 | _ = r.pool.Invoke(func() { 42 | defer close(ch) 43 | 44 | watchC := r.KNotify(ctx, key) 45 | 46 | data, err := r.getByKeyPattern(ctx, key, 0) 47 | if err != nil { 48 | log.WithFunc("store.redis.ServiceStatusStream").Error(ctx, err, "failed to get current services") 49 | return 50 | } 51 | eps := endpoints{} 52 | for k := range data { 53 | eps.Add(parseServiceKey(k)) 54 | } 55 | ch <- eps.ToSlice() 56 | 57 
| for message := range watchC { 58 | changed := false 59 | endpoint := parseServiceKey(message.Key) 60 | switch message.Action { 61 | case actionSet, actionExpire: 62 | changed = eps.Add(endpoint) 63 | case actionDel, actionExpired: 64 | changed = eps.Remove(endpoint) 65 | } 66 | if changed { 67 | ch <- eps.ToSlice() 68 | } 69 | } 70 | }) 71 | return ch, nil 72 | } 73 | 74 | // RegisterService put /services/{address} 75 | func (r *Rediaron) RegisterService(ctx context.Context, serviceAddress string, expire time.Duration) (<-chan struct{}, func(), error) { 76 | key := fmt.Sprintf(serviceStatusKey, serviceAddress) 77 | return r.StartEphemeral(ctx, key, expire) 78 | } 79 | 80 | func parseServiceKey(key string) (endpoint string) { 81 | parts := strings.Split(key, "/") 82 | return parts[len(parts)-1] 83 | } 84 | -------------------------------------------------------------------------------- /resource/plugins/types/node.go: -------------------------------------------------------------------------------- 1 | package types 2 | 3 | import ( 4 | resourcetypes "github.com/projecteru2/core/resource/types" 5 | ) 6 | 7 | // NodeResourceRequest use for raw data to create node 8 | type NodeResourceRequest = resourcetypes.RawParams 9 | 10 | // NodeResource use for indicate node's resource 11 | type NodeResource = resourcetypes.RawParams 12 | 13 | // AddNodeResponse . 14 | type AddNodeResponse struct { 15 | Capacity NodeResource `json:"capacity" mapstructure:"capacity"` 16 | Usage NodeResource `json:"usage" mapstructure:"usage"` 17 | } 18 | 19 | // RemoveNodeResponse . 20 | type RemoveNodeResponse struct{} 21 | 22 | // NodeDeployCapacity . 23 | type NodeDeployCapacity struct { 24 | Capacity int 25 | // Usage current resource usage 26 | Usage float64 27 | // Rate proportion of requested resources to total 28 | Rate float64 29 | // Weight used for weighted average 30 | Weight float64 31 | } 32 | 33 | // GetNodesDeployCapacityResponse . 34 | type GetNodesDeployCapacityResponse struct { 35 | NodeDeployCapacityMap map[string]*NodeDeployCapacity `json:"nodes_deploy_capacity_map" mapstructure:"nodes_deploy_capacity_map"` 36 | Total int `json:"total" mapstructure:"total"` 37 | } 38 | 39 | // SetNodeResourceCapacityResponse . 40 | type SetNodeResourceCapacityResponse struct { 41 | Before NodeResource `json:"before" mapstructure:"before"` 42 | After NodeResource `json:"after" mapstructure:"after"` 43 | } 44 | 45 | // GetNodeResourceInfoResponse , 46 | type GetNodeResourceInfoResponse struct { 47 | Capacity NodeResource `json:"capacity" mapstructure:"capacity"` 48 | Usage NodeResource `json:"usage" mapstructure:"usage"` 49 | Diffs []string `json:"diffs" mapstructure:"diffs"` 50 | } 51 | 52 | // SetNodeResourceInfoResponse . 53 | type SetNodeResourceInfoResponse struct{} 54 | 55 | // SetNodeResourceUsageResponse . 56 | type SetNodeResourceUsageResponse struct { 57 | Before NodeResource `json:"before" mapstructure:"before"` 58 | After NodeResource `json:"after" mapstructure:"after"` 59 | } 60 | 61 | // GetMostIdleNodeResponse . 
62 | type GetMostIdleNodeResponse struct { 63 | Nodename string `json:"nodename"` 64 | Priority int `json:"priority"` 65 | } 66 | -------------------------------------------------------------------------------- /.github/workflows/dockerimage.yml: -------------------------------------------------------------------------------- 1 | name: docker-image 2 | 3 | on: 4 | push: 5 | branches: 6 | - master 7 | tags: 8 | - v* 9 | 10 | jobs: 11 | publish: 12 | runs-on: ubuntu-latest 13 | steps: 14 | - name: checkout 15 | uses: actions/checkout@v4 16 | with: 17 | fetch-depth: 0 18 | 19 | - uses: docker/setup-qemu-action@v3 20 | - uses: docker/setup-buildx-action@v3 21 | 22 | - name: Log in to the ghcr 23 | uses: docker/login-action@v3 24 | with: 25 | registry: ghcr.io 26 | username: ${{ github.actor }} 27 | password: ${{ secrets.GITHUB_TOKEN }} 28 | 29 | - name: Log in to the docker 30 | uses: docker/login-action@v3 31 | with: 32 | username: ${{ secrets.DOCKER_USERNAME }} 33 | password: ${{ secrets.DOCKER_PASSWORD }} 34 | 35 | - name: Docker meta 36 | id: meta 37 | uses: docker/metadata-action@v5 38 | with: 39 | images: | 40 | ghcr.io/${{ github.repository }} 41 | ${{ github.repository }} 42 | tags: | 43 | type=ref,event=tag 44 | 45 | - name: Docker meta for debug version 46 | if: ${{ github.ref == 'refs/heads/master' }} 47 | id: debug-meta 48 | uses: docker/metadata-action@v5 49 | with: 50 | images: | 51 | ghcr.io/${{ github.repository }} 52 | ${{ github.repository }} 53 | tags: | 54 | type=sha,format=long,prefix= 55 | 56 | - name: Build and push image 57 | if: ${{ steps.meta.outputs.tags != '' }} 58 | uses: docker/build-push-action@v5 59 | with: 60 | context: "." 61 | platforms: linux/amd64,linux/arm64 62 | push: true 63 | tags: ${{ steps.meta.outputs.tags }} 64 | 65 | - name: "[debug version] Build and push image" 66 | if: ${{ github.ref == 'refs/heads/master' }} 67 | uses: docker/build-push-action@v5 68 | with: 69 | context: "." 
70 | platforms: linux/amd64,linux/arm64 71 | push: true 72 | build-args: | 73 | KEEP_SYMBOL=1 74 | tags: ${{ steps.debug-meta.outputs.tags }} -------------------------------------------------------------------------------- /types/workload_test.go: -------------------------------------------------------------------------------- 1 | package types 2 | 3 | import ( 4 | "context" 5 | "testing" 6 | 7 | "github.com/projecteru2/core/engine/mocks" 8 | enginetypes "github.com/projecteru2/core/engine/types" 9 | 10 | "github.com/stretchr/testify/assert" 11 | "github.com/stretchr/testify/mock" 12 | ) 13 | 14 | func TestWorkloadInspect(t *testing.T) { 15 | mockEngine := &mocks.API{} 16 | r := &enginetypes.VirtualizationInfo{ID: "12345"} 17 | mockEngine.On("VirtualizationInspect", mock.Anything, mock.Anything).Return(r, nil) 18 | 19 | ctx := context.Background() 20 | c := Workload{} 21 | _, err := c.Inspect(ctx) 22 | assert.Error(t, err) 23 | c.Engine = mockEngine 24 | r2, _ := c.Inspect(ctx) 25 | assert.Equal(t, r.ID, r2.ID) 26 | } 27 | 28 | func TestWorkloadControl(t *testing.T) { 29 | mockEngine := &mocks.API{} 30 | mockEngine.On("VirtualizationStart", mock.Anything, mock.Anything).Return(nil) 31 | mockEngine.On("VirtualizationStop", mock.Anything, mock.Anything, mock.Anything).Return(nil) 32 | mockEngine.On("VirtualizationRemove", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil) 33 | mockEngine.On("VirtualizationSuspend", mock.Anything, mock.Anything).Return(nil) 34 | mockEngine.On("VirtualizationResume", mock.Anything, mock.Anything).Return(nil) 35 | 36 | ctx := context.Background() 37 | c := Workload{} 38 | assert.Error(t, c.Start(ctx)) 39 | assert.Error(t, c.Stop(ctx, true)) 40 | assert.Error(t, c.Remove(ctx, true)) 41 | assert.Error(t, c.Suspend(ctx)) 42 | assert.Error(t, c.Resume(ctx)) 43 | 44 | c.Engine = mockEngine 45 | err := c.Start(ctx) 46 | assert.NoError(t, err) 47 | err = c.Stop(ctx, true) 48 | assert.NoError(t, err) 49 | err = c.Remove(ctx, true) 50 | assert.NoError(t, err) 51 | err = c.Suspend(ctx) 52 | assert.NoError(t, err) 53 | err = c.Resume(ctx) 54 | assert.NoError(t, err) 55 | } 56 | 57 | func TestRawEngine(t *testing.T) { 58 | mockEngine := &mocks.API{} 59 | mockEngine.On("RawEngine", mock.Anything, mock.Anything).Return(&enginetypes.RawEngineResult{}, nil) 60 | 61 | ctx := context.Background() 62 | c := Workload{} 63 | _, err := c.RawEngine(ctx, &RawEngineOptions{}) 64 | assert.Error(t, err) 65 | 66 | c.Engine = mockEngine 67 | _, err = c.RawEngine(ctx, &RawEngineOptions{}) 68 | assert.NoError(t, err) 69 | } 70 | -------------------------------------------------------------------------------- /utils/transaction.go: -------------------------------------------------------------------------------- 1 | package utils 2 | 3 | import ( 4 | "context" 5 | "time" 6 | 7 | "github.com/projecteru2/core/log" 8 | ) 9 | 10 | // ContextFunc . 
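// Illustrative sketch (an assumption, not repository code) of how the Txn
// helper below is typically invoked: cond and then are contextFuncs, and the
// rollback callback can inspect whether the failure came from cond. The step
// bodies here are placeholders.
//
//	err := Txn(ctx,
//		func(ctx context.Context) error { /* validate preconditions */ return nil },
//		func(ctx context.Context) error { /* apply the change */ return nil },
//		func(ctx context.Context, failureByCond bool) error { /* undo side effects */ return nil },
//		30*time.Second,
//	)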
11 | type contextFunc = func(context.Context) error 12 | 13 | // Txn provides unified API to perform txn 14 | func Txn(ctx context.Context, cond contextFunc, then contextFunc, rollback func(context.Context, bool) error, ttl time.Duration) (txnErr error) { 15 | var condErr, thenErr error 16 | txnCtx, txnCancel := context.WithTimeout(ctx, ttl) 17 | defer txnCancel() 18 | logger := log.WithFunc("utils.Txn") 19 | defer func() { // rollback 20 | txnErr = condErr 21 | if txnErr == nil { 22 | txnErr = thenErr 23 | } 24 | if txnErr == nil { 25 | return 26 | } 27 | if rollback == nil { 28 | logger.Warn(ctx, "txn failed but no rollback function") 29 | return 30 | } 31 | 32 | logger.Error(ctx, txnErr, "txn failed, rolling back") 33 | 34 | // forbid interrupting rollback 35 | rollbackCtx, rollBackCancel := context.WithTimeout(NewInheritCtx(ctx), ttl) 36 | defer rollBackCancel() 37 | failureByCond := condErr != nil 38 | if err := rollback(rollbackCtx, failureByCond); err != nil { 39 | logger.Warnf(ctx, "txn failed but rollback also failed: %+v", err) 40 | } 41 | }() 42 | 43 | // let caller decide process then or not 44 | if condErr = cond(txnCtx); condErr == nil && then != nil { 45 | // no rollback and forbid interrupting further process 46 | thenCtx := txnCtx 47 | var thenCancel context.CancelFunc 48 | if rollback == nil { 49 | thenCtx, thenCancel = context.WithTimeout(NewInheritCtx(ctx), ttl) 50 | defer thenCancel() 51 | } 52 | thenErr = then(thenCtx) 53 | } 54 | 55 | return txnErr 56 | } 57 | 58 | // PCR Prepare, Commit, Rollback. 59 | // `prepare` should be a pure calculation process without side effects. 60 | // `commit` writes the calculation result of `prepare` into database. 61 | // if `commit` returns error, `rollback` will be performed. 62 | func PCR(ctx context.Context, prepare func(ctx context.Context) error, commit func(ctx context.Context) error, rollback func(ctx context.Context) error, ttl time.Duration) error { 63 | return Txn(ctx, prepare, commit, func(ctx context.Context, failureByCond bool) error { 64 | if !failureByCond { 65 | return rollback(ctx) 66 | } 67 | return nil 68 | }, ttl) 69 | } 70 | -------------------------------------------------------------------------------- /.goreleaser.yml: -------------------------------------------------------------------------------- 1 | # This is an example goreleaser.yaml file with some sane defaults. 2 | # Make sure to check the documentation at http://goreleaser.com 3 | before: 4 | hooks: 5 | # You may remove this if you don't use go modules. 
6 | - go mod download 7 | 8 | builds: 9 | - id: eru-core-debug 10 | binary: eru-core.dbg 11 | env: 12 | - CGO_ENABLED=0 13 | ldflags: 14 | - -X github.com/projecteru2/core/version.REVISION={{.Commit}} 15 | - -X github.com/projecteru2/core/version.VERSION={{.Env.VERSION}} 16 | - -X github.com/projecteru2/core/version.BUILTAT={{.Date}} 17 | hooks: 18 | post: 19 | - cp {{.Path}} ./eru-core-{{.Os}}.dbg 20 | goos: 21 | - darwin 22 | - linux 23 | goarch: 24 | - amd64 25 | 26 | # the best practice is using prebuilt builder 27 | # however it's a Pro feature 28 | - id: eru-core-linux 29 | binary: eru-core 30 | env: 31 | - CGO_ENABLED=0 32 | ldflags: 33 | - -X github.com/projecteru2/core/version.REVISION={{.Commit}} 34 | - -X github.com/projecteru2/core/version.VERSION={{.Env.VERSION}} 35 | - -X github.com/projecteru2/core/version.BUILTAT={{.Date}} 36 | hooks: 37 | post: 38 | - cp ./eru-core-{{.Os}}.dbg {{.Path}} 39 | - strip {{.Path}} 40 | goos: 41 | - linux 42 | goarch: 43 | - amd64 44 | 45 | - id: eru-core-darwin 46 | binary: eru-core 47 | env: 48 | - CGO_ENABLED=0 49 | ldflags: 50 | - -w -s 51 | - -X github.com/projecteru2/core/version.REVISION={{.Commit}} 52 | - -X github.com/projecteru2/core/version.VERSION={{.Env.VERSION}} 53 | - -X github.com/projecteru2/core/version.BUILTAT={{.Date}} 54 | goos: 55 | - darwin 56 | goarch: 57 | - amd64 58 | 59 | archives: 60 | - id: core 61 | name_template: >- 62 | {{- .ProjectName }}_{{- .Version }}_ 63 | {{- title .Os }}_ 64 | {{- if eq .Arch "amd64" }}x86_64 65 | {{- else if eq .Arch "386" }}i386 66 | {{- else }}{{ .Arch }}{{ end }} 67 | {{- if .Arm }}v{{ .Arm }}{{ end -}} 68 | 69 | checksum: 70 | name_template: 'checksums.txt' 71 | 72 | release: 73 | prerelease: auto 74 | 75 | snapshot: 76 | name_template: "{{ .Tag }}-next" 77 | 78 | changelog: 79 | sort: asc 80 | filters: 81 | exclude: 82 | - '^docs:' 83 | - '^test:' 84 | -------------------------------------------------------------------------------- /cluster/calcium/network.go: -------------------------------------------------------------------------------- 1 | package calcium 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/cockroachdb/errors" 7 | enginetypes "github.com/projecteru2/core/engine/types" 8 | "github.com/projecteru2/core/log" 9 | "github.com/projecteru2/core/types" 10 | ) 11 | 12 | // ListNetworks by podname 13 | // get one node from a pod 14 | // and list networks 15 | // only get those driven by network driver 16 | func (c *Calcium) ListNetworks(ctx context.Context, podname string, driver string) ([]*enginetypes.Network, error) { 17 | logger := log.WithFunc("calcium.ListNetworks").WithField("podname", podname).WithField("driver", driver) 18 | networks := []*enginetypes.Network{} 19 | nodes, err := c.store.GetNodesByPod(ctx, &types.NodeFilter{Podname: podname}) 20 | if err != nil { 21 | logger.Error(ctx, err) 22 | return networks, err 23 | } 24 | 25 | if len(nodes) == 0 { 26 | err := errors.Wrapf(types.ErrPodNoNodes, "pod: %s", podname) 27 | logger.Error(ctx, err) 28 | return networks, err 29 | } 30 | 31 | drivers := []string{} 32 | if driver != "" { 33 | drivers = append(drivers, driver) 34 | } 35 | 36 | node := nodes[0] 37 | 38 | networks, err = node.Engine.NetworkList(ctx, drivers) 39 | logger.Error(ctx, err) 40 | return networks, err 41 | } 42 | 43 | // ConnectNetwork connect to a network 44 | func (c *Calcium) ConnectNetwork(ctx context.Context, network, target, ipv4, ipv6 string) ([]string, error) { 45 | logger := log.WithFunc("calcium.ConnectNetwork").WithField("network", 
network).WithField("target", target).WithField("ipv4", ipv4).WithField("ipv6", ipv6) 46 | workload, err := c.GetWorkload(ctx, target) 47 | if err != nil { 48 | return nil, err 49 | } 50 | 51 | networks, err := workload.Engine.NetworkConnect(ctx, network, target, ipv4, ipv6) 52 | logger.Error(ctx, err) 53 | return networks, err 54 | } 55 | 56 | // DisconnectNetwork connect to a network 57 | func (c *Calcium) DisconnectNetwork(ctx context.Context, network, target string, force bool) error { 58 | logger := log.WithFunc("calcium.DisconnectNetwork").WithField("network", network).WithField("target", target).WithField("force", force) 59 | workload, err := c.GetWorkload(ctx, target) 60 | if err != nil { 61 | logger.Error(ctx, err) 62 | return err 63 | } 64 | if err = workload.Engine.NetworkDisconnect(ctx, network, target, force); err != nil { 65 | logger.Error(ctx, err) 66 | } 67 | return err 68 | } 69 | -------------------------------------------------------------------------------- /lock/redis/lock.go: -------------------------------------------------------------------------------- 1 | package redislock 2 | 3 | import ( 4 | "context" 5 | "strings" 6 | "time" 7 | 8 | "github.com/muroq/redislock" 9 | "github.com/projecteru2/core/types" 10 | ) 11 | 12 | var opts = &redislock.Options{ 13 | RetryStrategy: redislock.LinearBackoff(500 * time.Millisecond), 14 | } 15 | 16 | // RedisLock is a redis SET NX based lock 17 | type RedisLock struct { 18 | key string 19 | timeout time.Duration 20 | ttl time.Duration 21 | lc *redislock.Client 22 | l *redislock.Lock 23 | } 24 | 25 | // New creates a lock 26 | // key: name of the lock 27 | // waitTimeout: timeout before getting the lock, Lock returns error if the lock is not acquired after this time 28 | // lockTTL: ttl of lock, after this time, lock will be released automatically 29 | func New(cli redislock.RedisClient, key string, waitTimeout, lockTTL time.Duration) (*RedisLock, error) { 30 | if key == "" { 31 | return nil, types.ErrLockKeyInvaild 32 | } 33 | 34 | if !strings.HasPrefix(key, "/") { 35 | key = "/" + key 36 | } 37 | 38 | locker := redislock.New(cli) 39 | return &RedisLock{ 40 | key: key, 41 | timeout: waitTimeout, 42 | ttl: lockTTL, 43 | lc: locker, 44 | }, nil 45 | } 46 | 47 | // Lock acquires the lock 48 | // will try waitTimeout time before getting the lock 49 | func (r *RedisLock) Lock(ctx context.Context) (context.Context, error) { 50 | lockCtx, cancel := context.WithTimeout(ctx, r.timeout) 51 | defer cancel() 52 | return r.lock(lockCtx, opts) 53 | } 54 | 55 | // TryLock tries to lock 56 | // returns error if the lock is already acquired by someone else 57 | // will not retry to get lock 58 | func (r *RedisLock) TryLock(ctx context.Context) (context.Context, error) { 59 | return r.lock(ctx, nil) 60 | } 61 | 62 | // Unlock releases the lock 63 | // if the lock is not acquired, will return ErrLockNotHeld 64 | func (r *RedisLock) Unlock(ctx context.Context) error { 65 | if r.l == nil { 66 | return redislock.ErrLockNotHeld 67 | } 68 | 69 | lockCtx, cancel := context.WithTimeout(ctx, r.ttl) 70 | defer cancel() 71 | return r.l.Release(lockCtx) 72 | } 73 | 74 | func (r *RedisLock) lock(ctx context.Context, opts *redislock.Options) (context.Context, error) { 75 | l, err := r.lc.Obtain(ctx, r.key, r.timeout, r.ttl, opts) 76 | if err != nil { 77 | return nil, err 78 | } 79 | 80 | r.l = l 81 | return context.TODO(), nil // no need wrapped, not like etcd 82 | } 83 | -------------------------------------------------------------------------------- 
/cluster/calcium/pod_test.go: -------------------------------------------------------------------------------- 1 | package calcium 2 | 3 | import ( 4 | "context" 5 | "testing" 6 | 7 | lockmocks "github.com/projecteru2/core/lock/mocks" 8 | storemocks "github.com/projecteru2/core/store/mocks" 9 | "github.com/projecteru2/core/types" 10 | 11 | "github.com/stretchr/testify/assert" 12 | "github.com/stretchr/testify/mock" 13 | ) 14 | 15 | func TestAddPod(t *testing.T) { 16 | c := NewTestCluster() 17 | ctx := context.Background() 18 | 19 | _, err := c.AddPod(ctx, "", "") 20 | assert.Error(t, err) 21 | 22 | name := "test" 23 | pod := &types.Pod{ 24 | Name: name, 25 | } 26 | 27 | store := c.store.(*storemocks.Store) 28 | store.On("AddPod", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(pod, nil) 29 | 30 | p, err := c.AddPod(ctx, name, "") 31 | assert.NoError(t, err) 32 | assert.Equal(t, p.Name, name) 33 | } 34 | 35 | func TestRemovePod(t *testing.T) { 36 | c := NewTestCluster() 37 | ctx := context.Background() 38 | 39 | // fails validation 40 | assert.Error(t, c.RemovePod(ctx, "")) 41 | 42 | store := c.store.(*storemocks.Store) 43 | lock := &lockmocks.DistributedLock{} 44 | lock.On("Lock", mock.Anything).Return(ctx, nil) 45 | lock.On("Unlock", mock.Anything).Return(nil) 46 | store.On("CreateLock", mock.Anything, mock.Anything).Return(lock, nil) 47 | store.On("RemovePod", mock.Anything, mock.Anything).Return(nil) 48 | store.On("GetNodesByPod", mock.Anything, mock.Anything).Return( 49 | []*types.Node{{NodeMeta: types.NodeMeta{Name: "test"}}}, nil) 50 | 51 | assert.NoError(t, c.RemovePod(ctx, "podname")) 52 | store.AssertExpectations(t) 53 | } 54 | 55 | func TestGetPod(t *testing.T) { 56 | c := NewTestCluster() 57 | ctx := context.Background() 58 | 59 | _, err := c.GetPod(ctx, "") 60 | assert.Error(t, err) 61 | 62 | name := "test" 63 | pod := &types.Pod{Name: name} 64 | store := c.store.(*storemocks.Store) 65 | store.On("GetPod", mock.Anything, mock.Anything).Return(pod, nil) 66 | 67 | p, err := c.GetPod(ctx, name) 68 | assert.NoError(t, err) 69 | assert.Equal(t, p.Name, name) 70 | } 71 | 72 | func TestListPods(t *testing.T) { 73 | c := NewTestCluster() 74 | ctx := context.Background() 75 | 76 | name := "test" 77 | pods := []*types.Pod{ 78 | {Name: name}, 79 | } 80 | 81 | store := c.store.(*storemocks.Store) 82 | store.On("GetAllPods", mock.Anything).Return(pods, nil) 83 | 84 | ps, err := c.ListPods(ctx) 85 | assert.NoError(t, err) 86 | assert.Equal(t, len(ps), 1) 87 | assert.Equal(t, ps[0].Name, name) 88 | } 89 | -------------------------------------------------------------------------------- /strategy/fill_test.go: -------------------------------------------------------------------------------- 1 | package strategy 2 | 3 | import ( 4 | "context" 5 | "errors" 6 | "sort" 7 | "testing" 8 | 9 | "github.com/projecteru2/core/types" 10 | 11 | "github.com/stretchr/testify/assert" 12 | ) 13 | 14 | func TestFillPlan(t *testing.T) { 15 | // normal full fill 16 | n := 10 17 | nodes := deployedNodes() 18 | r, err := FillPlan(context.Background(), nodes, n, 0, 0) 19 | assert.NoError(t, err) 20 | finalCounts := []int{} 21 | for _, node := range nodes { 22 | finalCounts = append(finalCounts, node.Count+r[node.Nodename]) 23 | } 24 | sort.Ints(finalCounts) 25 | assert.ElementsMatch(t, []int{10, 10, 10, 10}, finalCounts) 26 | 27 | // partial fill 28 | n = 5 29 | nodes = deployedNodes() 30 | r, err = FillPlan(context.Background(), nodes, n, 0, 0) 31 | assert.NoError(t, err) 32 | finalCounts = []int{} 33 | for _,
node := range nodes { 34 | finalCounts = append(finalCounts, node.Count+r[node.Nodename]) 35 | } 36 | sort.Ints(finalCounts) 37 | assert.ElementsMatch(t, []int{5, 5, 5, 7}, finalCounts) 38 | 39 | // partial fill is not possible 40 | n = 15 41 | nodes = deployedNodes() 42 | _, err = FillPlan(context.Background(), nodes, n, 0, 0) 43 | assert.True(t, errors.Is(err, types.ErrInsufficientResource)) 44 | 45 | // global fill is not possible 46 | n = 1 47 | nodes = deployedNodes() 48 | _, err = FillPlan(context.Background(), nodes, n, 0, 0) 49 | assert.Error(t, err) 50 | assert.Contains(t, err.Error(), "each node has enough workloads") 51 | 52 | // LimitNode 53 | n = 10 54 | nodes = deployedNodes() 55 | _, err = FillPlan(context.Background(), nodes, n, 0, 2) 56 | assert.NoError(t, err) 57 | 58 | // partial fill 59 | n = 1 60 | nodes = []Info{ 61 | { 62 | Nodename: "65", 63 | Capacity: 0, 64 | Count: 0, 65 | }, 66 | { 67 | Nodename: "67", 68 | Capacity: 10, 69 | Count: 0, 70 | }, 71 | } 72 | 73 | _, err = FillPlan(context.Background(), nodes, n, 0, 3) 74 | assert.Error(t, err) 75 | assert.Contains(t, err.Error(), "cannot alloc a fill node plan") 76 | 77 | nodes = genNodesByCapCount([]int{1, 2, 3, 4, 5}, []int{3, 3, 3, 3, 3}) 78 | r, err = FillPlan(context.Background(), nodes, 4, 0, 3) 79 | assert.Nil(t, err) 80 | assert.ElementsMatch(t, []int{3, 3, 4, 4, 4}, getFinalStatus(r, nodes)) 81 | assert.EqualValues(t, 1, r["4"]) 82 | assert.EqualValues(t, 1, r["3"]) 83 | assert.EqualValues(t, 1, r["2"]) 84 | 85 | _, err = FillPlan(context.Background(), nodes, 5, 1000, 0) 86 | assert.Contains(t, err.Error(), "not enough nodes that can fill up to 5 instances, require 1 nodes") 87 | } 88 | -------------------------------------------------------------------------------- /engine/docker/docker.go: -------------------------------------------------------------------------------- 1 | package docker 2 | 3 | import ( 4 | "context" 5 | "net/http" 6 | "strings" 7 | 8 | dockerapi "github.com/docker/docker/client" 9 | 10 | "github.com/projecteru2/core/engine" 11 | enginetypes "github.com/projecteru2/core/engine/types" 12 | "github.com/projecteru2/core/log" 13 | coretypes "github.com/projecteru2/core/types" 14 | "github.com/projecteru2/core/utils" 15 | ) 16 | 17 | const ( 18 | // TCPPrefixKey indicates tcp prefix 19 | TCPPrefixKey = "tcp://" 20 | // SockPrefixKey indicates unix sock prefix 21 | SockPrefixKey = "unix://" 22 | // Type indicates the docker engine type 23 | Type = "docker" 24 | ) 25 | 26 | // Engine is the engine for docker 27 | type Engine struct { 28 | client dockerapi.APIClient 29 | config coretypes.Config 30 | ep *enginetypes.Params 31 | } 32 | 33 | // MakeClient makes a docker client 34 | func MakeClient(ctx context.Context, config coretypes.Config, nodename, endpoint, ca, cert, key string) (engine.API, error) { 35 | var client *http.Client 36 | var err error 37 | logger := log.WithFunc("engine.docker.MakeClient") 38 | if strings.HasPrefix(endpoint, "unix://") { 39 | client = utils.GetUnixSockClient() 40 | } else { 41 | client, err = utils.GetHTTPSClient(ctx, config.CertPath, nodename, ca, cert, key) 42 | if err != nil { 43 | logger.Errorf(ctx, err, "GetHTTPSClient for %s %s", nodename, endpoint) 44 | return nil, err 45 | } 46 | } 47 | 48 | logger.Debugf(ctx, "Create new http.Client for %s, %s", endpoint, config.Docker.APIVersion) 49 | e, err := makeDockerClient(ctx, config, client, endpoint) 50 | if err != nil { 51 | return nil, err 52 | } 53 | e.ep = &enginetypes.Params{ 54 | Nodename: nodename, 55 | Endpoint: endpoint, 56 | CA: ca, 57 | Cert: cert, 58 | Key: key, 59 | } 60 | return e,
nil 61 | } 62 | 63 | // Info shows node info 64 | // 2 second timeout 65 | // used to be 5, but the client won't wait that long 66 | func (e *Engine) Info(ctx context.Context) (*enginetypes.Info, error) { 67 | r, err := e.client.Info(ctx) 68 | if err != nil { 69 | return nil, err 70 | } 71 | return &enginetypes.Info{Type: Type, ID: r.ID, NCPU: r.NCPU, MemTotal: r.MemTotal}, nil 72 | } 73 | 74 | func (e *Engine) GetParams() *enginetypes.Params { 75 | return e.ep 76 | } 77 | 78 | // Ping tests the connection 79 | func (e *Engine) Ping(ctx context.Context) error { 80 | _, err := e.client.Ping(ctx) 81 | return err 82 | } 83 | 84 | // CloseConn closes the connection 85 | func (e *Engine) CloseConn() error { 86 | return e.client.Close() 87 | } 88 | --------------------------------------------------------------------------------
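A minimal sketch of how a caller might build and probe the docker engine above through MakeClient, Info, Ping and CloseConn, assuming the engine.API interface exposes those methods as the implementations suggest. The node name, the unix socket endpoint and the zero-value coretypes.Config are assumptions made to keep the example short; a real setup would populate CertPath, Docker.APIVersion and the TLS material for tcp:// endpoints.

package main

import (
	"context"
	"fmt"

	"github.com/projecteru2/core/engine/docker"
	coretypes "github.com/projecteru2/core/types"
)

func main() {
	ctx := context.Background()

	// assumption: a zero-value config is enough for a local unix socket;
	// tcp:// endpoints additionally need CertPath, Docker.APIVersion and ca/cert/key
	config := coretypes.Config{}

	api, err := docker.MakeClient(ctx, config, "node-1", "unix:///var/run/docker.sock", "", "", "")
	if err != nil {
		panic(err)
	}
	defer api.CloseConn() // best-effort close of the underlying client

	if err := api.Ping(ctx); err != nil { // cheap connectivity check
		panic(err)
	}

	info, err := api.Info(ctx) // Type, ID, NCPU, MemTotal as mapped above
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s %s: %d CPUs, %d bytes memory\n", info.Type, info.ID, info.NCPU, info.MemTotal)
}

Because the endpoint starts with unix://, MakeClient takes the GetUnixSockClient path and never touches the TLS arguments, which is why they can stay empty here.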