├── AUTHORS ├── .gitignore ├── utils ├── protohelpers.go ├── errorhandling.go ├── expressions.go ├── formatter.go ├── defaultConfig.go └── defaultConfig_test.go ├── SECURITY.md ├── nray.go ├── .github └── workflows │ ├── go.yml │ └── codeql-analysis.yml ├── .goreleaser.yml ├── core ├── type_node.go ├── type_job.go ├── server_test.go ├── type_globalconfig.go ├── targetGeneration │ ├── blacklist.go │ ├── networkExpansion.go │ ├── standardTGBackend.go │ ├── targetGenerator_test.go │ └── targetGenerator.go ├── messageQueue.go ├── scannernode.go ├── type_pool.go ├── messageStuff.go └── server.go ├── events ├── eventhandler.go ├── TerminalEventHandler.go └── JSONFileEventHandler.go ├── schemas ├── events.proto ├── messages.proto └── events.pb.go ├── Makefile ├── cmd ├── root.go ├── server.go ├── node.go └── scan.go ├── go.mod ├── scanner ├── tcp.go ├── scanner.go ├── udp.go └── types.go ├── README.md ├── nray-conf_discovery_example.yaml ├── nray-conf.yaml └── go.sum /AUTHORS: -------------------------------------------------------------------------------- 1 | List of contributors and authors; these are the copyright holders for nray. 2 | 3 | Michael Eder 4 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # IDE 2 | .vscode/ 3 | 4 | # build files 5 | build/ 6 | 7 | # Binaries for programs and plugins 8 | *.exe 9 | *.exe~ 10 | *.dll 11 | *.so 12 | *.dylib 13 | 14 | # Test binary, build with `go test -c` 15 | *.test 16 | 17 | # Output of the go coverage tool, specifically when used with LiteIDE 18 | *.out 19 | 20 | 21 | # release 22 | release.zip 23 | nray 24 | dist/ 25 | -------------------------------------------------------------------------------- /utils/protohelpers.go: -------------------------------------------------------------------------------- 1 | package utils 2 | 3 | import ( 4 | "github.com/golang/protobuf/jsonpb" 5 | structpb "github.com/golang/protobuf/ptypes/struct" 6 | ) 7 | 8 | // JSONtoProtoValue converts arbitrary JSON to a protobuf Value 9 | func JSONtoProtoValue(json []byte) (*structpb.Value, error) { 10 | result := &structpb.Value{} 11 | if err := jsonpb.UnmarshalString(string(json), result); err != nil { 12 | return nil, err 13 | } 14 | return result, nil 15 | } 16 | -------------------------------------------------------------------------------- /SECURITY.md: -------------------------------------------------------------------------------- 1 | # Security Policy 2 | 3 | ## Supported Versions 4 | 5 | Latest release version plus current master branch. It is a free-time project and I don't have time to maintain multiple version lines. 6 | 7 | ## Reporting 8 | 9 | Just open an issue. Pull requests are appreciated, so if it is an easy one, feel free to also provide a solution approach right away. 
10 | 11 | ## Bounties 12 | 13 | None, but if we ever meet in person at a conference or the like, be assured I'll buy you a beer / coffee / club mate and have a chat with you :) -------------------------------------------------------------------------------- /nray.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | 6 | "github.com/nray-scanner/nray/cmd" 7 | ) 8 | 9 | // These variables can be set at build time :) 10 | var server string 11 | var port string 12 | 13 | // These are set by goreleaser 14 | var ( 15 | version = "dev" 16 | commit = "none" 17 | date = "unknown" 18 | builtBy = "unknown" 19 | ) 20 | 21 | func main() { 22 | printMeta() 23 | cmd.SetHardcodedServerAndPort(server, port) 24 | cmd.Execute() 25 | } 26 | 27 | func printMeta() { 28 | fmt.Printf("nray %s\nBuilt on %s from commit %s\n", version, date, commit) 29 | } 30 | -------------------------------------------------------------------------------- /.github/workflows/go.yml: -------------------------------------------------------------------------------- 1 | name: Go 2 | on: 3 | push: 4 | branches: 5 | - master 6 | - develop 7 | pull_request: 8 | branches: 9 | - master 10 | - develop 11 | jobs: 12 | test: 13 | strategy: 14 | matrix: 15 | go-version: [1.21, 1.22] 16 | platform: [ubuntu-latest, macos-latest, windows-latest] 17 | runs-on: ${{ matrix.platform }} 18 | steps: 19 | - name: Install Go 20 | uses: actions/setup-go@v4 21 | with: 22 | go-version: ${{ matrix.go-version }} 23 | - name: Checkout code 24 | uses: actions/checkout@v4 25 | - name: Test 26 | run: go test ./... 27 | -------------------------------------------------------------------------------- /.github/workflows/codeql-analysis.yml: -------------------------------------------------------------------------------- 1 | name: "CodeQL" 2 | 3 | on: 4 | push: 5 | branches: [master, develop] 6 | pull_request: 7 | branches: [master] 8 | schedule: 9 | - cron: '0 22 * * 3' 10 | 11 | jobs: 12 | analyze: 13 | runs-on: ubuntu-latest 14 | permissions: 15 | actions: read 16 | contents: read 17 | security-events: write 18 | 19 | strategy: 20 | fail-fast: false 21 | matrix: 22 | language: ['go'] 23 | 24 | steps: 25 | - uses: actions/checkout@v4 26 | 27 | - uses: github/codeql-action/init@v3 28 | with: 29 | languages: ${{ matrix.language }} 30 | 31 | - uses: github/codeql-action/autobuild@v3 32 | 33 | - uses: github/codeql-action/analyze@v3 34 | -------------------------------------------------------------------------------- /.goreleaser.yml: -------------------------------------------------------------------------------- 1 | # This is an example .goreleaser.yml file with some sane defaults. 
2 | # Make sure to check the documentation at http://goreleaser.com 3 | builds: 4 | - env: 5 | - CGO_ENABLED=0 6 | goos: 7 | - linux 8 | - windows 9 | - darwin 10 | goarch: 11 | - amd64 12 | - arm64 13 | goarm: 14 | - "7" # Only needed for ARMv7 (Raspberry Pi 3/4 32-bit), can be omitted if only ARM64 is needed 15 | archives: 16 | - id: default 17 | format: zip 18 | files: 19 | - nray* 20 | checksum: 21 | name_template: 'checksums.txt' 22 | snapshot: 23 | name_template: "{{ .Tag }}-next" 24 | changelog: 25 | sort: asc 26 | filters: 27 | exclude: 28 | - '^docs:' 29 | - '^test:' 30 | release: 31 | prerelease: "true" -------------------------------------------------------------------------------- /core/type_node.go: -------------------------------------------------------------------------------- 1 | package core 2 | 3 | import ( 4 | "sync" 5 | "time" 6 | 7 | targetgeneration "github.com/nray-scanner/nray/core/targetGeneration" 8 | ) 9 | 10 | // Node represents relevant information about a node 11 | type Node struct { 12 | ID string 13 | Name string 14 | MetaInfo string 15 | LastHeartbeat time.Time 16 | CurrentWork *targetgeneration.AnyTargets 17 | heartBeatLock sync.RWMutex 18 | scanPaused bool 19 | stopNode bool 20 | stopLock sync.RWMutex 21 | } 22 | 23 | func (node *Node) setStop(value bool) { 24 | node.stopLock.Lock() 25 | defer node.stopLock.Unlock() 26 | node.stopNode = value 27 | } 28 | 29 | func (node *Node) getStop() bool { 30 | node.stopLock.RLock() 31 | defer node.stopLock.RUnlock() 32 | return node.stopNode 33 | } 34 | -------------------------------------------------------------------------------- /core/type_job.go: -------------------------------------------------------------------------------- 1 | package core 2 | 3 | import ( 4 | "sync/atomic" 5 | "time" 6 | 7 | targetgeneration "github.com/nray-scanner/nray/core/targetGeneration" 8 | ) 9 | 10 | var globalJobCounter uint64 = 1 11 | 12 | // JobState defines the state a Job is currently in 13 | type JobState int 14 | 15 | const ( 16 | waiting JobState = iota 17 | inProgress 18 | ) 19 | 20 | // Job keeps the state regarding work items 21 | type Job struct { 22 | id uint64 23 | workItems targetgeneration.AnyTargets 24 | state JobState 25 | started time.Time 26 | nodeIDWorkingOnJob string 27 | timedOutCounter uint 28 | } 29 | 30 | func createJob(target targetgeneration.AnyTargets) Job { 31 | // Atomically increment counter and generate our own ID in one step: 32 | nextID := atomic.AddUint64(&globalJobCounter, 1) - 1 33 | job := Job{ 34 | id: nextID, 35 | workItems: target, 36 | state: waiting, 37 | } 38 | return job 39 | } 40 | -------------------------------------------------------------------------------- /core/server_test.go: -------------------------------------------------------------------------------- 1 | package core 2 | 3 | import ( 4 | "bytes" 5 | "fmt" 6 | "testing" 7 | 8 | "github.com/nray-scanner/nray/utils" 9 | "github.com/spf13/viper" 10 | ) 11 | 12 | func TestInitGlobalConfig(t *testing.T) { 13 | globalConfigPositive(t) 14 | } 15 | 16 | func globalConfigPositive(t *testing.T) { 17 | testdata := []byte(`listen: [8601, '80', "443"]`) 18 | parsedShouldBe := []uint32{8601, 80, 443} 19 | v := viper.New() 20 | v.SetConfigType("yaml") 21 | 22 | v.ReadConfig(bytes.NewBuffer(testdata)) 23 | utils.ApplyDefaultConfig(v) 24 | err := InitGlobalServerConfig(v) 25 | if err != nil { 26 | fmt.Println(err.Error()) 27 | t.Errorf("Unable to init global server config") 28 | } 29 | if len(CurrentConfig.ListenPorts) != len(parsedShouldBe) { 
30 | t.Errorf("Not listening on all ports that should be listened on") 31 | } 32 | for pos, elem := range CurrentConfig.ListenPorts { 33 | if elem != parsedShouldBe[pos] { 34 | t.Errorf("Listening on wrong port") 35 | } 36 | } 37 | } 38 | -------------------------------------------------------------------------------- /utils/errorhandling.go: -------------------------------------------------------------------------------- 1 | package utils 2 | 3 | import ( 4 | "bytes" 5 | "os" 6 | "runtime" 7 | "strconv" 8 | 9 | log "github.com/sirupsen/logrus" 10 | ) 11 | 12 | // CheckError provides general error handling 13 | func CheckError(err error, critical bool) { 14 | if err != nil { 15 | if critical { 16 | log.WithFields(log.Fields{ 17 | "module": "utils.errorhandling", 18 | "src": "CheckError", 19 | }).Errorf("An error occurred: %v", err.Error()) 20 | os.Exit(1) 21 | } 22 | log.WithFields(log.Fields{ 23 | "module": "utils.errorhandling", 24 | "src": "CheckError", 25 | }).Warningf("An error occurred: %v", err.Error()) 26 | } 27 | } 28 | 29 | // DbgGetGID returns the number of the goroutine calling this function. DEBUG ONLY!! 30 | func DbgGetGID() uint64 { 31 | b := make([]byte, 64) 32 | b = b[:runtime.Stack(b, false)] 33 | b = bytes.TrimPrefix(b, []byte("goroutine ")) 34 | b = b[:bytes.IndexByte(b, ' ')] 35 | n, _ := strconv.ParseUint(string(b), 10, 64) 36 | return n 37 | } 38 | -------------------------------------------------------------------------------- /events/eventhandler.go: -------------------------------------------------------------------------------- 1 | package events 2 | 3 | import ( 4 | "github.com/golang/protobuf/jsonpb" 5 | nraySchema "github.com/nray-scanner/nray/schemas" 6 | "github.com/spf13/viper" 7 | ) 8 | 9 | // RegisteredHandlers contains all handlers that may be configured by a user 10 | var RegisteredHandlers = []string{"json-file", "terminal", "elasticsearch"} 11 | 12 | var protomarshaller = jsonpb.Marshaler{ 13 | EnumsAsInts: false, 14 | EmitDefaults: true, 15 | Indent: "", 16 | OrigName: true, 17 | AnyResolver: nil, 18 | } 19 | 20 | // GetEventHandler returns the correct event handler for an event handler name 21 | func GetEventHandler(EventHandlerName string) EventHandler { 22 | switch EventHandlerName { 23 | case "json-file": 24 | return &JSONFileEventHandler{} 25 | case "terminal": 26 | return &TerminalEventHandler{} 27 | default: 28 | return nil 29 | } 30 | } 31 | 32 | // EventHandler is the interface each type of handling events has to implement 33 | type EventHandler interface { 34 | Configure(*viper.Viper) error 35 | ProcessEvents([]*nraySchema.Event) 36 | ProcessEventStream(<-chan *nraySchema.Event) 37 | Close() error 38 | } 39 | 
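// Usage sketch (added for illustration; not part of the original source):
//
//	handler := GetEventHandler("terminal")
//	if handler == nil {
//		// Unknown name. Note that "elasticsearch" appears in
//		// RegisteredHandlers but has no case above, so it also yields nil.
//	} else if err := handler.Configure(viper.New()); err == nil {
//		defer handler.Close()
//		handler.ProcessEvents(myEvents) // myEvents: []*nraySchema.Event
//	}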
-------------------------------------------------------------------------------- /schemas/events.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | import "google/protobuf/timestamp.proto"; 3 | import "google/protobuf/struct.proto"; 4 | 5 | package nraySchema; 6 | 7 | /* Event is a container for everything that happens 8 | at a node and should later on be handled by EventHandlers */ 9 | message Event { 10 | string nodeID = 1; 11 | string nodeName = 2; 12 | google.protobuf.Timestamp timestamp = 3; 13 | string scannername = 6; 14 | oneof EventData { 15 | EnvironmentInformation environment = 7; 16 | ScanResult result = 8; 17 | } 18 | } 19 | 20 | message ScanResult { 21 | string target = 4; 22 | uint32 port = 5; 23 | oneof result { 24 | PortScanResult portscan = 8; 25 | ZGrab2ScanResult zgrabscan = 9; 26 | } 27 | } 28 | 29 | /* EnvironmentInformation tells the server 30 | under which circumstances nodes are running */ 31 | message EnvironmentInformation { 32 | string hostname = 1; 33 | string os = 2; 34 | string pid = 3; 35 | string processname = 4; 36 | string username = 5; 37 | string cpumodelname = 6; 38 | } 39 | 40 | /* PortScanResult contains the outcome of 41 | a port scan against a single port on a single host */ 42 | message PortScanResult { 43 | string target = 1; 44 | uint32 port = 2; 45 | bool open = 3; 46 | string scantype = 4; 47 | uint32 timeout = 5; 48 | } 49 | 50 | message ZGrab2ScanResult { 51 | google.protobuf.Value jsonResult = 1; 52 | } 53 | 54 | -------------------------------------------------------------------------------- /utils/expressions.go: -------------------------------------------------------------------------------- 1 | package utils 2 | 3 | import ( 4 | "regexp" 5 | "strings" 6 | ) 7 | 8 | // RegexIPv4 matches on an IPv4 address 9 | const RegexIPv4 = "^(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$" 10 | 11 | // RegexNetIPv4 matches on a CIDR IPv4 network specification 12 | const RegexNetIPv4 = "^(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)(\\/(3[0-2]|[1-2][0-9]|[0-9]))$" 13 | 14 | // RegexPortRange matches strings of the form "{number}-{number}" where number are 1 to 5 digits 15 | const RegexPortRange = "^[0-9]{1,5}-[0-9]{1,5}$" 16 | 17 | // RegexTopPorts matches strings like "top25" or "Top2500" 18 | const RegexTopPorts = "^[tT]op[-]?[0-9]{1,4}$" 19 | 20 | // RegexThousandNumber matches any number of one to four digits 21 | const RegexThousandNumber = "[0-9]{1,4}" 22 | 23 | // Ipv4Regexpr is the above IPv4 regex, already conveniently compiled 24 | var Ipv4Regexpr = regexp.MustCompile(RegexIPv4) 25 | 26 | // Ipv4NetRegexpr is the above IPv4 CIDR regex, already conveniently compiled 27 | var Ipv4NetRegexpr = regexp.MustCompile(RegexNetIPv4) 28 | 29 | // MayBeFQDN returns true if there are no slashes or colons in the string 30 | func MayBeFQDN(toCheck string) bool { 31 | // If there is no scheme and no port, we may be good 32 | // Simply check if there are any ":" or "/" in the string, 33 | // otherwise give it a try 34 | return !strings.ContainsAny(toCheck, ":/") 35 | } 36 | 
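// Usage sketch (added for illustration; the literals below are examples):
//
//	Ipv4Regexpr.MatchString("192.168.0.1")     // true
//	Ipv4NetRegexpr.MatchString("10.0.0.0/24")  // true: CIDR mask 0-32
//	Ipv4NetRegexpr.MatchString("10.0.0.1")     // false: the mask is mandatory
//	MayBeFQDN("scanme.example.org")            // true: no ':' or '/'
//	MayBeFQDN("https://scanme.example.org")    // false: contains ':' and '/'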
-------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | TARGET_NAME=nray 2 | 3 | all: prepare build-localarch 4 | 5 | clean: 6 | rm -rf ./build/ 7 | 8 | build-all: build-linux-amd64 build-linux-arm64 build-linux-armv7 build-windows-amd64 build-windows-arm64 build-darwin-amd64 build-darwin-arm64 9 | 10 | prepare: 11 | mkdir -p ./build 12 | cp nray-conf.yaml ./build/ 13 | 14 | 15 | build-localarch: 16 | go build -race -ldflags "-X main.server=127.0.0.1 -X main.port=8601" -o build/$(TARGET_NAME)_localhardcoded ./nray.go 17 | go build -race -o build/$(TARGET_NAME) ./nray.go 18 | 19 | build-linux-amd64: 20 | CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -ldflags "-s -w" -o build/$(TARGET_NAME)-linux-amd64 ./nray.go 21 | 22 | build-linux-arm64: 23 | CGO_ENABLED=0 GOOS=linux GOARCH=arm64 go build -ldflags "-s -w" -o build/$(TARGET_NAME)-linux-arm64 ./nray.go 24 | 25 | build-linux-armv7: 26 | CGO_ENABLED=0 GOOS=linux GOARCH=arm GOARM=7 go build -ldflags "-s -w" -o build/$(TARGET_NAME)-linux-armv7 ./nray.go 27 | 28 | build-windows-amd64: 29 | CGO_ENABLED=0 GOOS=windows GOARCH=amd64 go build -ldflags "-s -w" -o build/$(TARGET_NAME)-windows-amd64.exe ./nray.go 30 | 31 | build-windows-arm64: 32 | CGO_ENABLED=0 GOOS=windows GOARCH=arm64 go build -ldflags "-s -w" -o build/$(TARGET_NAME)-windows-arm64.exe ./nray.go 33 | 34 | build-darwin-amd64: 35 | CGO_ENABLED=0 GOOS=darwin GOARCH=amd64 go build -ldflags "-s -w" -o build/$(TARGET_NAME)-darwin-amd64 ./nray.go 36 | 37 | build-darwin-arm64: 38 | CGO_ENABLED=0 GOOS=darwin GOARCH=arm64 go build -ldflags "-s -w" -o build/$(TARGET_NAME)-darwin-arm64 ./nray.go 39 | 40 | calculate-hashes: 41 | cd build; sha256sum * > ./checksums.txt; cd .. 42 | 43 | create-archive: 44 | zip -r release.zip build/ 45 | 46 | release: clean prepare build-all calculate-hashes -------------------------------------------------------------------------------- /cmd/root.go: -------------------------------------------------------------------------------- 1 | package cmd 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | 7 | "github.com/nray-scanner/nray/core" 8 | "github.com/spf13/cobra" 9 | ) 10 | 11 | var cfgFile string 12 | var nodeCmdArgs core.NodeCmdArgs 13 | 14 | // SetHardcodedServerAndPort is a workaround to get values set by the linker into 15 | // the namespace of the cmd package. Unfortunately setting these values directly 16 | // for the cmd package does not work since the linker is not able to assign them 17 | // when initializers are used, a concept cobra heavily builds on. 18 | func SetHardcodedServerAndPort(hardcodedServer string, hardcodedPort string) { 19 | nodeCmdArgs = core.NodeCmdArgs{ 20 | Server: hardcodedServer, 21 | Port: hardcodedPort, 22 | } 23 | } 24 | 25 | // rootCmd represents the base command when called without any subcommands 26 | var rootCmd = &cobra.Command{ 27 | Use: "nray", 28 | Short: "A modern, performant, distributed port-scanner", 29 | Long: `nray is a port scanner written from scratch, built to get 30 | work done fast and reliably. It allows attaching multiple scanner 31 | nodes and distributing work among them in order to speed up scans 32 | and improve the accuracy of results. 33 | `, 34 | Run: func(cmd *cobra.Command, args []string) { 35 | if nodeCmdArgs.Server == "" || nodeCmdArgs.Port == "" { 36 | cmd.Help() 37 | os.Exit(1) 38 | } 39 | core.RunNode(nodeCmdArgs) 40 | }, 41 | } 42 | 43 | // Execute adds all child commands to the root command and sets flags appropriately. 44 | // This is called by main.main(). It only needs to happen once for the rootCmd. 45 | func Execute() { 46 | if err := rootCmd.Execute(); err != nil { 47 | fmt.Println(err) 48 | os.Exit(1) 49 | } 50 | } 51 | -------------------------------------------------------------------------------- /cmd/server.go: -------------------------------------------------------------------------------- 1 | package cmd 2 | 3 | import ( 4 | "time" 5 | 6 | log "github.com/sirupsen/logrus" 7 | 8 | "github.com/nray-scanner/nray/utils" 9 | 10 | "github.com/nray-scanner/nray/core" 11 | "github.com/spf13/cobra" 12 | "github.com/spf13/viper" 13 | ) 14 | 15 | var serverCmd = &cobra.Command{ 16 | Use: "server", 17 | Short: "Run as server, waiting for nodes to connect and perform a scan", 18 | Long: `Scanning with nodes unleashes all of nray's powers. 
19 | Perform scanning with all configuration options and multiple scanner nodes at once`, 20 | Run: func(cmd *cobra.Command, args []string) { 21 | config := initServerConfig() 22 | err := core.InitGlobalServerConfig(config) 23 | utils.CheckError(err, false) 24 | core.Start() 25 | }, 26 | } 27 | 28 | func init() { 29 | //cobra.OnInitialize(initConfig) 30 | log.SetFormatter(&log.TextFormatter{ 31 | FullTimestamp: true, 32 | TimestampFormat: time.RFC3339, 33 | }) 34 | 35 | rootCmd.AddCommand(serverCmd) 36 | serverCmd.PersistentFlags().StringVarP(&cfgFile, "config", "c", "", "config file") 37 | serverCmd.MarkPersistentFlagRequired("config") 38 | } 39 | 40 | // initServerConfig reads in the config file and ENV variables if set. 41 | func initServerConfig() *viper.Viper { 42 | log.SetFormatter(&utils.Formatter{ 43 | HideKeys: true, 44 | }) 45 | config := viper.New() 46 | if cfgFile != "" { 47 | // Use config file from the flag. 48 | config.SetConfigFile(cfgFile) 49 | } else { 50 | // We want config to be explicitly set 51 | } 52 | 53 | // If a config file is found, read it in. 54 | if err := config.ReadInConfig(); err == nil { 55 | log.WithFields(log.Fields{ 56 | "module": "cmd.server", 57 | "src": "initServerConfig", 58 | }).Infof("Using config file: %s", config.ConfigFileUsed()) 59 | } else { 60 | utils.CheckError(err, true) 61 | } 62 | config = utils.ApplyDefaultConfig(config) 63 | return config 64 | } 65 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/nray-scanner/nray 2 | 3 | go 1.24.0 4 | 5 | toolchain go1.24.2 6 | 7 | require ( 8 | github.com/apparentlymart/go-cidr v1.1.0 9 | github.com/denisbrodbeck/machineid v1.0.1 10 | github.com/golang/protobuf v1.5.4 11 | github.com/golang/time v0.12.0 12 | github.com/shirou/gopsutil v3.21.11+incompatible 13 | github.com/sirupsen/logrus v1.9.3 14 | github.com/spf13/cobra v1.9.1 15 | github.com/spf13/viper v1.20.1 16 | github.com/zmap/go-iptree v0.0.0-20210731043055-d4e632617837 17 | nanomsg.org/go/mangos/v2 v2.0.8 18 | ) 19 | 20 | require ( 21 | github.com/asergeyev/nradix v0.0.0-20220715161825-e451993e425c // indirect 22 | github.com/fsnotify/fsnotify v1.9.0 // indirect 23 | github.com/go-ole/go-ole v1.3.0 // indirect 24 | github.com/go-viper/mapstructure/v2 v2.3.0 // indirect 25 | github.com/google/go-cmp v0.7.0 // indirect 26 | github.com/inconshreveable/mousetrap v1.1.0 // indirect 27 | github.com/pelletier/go-toml/v2 v2.2.4 // indirect 28 | github.com/rogpeppe/go-internal v1.14.1 // indirect 29 | github.com/sagikazarmark/locafero v0.9.0 // indirect 30 | github.com/sourcegraph/conc v0.3.0 // indirect 31 | github.com/spf13/afero v1.14.0 // indirect 32 | github.com/spf13/cast v1.9.2 // indirect 33 | github.com/spf13/pflag v1.0.6 // indirect 34 | github.com/subosito/gotenv v1.6.0 // indirect 35 | github.com/tklauser/go-sysconf v0.3.15 // indirect 36 | github.com/tklauser/numcpus v0.10.0 // indirect 37 | github.com/yusufpapurcu/wmi v1.2.4 // indirect 38 | go.uber.org/multierr v1.11.0 // indirect 39 | golang.org/x/sys v0.33.0 // indirect 40 | golang.org/x/text v0.26.0 // indirect 41 | google.golang.org/protobuf v1.36.6 // indirect 42 | gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect 43 | gopkg.in/yaml.v3 v3.0.1 // indirect 44 | ) 45 | 46 | replace github.com/golang/time => golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 47 | 
-------------------------------------------------------------------------------- /core/type_globalconfig.go: -------------------------------------------------------------------------------- 1 | package core 2 | 3 | import ( 4 | "crypto/tls" 5 | 6 | "github.com/nray-scanner/nray/events" 7 | nraySchema "github.com/nray-scanner/nray/schemas" 8 | ) 9 | 10 | // GlobalConfig holds configuration settings 11 | // that are relevant for the operation of the 12 | // core 13 | type GlobalConfig struct { 14 | ListenPorts []uint32 15 | ListenHost string 16 | TLSConfig *tls.Config 17 | Pools []*Pool 18 | EventHandlers []events.EventHandler 19 | } 20 | 21 | // Returns a pointer to the node with the given ID 22 | func (gc GlobalConfig) getNodeFromID(searchID string) *Node { 23 | for _, pool := range gc.Pools { 24 | if node, exists := pool.getNodeFromID(searchID); exists { 25 | return node 26 | } 27 | } 28 | return nil 29 | } 30 | 31 | func (gc GlobalConfig) getPoolFromNodeID(searchID string) *Pool { 32 | for _, pool := range gc.Pools { 33 | if _, exists := pool.getNodeFromID(searchID); exists { 34 | return pool 35 | } 36 | } 37 | return nil 38 | } 39 | 40 | func (gc GlobalConfig) getPool(poolID int) *Pool { 41 | if poolID > 0 && len(gc.Pools) > poolID { 42 | return gc.Pools[poolID] 43 | } 44 | return nil 45 | } 46 | 47 | // Returns a pointer to the pool with the fewest members 48 | func (gc GlobalConfig) getSmallestPool() *Pool { 49 | size := 0 50 | var smallest *Pool 51 | for _, pool := range gc.Pools { 52 | // In case we have no smallest yet, initialise it with the first pool 53 | if smallest == nil { 54 | smallest = pool 55 | size = pool.getCurrentPoolSize() 56 | } else if thisPoolSize := pool.getCurrentPoolSize(); thisPoolSize < size { 57 | smallest = pool 58 | size = thisPoolSize 59 | } 60 | } 61 | return smallest 62 | } 63 | 64 | // LogEvents sends a slice of events to all registered event handlers 65 | func (gc GlobalConfig) LogEvents(events []*nraySchema.Event) { 66 | for _, handler := range gc.EventHandlers { 67 | handler.ProcessEvents(events) 68 | } 69 | } 70 | 71 | // CloseEventHandlers calls Close() on all registered event handlers 72 | func (gc GlobalConfig) CloseEventHandlers() { 73 | for _, eventHandler := range gc.EventHandlers { 74 | eventHandler.Close() 75 | } 76 | } 77 | -------------------------------------------------------------------------------- /scanner/tcp.go: -------------------------------------------------------------------------------- 1 | package scanner 2 | 3 | import ( 4 | "fmt" 5 | "net" 6 | "strings" 7 | "time" 8 | 9 | "github.com/nray-scanner/nray/utils" 10 | log "github.com/sirupsen/logrus" 11 | "github.com/spf13/viper" 12 | ) 13 | 14 | // PortscanResult is the struct that contains all information about the scan and the results 15 | type PortscanResult struct { 16 | Target string `json:"Target"` 17 | Port uint32 `json:"Port"` 18 | Open bool `json:"Open"` 19 | Scantype string `json:"Scantype"` 20 | Timeout time.Duration `json:"Timeout"` 21 | } 22 | 23 | // TCPConnectIsOpen uses the operating system's mechanism to open a 24 | // TCP connection to a given target IP address at a given port. 25 | // Timeout specifies how long to wait before aborting the connection 26 | // attempt 27 | func TCPConnectIsOpen(target string, port uint32, timeout time.Duration) (*PortscanResult, error) { 28 | if target == "" { 29 | return nil, fmt.Errorf("target is empty") 30 | } 31 | conn, err := net.DialTimeout("tcp", fmt.Sprintf("%s:%d", target, port), timeout) 32 | if err != nil { 33 | if strings.Contains(err.Error(), "too many open files") { 34 | log.WithFields(log.Fields{ 35 | "module": "scanner.tcp", 36 | "src": "tcpConnectIsOpen", 37 | }).Warning("Too many open files. You are running too many scan workers and the OS is limiting file descriptors. YOU ARE MISSING SCAN RESULTS. Scan with fewer workers") 38 | } 39 | return nil, nil // port is closed 40 | } 41 | defer conn.Close() 42 | result := PortscanResult{ 43 | Target: target, 44 | Port: port, 45 | Open: true, 46 | Scantype: "tcpconnect", 47 | Timeout: timeout, 48 | } 49 | return &result, nil 50 | } 51 | 
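// Usage sketch (added for illustration): callers must handle both the error
// and the nil result that signals a closed or filtered port. The target below
// is an RFC 5737 documentation address chosen as an example.
//
//	result, err := TCPConnectIsOpen("192.0.2.10", 443, 1500*time.Millisecond)
//	switch {
//	case err != nil:
//		// invalid input, e.g. an empty target
//	case result == nil:
//		// no connection: the port is closed, filtered or the attempt timed out
//	case result.Open:
//		// connection succeeded; result carries target, port and scan type
//	}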
52 | // TCPScanner represents the built-in TCP scanning functionality of nray. 53 | // If other existing scanners or different scanning approaches are 54 | // required, it should not be hard to replace this 55 | type TCPScanner struct { 56 | timeout time.Duration 57 | } 58 | 59 | // Configure loads a viper configuration and sets the appropriate values 60 | func (tcpscan *TCPScanner) Configure(config *viper.Viper) { 61 | config = utils.ApplyDefaultScannerTCPConfig(config) 62 | tcpscan.timeout = config.GetDuration("timeout") 63 | } 64 | -------------------------------------------------------------------------------- /events/TerminalEventHandler.go: -------------------------------------------------------------------------------- 1 | package events 2 | 3 | import ( 4 | nraySchema "github.com/nray-scanner/nray/schemas" 5 | "github.com/nray-scanner/nray/utils" 6 | 7 | log "github.com/sirupsen/logrus" 8 | "github.com/spf13/viper" 9 | ) 10 | 11 | // TerminalEventHandler prints results to stdout 12 | type TerminalEventHandler struct { 13 | eventChan chan string 14 | eventFilter map[string]interface{} 15 | } 16 | 17 | // Configure sets up the event channel and filters and starts the printer goroutine 18 | func (t *TerminalEventHandler) Configure(config *viper.Viper) error { 19 | config = utils.ApplyDefaultEventTerminalConfig(config) 20 | t.eventChan = make(chan string, config.GetInt("internal.channelsize")) 21 | t.eventFilter = config.GetStringMap("filter") 22 | go t.startEventPrinter() 23 | log.WithFields(log.Fields{ 24 | "module": "events.TerminalEventHandler", 25 | "src": "Configure", 26 | }).Debug("Started TerminalEventHandler") 27 | return nil 28 | } 29 | 30 | // ProcessEvents logs all events to stdout 31 | func (t *TerminalEventHandler) ProcessEvents(events []*nraySchema.Event) { 32 | go func(events []*nraySchema.Event) { 33 | for _, event := range events { 34 | serialized, err := protomarshaller.MarshalToString(event) 35 | utils.CheckError(err, false) 36 | t.eventChan <- string(serialized) 37 | } 38 | }(events) 39 | } 40 | 41 | // ProcessEventStream works like ProcessEvents but reads events from a stream 42 | func (t *TerminalEventHandler) ProcessEventStream(eventStream <-chan *nraySchema.Event) { 43 | log.WithFields(log.Fields{ 44 | "module": "events.TerminalEventHandler", 45 | "src": "ProcessEventStream", 46 | }).Debug("Processing Event Stream") 47 | for event := range eventStream { 48 | serialized, err := protomarshaller.MarshalToString(event) 49 | utils.CheckError(err, false) 50 | t.eventChan <- string(serialized) 51 | } 52 | } 53 | 
54 | // Close has nothing to do, since no output channel has to be closed 55 | func (t *TerminalEventHandler) Close() error { 56 | return nil 57 | } 58 | 59 | func (t *TerminalEventHandler) startEventPrinter() { 60 | log.WithFields(log.Fields{ 61 | "module": "events.TerminalEventHandler", 62 | "src": "startEventPrinter", 63 | }).Debug("Starting event printer") 64 | for { 65 | event, more := <-t.eventChan 66 | if more { 67 | log.Infof("Event: %s", event) 68 | } else { 69 | return 70 | } 71 | } 72 | } 73 | -------------------------------------------------------------------------------- /cmd/node.go: -------------------------------------------------------------------------------- 1 | package cmd 2 | 3 | import ( 4 | "github.com/nray-scanner/nray/core" 5 | "github.com/spf13/cobra" 6 | ) 7 | 8 | // These are required, otherwise cobra's default initializers overwrite values passed at compile time 9 | 10 | var nodeCmd = &cobra.Command{ 11 | Use: "node", 12 | Short: "node is the scanner node component that joins a scanning fleet controlled by an upstream server.", 13 | Long: `The nray node connects to an upstream nray server and performs network discovery 14 | scans on behalf of the server. On its own, it is useless.`, 15 | Run: func(cmd *cobra.Command, args []string) { 16 | core.RunNode(nodeCmdArgs) 17 | }, 18 | } 19 | 20 | // Get configuration from command line 21 | func parseCmdLine() { 22 | nodeCmd.PersistentFlags().BoolVar(&nodeCmdArgs.Debug, "debug", false, "Enable debug output") 23 | 24 | nodeCmd.PersistentFlags().StringVarP(&nodeCmdArgs.Server, "server", "s", "", 25 | "upstream nray server address") 26 | nodeCmd.PersistentFlags().StringVarP(&nodeCmdArgs.Port, "port", "p", "", 27 | "upstream nray server port") 28 | nodeCmd.PersistentFlags().Int32Var(&nodeCmdArgs.PreferredPool, "preferred-pool", -1, 29 | "Pool to be preferably placed in at the server. If configured, the server respects this as long as the pool exists") 30 | nodeCmd.PersistentFlags().StringVar(&nodeCmdArgs.NodeName, "node-name", "", 31 | "Assign a name to this scanning node. Useful if you are running multiple nodes and want to distinguish results.") 32 | nodeCmd.PersistentFlags().BoolVar(&nodeCmdArgs.UseTLS, "use-tls", false, "Set true to use TLS") 33 | nodeCmd.PersistentFlags().BoolVar(&nodeCmdArgs.TLSIgnoreServerCertificate, "tls-insecure", 34 | false, "Literally. Trust anybody. Requires --use-tls") 35 | nodeCmd.PersistentFlags().StringVar(&nodeCmdArgs.TLSCACertPath, "tls-ca-cert", "", 36 | "path to ca certificate if TLS is used. Requires --use-tls") 37 | nodeCmd.PersistentFlags().StringVar(&nodeCmdArgs.TLSClientKeyPath, "tls-client-key", "", 38 | "path to tls client key. Requires --use-tls") 39 | nodeCmd.PersistentFlags().StringVar(&nodeCmdArgs.TLSClientCertPath, "tls-client-cert", "", 40 | "path to tls client cert. Requires --use-tls") 41 | nodeCmd.PersistentFlags().StringVar(&nodeCmdArgs.TLSServerSAN, "tls-server-SAN", "", 42 | "subject alternative name of the server. Go's TLS implementation checks this value against the values provided in the certificate and refuses to connect if no match is found") 43 | 44 | } 45 | 46 | func init() { 47 | rootCmd.AddCommand(nodeCmd) 48 | parseCmdLine() 49 | } 50 | 
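// Example invocation (illustrative), combining the flags registered above;
// host, port and names are placeholders:
//
//	nray node -s 10.0.0.1 -p 8601 --node-name scanner-01 \
//	    --use-tls --tls-ca-cert ./ca.pem --tls-server-SAN nray.example.org
//
// Without --use-tls, the other TLS flags have no effect
// (see setupMangosClientTLSConfig in core/messageQueue.go).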
-------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # nray 2 | 3 | ## Overview 4 | 5 | Nray is a free, platform- and architecture-independent port and application layer scanner. 6 | nray runs in a distributed manner to speed up scans and to perform scans from different vantage points. 7 | Event-based results allow further processing of information during the scan, e.g. using tools like `jq`. 8 | 9 | If you are looking for user documentation, have a look at the [project homepage](https://nray-scanner.org). 10 | For information related to developing and contributing to nray, continue reading. 11 | 12 | ## Building 13 | 14 | You can build the project after a git checkout by calling `go build`. 15 | Nray is written in pure Go and care was taken to select only dependencies that also fulfill this requirement; therefore, a standard Go installation (plus git) is enough to build nray on and for any supported platform - this means that there is also **no** dependency on libraries like `libpcap`. 16 | 17 | ### With makefile 18 | 19 | Nevertheless, there is a makefile that is supposed to be used for building production versions (`make release`) - it ensures that no C dependencies are linked in and symbols are stripped from binaries to save space. 20 | Also, binaries for most common operating systems are created automatically. 21 | A call to `make` will build a local development version, tailored to your current OS and architecture with C libraries and Go's race detector linked in. 22 | 23 | ### Without makefile 24 | 25 | Simply run `go build` - in case cross-compiling is desired, `GOOS` and `GOARCH` parameters control target OS and architecture. 26 | For nodes, it is possible to inject server location and port directly into the binary: `go build -ldflags "-X main.server=10.0.0.1 -X main.port=8601"` (a minimal sketch of this mechanism is shown at the end of this file). 27 | To get smaller binaries, strip stuff that is not necessary away via `-ldflags="-s -w"` when calling `go build`. 28 | If you need to rebuild the protobuf schemas (this is not required unless you change the wire protocol!), run `make create-schemas` (which requires the protobuf compiler on your system). 29 | 30 | ## Contributing and Development 31 | 32 | Just grab the code and fix stuff that annoys you or hack in new awesome features! 33 | Every contribution is welcome and the goal is to make nray an awesome project for users and contributors! 34 | 35 | Your code should pass standard checks performed by `go vet` and `golint`. 36 | Nray is always developed against the latest Go release, so if you are having trouble building nray, check if you have the latest Go version installed. 37 | 38 | ## Legal stuff 39 | 40 | Copyright 2019 by Michael Eder. 41 | Licensed under GPLv3. See LICENSE. 
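For reference, a minimal, self-contained sketch of the link-time injection mechanism described under "Without makefile" (the `endpoint` variable is a made-up example, not part of nray):

```go
package main

import "fmt"

// endpoint stays empty unless it is injected at link time, e.g.:
//
//	go build -ldflags "-X main.endpoint=10.0.0.1:8601" .
var endpoint string

func main() {
	if endpoint == "" {
		fmt.Println("no endpoint compiled in, expecting flags")
		return
	}
	fmt.Println("using compiled-in endpoint:", endpoint)
}
```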
42 | -------------------------------------------------------------------------------- /nray-conf_discovery_example.yaml: -------------------------------------------------------------------------------- 1 | debug: true 2 | listen: [8601] 3 | host: "0.0.0.0" 4 | targetgenerator: 5 | bufferSize: 5 6 | # The default target generator 7 | standard: 8 | enabled: true 9 | targets: ["172.0.0.0/16"] 10 | #targetFile: "./targets.txt" 11 | tcpports: ["top25", "80","443","8080","445","22","1099","3389","5432","111","2049","8443","8000","3306","5900","21","8888","8009","1433", "5985", "5986", "8008", "8010", "8020", "1434"] 12 | # https://twitter.com/ptswarm/status/1311310897592315905 13 | #tcpports: ["1090","1098","1099","4444","11099","47001","47002","10999", "7000-7004","8000-8003","9000-9003","9503","7070","7071", "45000","45001","8686","9012","50500","4848","11111","4444","4445","4786","5555","5556"] 14 | udpports: ["top5", "1433"] 15 | blacklist: [] 16 | #blacklistFile: "./blacklist.txt" 17 | maxHostsPerBatch: 500 18 | maxTcpPortsPerBatch: 50 19 | maxUdpPortsPerBatch: 25 20 | 21 | # Configuration of scanners goes here 22 | scannerconfig: 23 | workers: 1000 24 | # ratelimit defines how many workers call the scan() function per second. 25 | # Having a rate limit allows us to utilize most ressources by having lots 26 | # of workers that may wait for network IO/timeouts whereas in case of a 27 | # burst (e.g. start of a scan) the rate limit blocks all workers from 28 | # starting their job at once 29 | # Expects a number or 'none' (lowercase!) if no limit should be applied. 30 | #ratelimit: "none" 31 | 32 | # tcp port scanner 33 | tcp: 34 | # Connect timeout in milliseconds 35 | timeout: 1500ms 36 | 37 | udp: 38 | # Fast sends only probes for known protocols 39 | fast: false 40 | # Default payload. This is sent when the scanner is not aware of the protocol. See documentation 41 | # for a complete list of protocols supported by the scanner 42 | defaultHexPayload: "\x6e\x72\x61\x79" 43 | # You may define/overwrite port:payloads at your wish. For encoding arbitrary data, see https://golang.org/ref/spec#Rune_literals 44 | #customHexPayloads: 45 | # "19": "A" # chargen. "A" is the same as "\x41" (hex) or "\101" (oct) 46 | # Timeout to wait for a response 47 | timeout: 1000ms 48 | 49 | events: 50 | terminal: 51 | # Any matching filter is going to be printed 52 | filter: 53 | environment: # empty filter is printed if a element of this type exists 54 | #result.port: 80 55 | result.portscan.open: true 56 | internal: 57 | channelsize: 1000 58 | json-file: 59 | filename: "nray-results-fastrun.json" 60 | overwriteExisting: false 61 | internal: # Don't touch these unless you know what you do 62 | channelsize: 10000 # Internal event buffer 63 | synctimer: 10s # flush interval 64 | -------------------------------------------------------------------------------- /core/targetGeneration/blacklist.go: -------------------------------------------------------------------------------- 1 | package targetgeneration 2 | 3 | import ( 4 | "fmt" 5 | "net" 6 | 7 | "github.com/apparentlymart/go-cidr/cidr" 8 | 9 | "github.com/nray-scanner/nray/utils" 10 | "github.com/zmap/go-iptree/blacklist" 11 | ) 12 | 13 | // NrayBlacklist allows to add/query ip/net/dns blacklisted items 14 | type NrayBlacklist struct { 15 | ipBlacklist *blacklist.Blacklist 16 | dnsBlacklist *map[string]bool // value type not relevant, taking bool.. 
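// (the map is used as a set: a key's presence marks that DNS name as blacklisted)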
17 | addressCount uint64 18 | } 19 | 20 | // NewBlacklist returns a new blacklist 21 | func NewBlacklist() *NrayBlacklist { 22 | _dnsblacklist := make(map[string]bool) 23 | return &NrayBlacklist{ 24 | ipBlacklist: blacklist.New(), 25 | dnsBlacklist: &_dnsblacklist, 26 | } 27 | } 28 | 29 | // AddToBlacklist can be used if the type of the element 30 | // is unclear 31 | func (blacklist *NrayBlacklist) AddToBlacklist(element string) uint64 { 32 | if utils.Ipv4NetRegexpr.MatchString(element) { // An IPv4 network 33 | blacklist.AddNetToBlacklist(element) 34 | _, ipnet, err := net.ParseCIDR(element) 35 | utils.CheckError(err, true) 36 | return cidr.AddressCount(ipnet) 37 | } else if utils.Ipv4Regexpr.MatchString(element) { // An IPv4 address 38 | blacklist.AddNetToBlacklist(fmt.Sprintf("%s/32", element)) 39 | return 1 40 | } else if utils.MayBeFQDN(element) { // Probably a FQDN 41 | blacklist.AddDNSNameToBlacklist(element) 42 | return 1 43 | } else { 44 | // Don't care as target generation won't add anything not matching 45 | // the criteria above 46 | return 0 47 | } 48 | } 49 | 50 | // AddNetToBlacklist adds a CIDR network range to the blacklist 51 | // /32 achieves the same for a single IP 52 | func (blacklist *NrayBlacklist) AddNetToBlacklist(network string) { 53 | _, parsedNet, err := net.ParseCIDR(network) 54 | utils.CheckError(err, false) 55 | blacklist.addressCount += cidr.AddressCount(parsedNet) 56 | blacklist.ipBlacklist.AddEntry(network) 57 | } 58 | 59 | // AddDNSNameToBlacklist adds a FQDN to the blacklist 60 | func (blacklist *NrayBlacklist) AddDNSNameToBlacklist(dnsName string) { 61 | if !(*blacklist.dnsBlacklist)[dnsName] { 62 | blacklist.addressCount++ 63 | } 64 | (*blacklist.dnsBlacklist)[dnsName] = true 65 | } 66 | 67 | // IsIPBlacklisted returns true if the given IP is contained 68 | // in a network in the blacklist 69 | func (blacklist *NrayBlacklist) IsIPBlacklisted(ip string) bool { 70 | result, err := blacklist.ipBlacklist.IsBlacklisted(ip) 71 | utils.CheckError(err, false) 72 | return result 73 | } 74 | 75 | // IsDNSNameBlacklisted returns true if a given DNS name 76 | // is blacklisted 77 | func (blacklist *NrayBlacklist) IsDNSNameBlacklisted(dnsName string) bool { 78 | _, blacklisted := (*blacklist.dnsBlacklist)[dnsName] 79 | return blacklisted 80 | } 81 | -------------------------------------------------------------------------------- /core/targetGeneration/networkExpansion.go: -------------------------------------------------------------------------------- 1 | // This is a (probably simplified) Go implementation of ZMap's 2 | // ip calculation sharding algorithm used to pseudo-randomize 3 | // an IP address space without performing a precalculation of 4 | // all IPs and making sure that every IP is returned exactly once 5 | 6 | package targetgeneration 7 | 8 | import ( 9 | "math/big" 10 | "math/rand" 11 | ) 12 | 13 | type cyclicGroup struct { 14 | prime uint64 15 | knownPrimroot uint64 16 | primeFactors []uint64 17 | numPrimeFactors uint64 18 | } 19 | 20 | type cycle struct { 21 | group *cyclicGroup 22 | generator uint64 23 | order uint64 24 | offset uint32 25 | } 26 | 27 | var cyclicGroups [5]cyclicGroup 28 | 29 | func init() { 30 | cyclicGroups = [5]cyclicGroup{ 31 | {257, 3, []uint64{2}, 1}, // 2^8 + 1 32 | {65537, 3, []uint64{2}, 1}, // 2^16 + 1 33 | {16777259, 2, []uint64{2, 23, 103, 3541}, 4}, // 2^24 + 43 34 | {268435459, 2, []uint64{2, 3, 19, 87211}, 4}, // 2^28 + 3 35 | {4294967311, 3, []uint64{2, 3, 5, 131, 364289}, 5}} // 2^32 + 15 36 | } 37 | 38 | func 
isCoprime(check uint64, group *cyclicGroup) bool { 39 | for i := uint64(0); i < group.numPrimeFactors; i++ { 40 | if (group.primeFactors[i] > check) && 41 | (group.primeFactors[i]%check == 0) { 42 | return false 43 | } else if (group.primeFactors[i] < check) && 44 | (check%group.primeFactors[i] == 0) { 45 | return false 46 | } else if group.primeFactors[i] == check { 47 | return false 48 | } 49 | } 50 | return true 51 | } 52 | 53 | func findPrimroot(group *cyclicGroup, seed int64) uint32 { 54 | rand.Seed(seed) 55 | candidate := (rand.Uint64() & 0xFFFFFFFF) % group.prime 56 | if candidate == 0 { 57 | candidate++ 58 | } 59 | for !isCoprime(candidate, group) { 60 | candidate++ 61 | if candidate >= group.prime { 62 | candidate = 1 63 | } 64 | } 65 | return uint32(isomorphism(candidate, group)) 66 | } 67 | 68 | func isomorphism(additiveElt uint64, multGroup *cyclicGroup) uint64 { 69 | if !(additiveElt < multGroup.prime) { 70 | panic("Assertion failed") 71 | } 72 | var base, power, prime, primroot big.Int 73 | base.SetUint64(multGroup.knownPrimroot) 74 | power.SetUint64(additiveElt) 75 | prime.SetUint64(multGroup.prime) 76 | primroot.Exp(&base, &power, &prime) 77 | return primroot.Uint64() 78 | } 79 | 80 | func getGroup(minSize uint64) *cyclicGroup { 81 | for i := 0; i < len(cyclicGroups); i++ { 82 | if cyclicGroups[i].prime > minSize { 83 | return &cyclicGroups[i] 84 | } 85 | } 86 | panic("No cyclic group found with prime large enough. This is impossible.") 87 | } 88 | 89 | func makeCycle(group *cyclicGroup, seed int64) cycle { 90 | generator := findPrimroot(group, seed) 91 | offset := (rand.Uint64() & 0xFFFFFFFF) % group.prime 92 | return cycle{group, uint64(generator), group.prime - 1, uint32(offset)} 93 | } 94 | 95 | func first(c *cycle) uint64 { 96 | var generator, exponentBegin, prime, start big.Int 97 | generator.SetUint64(c.generator) 98 | prime.SetUint64(c.group.prime) 99 | exponentBegin.SetUint64(c.order) 100 | start.Exp(&generator, &exponentBegin, &prime) 101 | return start.Uint64() 102 | } 103 | 104 | func next(c *cycle, current *uint64) { 105 | // Use big.Int for the multiplication: for the largest group (2^32 + 15), 106 | // current*generator can exceed 64 bits, so a plain uint64 product would overflow 107 | var cur, gen, prime big.Int 108 | cur.SetUint64(*current) 109 | gen.SetUint64(c.generator) 110 | prime.SetUint64(c.group.prime) 111 | cur.Mul(&cur, &gen).Mod(&cur, &prime) 112 | *current = cur.Uint64() 113 | } 114 | -------------------------------------------------------------------------------- /schemas/messages.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | import "google/protobuf/timestamp.proto"; 3 | import "schemas/events.proto"; 4 | 5 | package nraySchema; 6 | 7 | /* Everything a server sends is packed in an nray server message */ 8 | message NrayServerMessage { 9 | oneof MessageContent { 10 | RegisteredNode registeredNode = 1; 11 | MoreWorkReply jobBatch = 2; 12 | HeartbeatAck heartbeatAck = 3; 13 | WorkDoneAck workDoneAck = 4; 14 | GoodbyeAck goodbyeAck = 5; 15 | Unregistered nodeIsUnregistered = 6; 16 | } 17 | } 18 | 19 | /* Everything the nodes send is packed in an nray node message */ 20 | message NrayNodeMessage { 21 | oneof MessageContent { 22 | NodeRegister nodeRegister = 1; 23 | Heartbeat heartbeat = 2; 24 | MoreWorkRequest moreWork = 3; 25 | WorkDone workDone = 4; 26 | Goodbye goodbye = 5; 27 | } 28 | } 29 | 30 | /* ScanTargets are hosts (rhosts) and ports. 31 | Each rhost is encoded as a string holding either an IP address or a FQDN. 32 | A FQDN mustn't contain a protocol specification */ 33 | message ScanTargets { 34 | repeated string rhosts = 1; 35 | repeated uint32 tcpports = 2; 36 | repeated uint32 udpports = 3; 37 | 38 | } 39 | 40 | /* This message is sent every time a node registers at 41 | the server. It contains a unique machine ID so that no two 42 | scanner nodes run on the same machine */ 43 | message NodeRegister { 44 | string machineID = 1; 45 | int32 preferredPool = 2; 46 | string preferredNodeName = 3; 47 | Event envinfo = 4; 48 | } 
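/* Illustrative summary (added for clarity, not part of the wire format):
   a typical session is NodeRegister -> RegisteredNode, then repeated
   Heartbeat -> HeartbeatAck and MoreWorkRequest -> MoreWorkReply, with
   results returned via WorkDone -> WorkDoneAck, and finally
   Goodbye -> GoodbyeAck when the node wants to exit. */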
49 | 50 | /* This message is sent by the server and indicates that 51 | the server does not know this node and that the node should 52 | register again */ 53 | message Unregistered { 54 | string nodeID = 1; 55 | } 56 | 57 | /* This message tells the scanner that it is registered at 58 | the server and assigns a unique scanner ID. It also carries 59 | the server's timestamp, so nodes without a correct clock 60 | can still perform somewhat accurate timestamping of events */ 61 | message RegisteredNode { 62 | string NodeID = 1; 63 | google.protobuf.Timestamp ServerClock = 2; 64 | //int32 pool = 3; 65 | bytes scannerconfig = 4; 66 | } 67 | 68 | /* A heartbeat message that is sent regularly from any node 69 | to the server to signal that it is still alive */ 70 | message Heartbeat { 71 | string NodeID = 1; 72 | google.protobuf.Timestamp BeatTime = 2; 73 | } 74 | 75 | /* Acknowledgement of a heartbeat message. A server 76 | may indicate that scanning should stop and whether the 77 | scanner should exit */ 78 | message HeartbeatAck { 79 | bool Scanning = 1; 80 | bool Running = 2; 81 | } 82 | 83 | /* Sent by a node if it has no work */ 84 | message MoreWorkRequest { 85 | string NodeID = 1; 86 | } 87 | 88 | /* Contains more work for a scanner */ 89 | message MoreWorkReply { 90 | uint64 batchid = 1; 91 | ScanTargets targets = 3; 92 | } 93 | 94 | /* Indicates that a node is done with a work batch 95 | and contains the results */ 96 | message WorkDone { 97 | string nodeID = 1; 98 | uint64 batchid = 2; 99 | repeated Event events = 3; 100 | } 101 | 102 | /* Acknowledges a WorkDone packet */ 103 | message WorkDoneAck {} 104 | 105 | /* Node is going to exit */ 106 | message Goodbye { 107 | string nodeID = 1; 108 | } 109 | 110 | /* Server ACKs the goodbye and signals if 111 | the node is allowed to exit */ 112 | message GoodbyeAck { 113 | bool ok = 1; 114 | } 115 | 116 | -------------------------------------------------------------------------------- /utils/formatter.go: -------------------------------------------------------------------------------- 1 | package utils 2 | 3 | // Loosely based on https://github.com/antonfisher/nested-logrus-formatter/blob/master/formatter.go - MIT licensed 4 | 5 | import ( 6 | "bytes" 7 | "fmt" 8 | "sort" 9 | "strings" 10 | "time" 11 | 12 | "github.com/sirupsen/logrus" 13 | ) 14 | 15 | // Formatter - logrus formatter, implements logrus.Formatter 16 | type Formatter struct { 17 | FieldsOrder []string // default: fields sorted alphabetically 18 | TimestampFormat string // default: time.RFC3339 19 | HideKeys bool // show [fieldValue] instead of [fieldKey:fieldValue] 20 | NoColors bool // disable colors 21 | NoFieldsColors bool // color only level, default is level + fields 22 | ShowFullLevel bool // true to show full level [WARNING] instead [WARN] 23 | TrimMessages bool // true to trim whitespace on messages 24 | } 25 | 26 | // Format formats a log entry 27 | func (f *Formatter) Format(entry *logrus.Entry) ([]byte, error) { 28 | levelColor := getColorByLevel(entry.Level) 29 | 30 | timestampFormat := f.TimestampFormat 31 | if timestampFormat == "" { 32 | timestampFormat = time.RFC3339 33 | } 34 | // output buffer 35 | b := &bytes.Buffer{} 36 | // write time 37 | b.WriteString(entry.Time.Format(timestampFormat)) 38 | // write level 39 | level := 
strings.ToUpper(entry.Level.String()) 40 | 41 | if !f.NoColors { 42 | fmt.Fprintf(b, "\x1b[%dm", levelColor) 43 | } 44 | 45 | b.WriteString(" [") 46 | if f.ShowFullLevel { 47 | b.WriteString(level) 48 | } else { 49 | b.WriteString(level[:4]) 50 | } 51 | b.WriteString("] ") 52 | 53 | if !f.NoColors && f.NoFieldsColors { 54 | b.WriteString("\x1b[0m") 55 | } 56 | 57 | // write fields 58 | if f.FieldsOrder == nil { 59 | f.writeFields(b, entry) 60 | } else { 61 | f.writeOrderedFields(b, entry) 62 | } 63 | 64 | if !f.NoColors && !f.NoFieldsColors { 65 | b.WriteString("\x1b[0m") 66 | } 67 | 68 | // write message 69 | if f.TrimMessages { 70 | b.WriteString(strings.TrimSpace(entry.Message)) 71 | } else { 72 | b.WriteString(entry.Message) 73 | } 74 | b.WriteByte('\n') 75 | 76 | return b.Bytes(), nil 77 | } 78 | 79 | func (f *Formatter) writeFields(b *bytes.Buffer, entry *logrus.Entry) { 80 | if len(entry.Data) != 0 { 81 | fields := make([]string, 0, len(entry.Data)) 82 | for field := range entry.Data { 83 | fields = append(fields, field) 84 | } 85 | 86 | sort.Strings(fields) 87 | 88 | for _, field := range fields { 89 | f.writeField(b, entry, field) 90 | } 91 | } 92 | } 93 | 94 | func (f *Formatter) writeOrderedFields(b *bytes.Buffer, entry *logrus.Entry) { 95 | length := len(entry.Data) 96 | foundFieldsMap := map[string]bool{} 97 | for _, field := range f.FieldsOrder { 98 | if _, ok := entry.Data[field]; ok { 99 | foundFieldsMap[field] = true 100 | length-- 101 | f.writeField(b, entry, field) 102 | } 103 | } 104 | 105 | if length > 0 { 106 | notFoundFields := make([]string, 0, length) 107 | for field := range entry.Data { 108 | if foundFieldsMap[field] == false { 109 | notFoundFields = append(notFoundFields, field) 110 | } 111 | } 112 | 113 | sort.Strings(notFoundFields) 114 | 115 | for _, field := range notFoundFields { 116 | f.writeField(b, entry, field) 117 | } 118 | } 119 | } 120 | 121 | func (f *Formatter) writeField(b *bytes.Buffer, entry *logrus.Entry, field string) { 122 | if f.HideKeys { 123 | fmt.Fprintf(b, "[%v] ", entry.Data[field]) 124 | } else { 125 | fmt.Fprintf(b, "[%s:%v] ", field, entry.Data[field]) 126 | } 127 | } 128 | 129 | const ( 130 | colorRed = 31 131 | colorGreen = 32 132 | colorYellow = 33 133 | colorBlue = 36 134 | colorGray = 37 135 | ) 136 | 137 | func getColorByLevel(level logrus.Level) int { 138 | switch level { 139 | case logrus.DebugLevel: 140 | return colorBlue 141 | case logrus.WarnLevel: 142 | return colorYellow 143 | case logrus.ErrorLevel, logrus.FatalLevel, logrus.PanicLevel: 144 | return colorRed 145 | default: 146 | return colorGreen 147 | } 148 | } 149 | -------------------------------------------------------------------------------- /core/messageQueue.go: -------------------------------------------------------------------------------- 1 | package core 2 | 3 | import ( 4 | "crypto/rand" 5 | "crypto/tls" 6 | "crypto/x509" 7 | "fmt" 8 | "io/ioutil" 9 | "os" 10 | 11 | "github.com/nray-scanner/nray/utils" 12 | log "github.com/sirupsen/logrus" 13 | mangos "nanomsg.org/go/mangos/v2" 14 | "nanomsg.org/go/mangos/v2/protocol/rep" 15 | "nanomsg.org/go/mangos/v2/protocol/req" 16 | ) 17 | 18 | // Creates the server's rep socket 19 | func createRepSock(host string, ports []uint32, tlsconfig *tls.Config) mangos.Socket { 20 | sock, err := rep.NewSocket() 21 | utils.CheckError(err, false) 22 | listenOptions := make(map[string]interface{}) 23 | for _, port := range ports { 24 | var listenAddr string 25 | if tlsconfig != nil { 26 | listenOptions[mangos.OptionTLSConfig] = 
tlsconfig 27 | listenAddr = fmt.Sprintf("tls+tcp://%s:%d", host, port) 28 | 29 | } else { 30 | listenAddr = fmt.Sprintf("tcp://%s:%d", host, port) 31 | } 32 | log.WithFields(log.Fields{ 33 | "module": "core.messageQueue", 34 | "src": "createRepSock", 35 | }).Debugf("Trying to listen on: %s", listenAddr) 36 | err = sock.ListenOptions(listenAddr, listenOptions) 37 | utils.CheckError(err, true) 38 | } 39 | return sock 40 | } 41 | 42 | // Checks server and port and connects to the server 43 | func initServerConnection(server, port string, socketconfig map[string]interface{}) mangos.Socket { 44 | if server == "" || port == "" { 45 | log.WithFields(log.Fields{ 46 | "module": "core.messageQueue", 47 | "src": "initServerConnection", 48 | }).Error("Please specify a server and the port of the upstream nray server") 49 | os.Exit(1) 50 | } 51 | sock, err := req.NewSocket() 52 | utils.CheckError(err, true) 53 | sock.SetOption(mangos.OptionRecvDeadline, recvDeadline) 54 | sock.SetOption(mangos.OptionSendDeadline, sendDeadline) 55 | var serverAddress string 56 | if socketconfig[mangos.OptionTLSConfig] != nil { 57 | serverAddress = fmt.Sprintf("tls+tcp://%s:%s", server, port) 58 | } else { 59 | serverAddress = fmt.Sprintf("tcp://%s:%s", server, port) 60 | } 61 | log.WithFields(log.Fields{ 62 | "module": "core.messageQueue", 63 | "src": "InitServerConnection", 64 | }).Infof("Connecting to: %s", serverAddress) 65 | err = sock.DialOptions(serverAddress, socketconfig) 66 | utils.CheckError(err, true) 67 | return sock 68 | } 69 | 70 | func setupMangosClientTLSConfig(useTLS bool, ignoreServerCertificate bool, serverCertPath string, clientCertPath string, clientKeyPath string, serverName string) (map[string]interface{}, error) { 71 | connectOptions := make(map[string]interface{}) 72 | if !useTLS { 73 | return connectOptions, nil 74 | } 75 | var tlsConfig = &tls.Config{} 76 | tlsConfig.Rand = rand.Reader 77 | 78 | // Ignore server cert? 79 | if ignoreServerCertificate { 80 | log.WithFields(log.Fields{ 81 | "module": "core.messageQueue", 82 | "src": "setupMangosClientTLSConfig", 83 | }).Warning("Server certificate checks are disabled. Anybody may intercept and modify your traffic.") 84 | tlsConfig.InsecureSkipVerify = ignoreServerCertificate 85 | } 86 | 87 | // Pin server cert? 88 | if serverCertPath != "" { 89 | cert, err := ioutil.ReadFile(serverCertPath) 90 | utils.CheckError(err, true) 91 | roots := x509.NewCertPool() 92 | ok := roots.AppendCertsFromPEM(cert) 93 | if !ok { 94 | return nil, fmt.Errorf("failed to parse server certificate from file %s", serverCertPath) 95 | } 96 | tlsConfig.RootCAs = roots 97 | } 98 | 99 | // Set server name 100 | // See https://stackoverflow.com/a/12122718 comments 101 | tlsConfig.ServerName = serverName 102 | 103 | // Client key for mutual auth? 
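// (Mutual authentication requires both a client certificate and the matching
// private key; if only one of the two paths is set, the pair below is skipped
// and the client connects without presenting a certificate.)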
104 | if clientCertPath != "" && clientKeyPath != "" { 105 | cert, err := tls.LoadX509KeyPair(clientCertPath, clientKeyPath) 106 | utils.CheckError(err, true) 107 | tlsConfig.Certificates = []tls.Certificate{cert} 108 | } 109 | 110 | tlsConfig.BuildNameToCertificate() 111 | connectOptions[mangos.OptionTLSConfig] = tlsConfig 112 | return connectOptions, nil 113 | } 114 | -------------------------------------------------------------------------------- /utils/defaultConfig.go: -------------------------------------------------------------------------------- 1 | package utils 2 | 3 | import ( 4 | "time" 5 | 6 | "github.com/spf13/viper" 7 | ) 8 | 9 | // ApplyDefaultConfig initializes top level configuration 10 | // and some options that fit best here 11 | func ApplyDefaultConfig(config *viper.Viper) *viper.Viper { 12 | defaultConfig := viper.New() 13 | defaultConfig.SetDefault("debug", false) 14 | defaultConfig.SetDefault("listen", []string{"8601"}) 15 | defaultConfig.SetDefault("host", "127.0.0.1") 16 | defaultConfig.SetDefault("TLS.enabled", false) 17 | defaultConfig.SetDefault("TLS.CA", "") 18 | defaultConfig.SetDefault("TLS.cert", "") 19 | defaultConfig.SetDefault("TLS.key", "") 20 | defaultConfig.SetDefault("TLS.forceClientAuth", false) 21 | defaultConfig.SetDefault("statusPrintInterval", 15*time.Second) 22 | defaultConfig.SetDefault("pools", 1) 23 | defaultConfig.SetDefault("considerClientPoolPreference", true) 24 | defaultConfig.SetDefault("internal.nodeExpiryTime", 30) 25 | defaultConfig.SetDefault("internal.nodeExpiryCheckInterval", 10) 26 | defaultConfig.SetDefault("targetgenerator.bufferSize", 5) 27 | if config != nil { 28 | defaultConfig.MergeConfigMap(config.AllSettings()) 29 | } 30 | return defaultConfig 31 | } 32 | 33 | // ApplyDefaultTargetgeneratorStandardConfig sets default values for standard target generator 34 | func ApplyDefaultTargetgeneratorStandardConfig(config *viper.Viper) *viper.Viper { 35 | defaultConfig := viper.New() 36 | defaultConfig.SetDefault("enabled", false) 37 | defaultConfig.SetDefault("targets", []string{""}) 38 | defaultConfig.SetDefault("targetFile", "") 39 | defaultConfig.SetDefault("tcpports", []string{"top25"}) 40 | defaultConfig.SetDefault("udpports", []string{"top25"}) 41 | defaultConfig.SetDefault("blacklist", []string{""}) 42 | defaultConfig.SetDefault("blacklistFile", "") 43 | defaultConfig.SetDefault("maxHostsPerBatch", 150) 44 | defaultConfig.SetDefault("maxTcpPortsPerBatch", 25) 45 | defaultConfig.SetDefault("maxUdpPortsPerBatch", 25) 46 | if config != nil { 47 | defaultConfig.MergeConfigMap(config.AllSettings()) 48 | } 49 | return defaultConfig 50 | } 51 | 52 | // ApplyDefaultScannerConfig is called when the node applies the configuration sent 53 | // by the server in order to have defaults in place 54 | func ApplyDefaultScannerConfig(config *viper.Viper) *viper.Viper { 55 | defaultConfig := viper.New() 56 | defaultConfig.SetDefault("workers", 250) 57 | defaultConfig.SetDefault("ratelimit", "none") 58 | if config != nil { 59 | defaultConfig.MergeConfigMap(config.AllSettings()) 60 | } 61 | return defaultConfig 62 | } 63 | 64 | // ApplyDefaultScannerTCPConfig is called when the TCP scanner is initialized 65 | func ApplyDefaultScannerTCPConfig(config *viper.Viper) *viper.Viper { 66 | defaultConfig := viper.New() 67 | defaultConfig.SetDefault("timeout", "2500ms") 68 | if config != nil { 69 | defaultConfig.MergeConfigMap(config.AllSettings()) 70 | } 71 | return defaultConfig 72 | } 73 | 74 | // ApplyDefaultScannerUDPConfig is called when the UDP 
scanner is initialized 75 | func ApplyDefaultScannerUDPConfig(config *viper.Viper) *viper.Viper { 76 | defaultConfig := viper.New() 77 | defaultConfig.SetDefault("fast", false) 78 | defaultConfig.SetDefault("defaultHexPayload", "\x6e\x72\x61\x79") // "nray" 79 | defaultConfig.SetDefault("customHexPayloads", map[string]string{}) 80 | defaultConfig.SetDefault("timeout", "2500ms") 81 | if config != nil { 82 | defaultConfig.MergeConfigMap(config.AllSettings()) 83 | 84 | } 85 | return defaultConfig 86 | } 87 | 88 | // ApplyDefaultEventTerminalConfig is called when the TerminalEventHandler is initialized 89 | func ApplyDefaultEventTerminalConfig(config *viper.Viper) *viper.Viper { 90 | defaultConfig := viper.New() 91 | defaultConfig.SetDefault("internal.channelsize", 1000) 92 | if config != nil { 93 | defaultConfig.MergeConfigMap(config.AllSettings()) 94 | } 95 | return defaultConfig 96 | } 97 | 98 | // ApplyDefaultEventJSONFileConfig is called when the JSONFileEventHandler is initialized 99 | func ApplyDefaultEventJSONFileConfig(config *viper.Viper) *viper.Viper { 100 | defaultConfig := viper.New() 101 | defaultConfig.SetDefault("filename", "nray-output.json") 102 | defaultConfig.SetDefault("overwriteExisting", false) 103 | defaultConfig.SetDefault("internal.channelsize", 10000) 104 | defaultConfig.SetDefault("internal.synctimer", 10*time.Second) 105 | if config != nil { 106 | defaultConfig.MergeConfigMap(config.AllSettings()) 107 | } 108 | return defaultConfig 109 | } 110 | -------------------------------------------------------------------------------- /nray-conf.yaml: -------------------------------------------------------------------------------- 1 | ## Full documentation and further information can be found at https://nray-scanner.org 2 | ## Nray's advanced scanning is supposed to be configured solely via this configuration file 3 | ## Each directive is documented. It is probably best to start with the default configuration 4 | ## and tweak it to fit your use case. 5 | ## Note: Changing values with "internal" in the name / hierarchy may break nray in brutal 6 | ## and/or subtle ways because you are changing *internals*. You have been warned. 7 | 8 | # IMPORTANT NOTE ON BLACKLISTS: 9 | # This affects only target generation on the server. DNS resolution 10 | # happens on the scannernode. This means that if example.local is at 11 | # 10.0.0.10 and example.local is on the blacklist, the IP will 12 | # still get scanned if it is in the target list. Of course, this 13 | # affects also a blacklisted IP which is going to be scanned 14 | # if a DNS entry not on the blacklist is pointing to it 15 | 16 | # Enables Debug output 17 | #debug: false 18 | 19 | # Specifies on which port(s) to listen for connections by scanner nodes 20 | # Listening on multiple ports is fine 21 | # This setting applies only to advanced scans 22 | listen: [8601] 23 | 24 | # Specify the hostname/address to listen on. 0.0.0.0 listens on all 25 | # interfaces, 127.0.0.1 binds to the loopback interface.
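# For distributed scans with remote scanner nodes, this must be an address the nodes can reach (e.g. 0.0.0.0).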
26 | host: "127.0.0.1" 27 | 28 | # Enable TLS between server and nodes 29 | #TLS: 30 | # enabled: false 31 | # CA: "/path/to/ca.pem" 32 | # cert: "/path/to/servercert.pem" 33 | # key: "/path/to/servercert-key.pem" 34 | # forceClientAuth: false 35 | 36 | # The interval at which status information is printed to stdout 37 | #statusPrintInterval: 15s 38 | 39 | # Pools defines how many worker pools are available and therefore 40 | # how often a target is scanned by different scanners 41 | #pools: 1 42 | 43 | # Set considerClientPoolPreference to true if clients should be able 44 | # to request to be placed in a pool specified by them. If the pool 45 | # doesn't exist, the server will fall back to assigning the pool with the 46 | # fewest nodes. 47 | #considerClientPoolPreference: true 48 | 49 | # This randomizes the nodeID, allowing multiple nodes to run on the same 50 | # machine or in scenarios where no unique ID can be generated from the 51 | # environment, for example container environments like Kubernetes 52 | #allowMultipleNodesPerHost: false 53 | 54 | #internal: 55 | # # Seconds until a node that has not sent any heart beat expires 56 | # nodeExpiryTime: 30 57 | # # This setting affects the interval in seconds of expiry checks 58 | # nodeExpiryCheckInterval: 10 59 | 60 | # All targetgenerators are configured here 61 | targetgenerator: 62 | bufferSize: 5 63 | # The default target generator 64 | standard: 65 | enabled: true 66 | targets: ["192.168.178.1/28"] 67 | #targetFile: "./targets.txt" 68 | tcpports: ["top25"] 69 | udpports: ["top25"] 70 | blacklist: [] 71 | #blacklistFile: "./blacklist.txt" 72 | maxHostsPerBatch: 150 73 | maxTcpPortsPerBatch: 25 74 | maxUdpPortsPerBatch: 25 75 | 76 | # Configuration of scanners goes here 77 | scannerconfig: 78 | workers: 900 79 | # ratelimit defines how many workers call the scan() function per second. 80 | # Having a rate limit allows us to utilize most resources by having lots 81 | # of workers that may wait for network IO/timeouts whereas in case of a 82 | # burst (e.g. start of a scan) the rate limit blocks all workers from 83 | # starting their job at once 84 | # Expects a number or 'none' (lowercase!) if no limit should be applied. 85 | #ratelimit: "none" 86 | 87 | # tcp port scanner 88 | tcp: 89 | # Connect timeout in milliseconds 90 | timeout: 1000ms 91 | 92 | udp: 93 | # Fast mode only sends probes for known protocols 94 | fast: false 95 | # Default payload. This is sent when the scanner is not aware of the protocol. See documentation 96 | # for a complete list of protocols supported by the scanner 97 | defaultHexPayload: "\x6e\x72\x61\x79" 98 | # You may define/overwrite port:payloads at your wish. For encoding arbitrary data, see https://golang.org/ref/spec#Rune_literals 99 | #customHexPayloads: 100 | # "19": "A" # chargen. "A" is the same as "\x41" (hex) or "\101" (oct) 101 | # Timeout to wait for a response 102 | timeout: 1000ms 103 | 104 | # Everything in the event node controls if and how data is written 105 | events: 106 | terminal: 107 | internal: 108 | channelsize: 1000 109 | json-file: 110 | filename: "nray-output.json" 111 | # If set to false, overwriteExisting prevents nray from overwriting 112 | # any existing output file.
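# In that case the output file is opened with O_EXCL, so opening fails if the file already exists (see JSONFileEventHandler.Configure).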
113 | overwriteExisting: false 114 | internal: # Don't touch these unless you know what you are doing 115 | channelsize: 10000 # Internal event buffer 116 | synctimer: 10s # flush interval -------------------------------------------------------------------------------- /events/JSONFileEventHandler.go: -------------------------------------------------------------------------------- 1 | package events 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | "sync" 7 | "time" 8 | 9 | nraySchema "github.com/nray-scanner/nray/schemas" 10 | "github.com/nray-scanner/nray/utils" 11 | log "github.com/sirupsen/logrus" 12 | "github.com/spf13/viper" 13 | ) 14 | 15 | // JSONFileEventHandler implements the EventHandler interface and writes 16 | // events to a file 17 | type JSONFileEventHandler struct { 18 | filedescriptor *os.File 19 | eventChan chan string 20 | flushChan chan bool 21 | eventFilter map[string]interface{} 22 | waitgroup sync.WaitGroup 23 | } 24 | 25 | // Configure takes a viper configuration for this event handler and reads the following values: 26 | // filename: Where to store the file 27 | // internal.channelsize: the size of the internally used buffering channel 28 | // internal.synctimer: interval at which buffered events are periodically flushed. 29 | func (handler *JSONFileEventHandler) Configure(config *viper.Viper) error { 30 | config = utils.ApplyDefaultEventJSONFileConfig(config) 31 | var err error 32 | log.WithFields(log.Fields{ 33 | "module": "events.JSONFileEventHandler", 34 | "src": "Configure", 35 | }).Debug("Checking if filedescriptor already exists") 36 | 37 | if handler.filedescriptor != nil { 38 | log.Debug("File descriptor already exists, returning an error") 39 | return fmt.Errorf("This EventHandler is already configured") 40 | } 41 | log.WithFields(log.Fields{ 42 | "module": "events.JSONFileEventHandler", 43 | "src": "Configure", 44 | }).Debugf("Opening file: %s", config.GetString("filename")) 45 | mode := os.O_RDWR | os.O_CREATE | os.O_EXCL 46 | if config.GetBool("overwriteExisting") { 47 | mode = os.O_RDWR | os.O_CREATE 48 | } 49 | handler.filedescriptor, err = os.OpenFile(config.GetString("filename"), mode, 0644) 50 | if err != nil { 51 | return err 52 | } 53 | log.WithFields(log.Fields{ 54 | "module": "events.JSONFileEventHandler", 55 | "src": "Configure", 56 | }).Debug("Creating channels") 57 | log.WithFields(log.Fields{ 58 | "module": "events.JSONFileEventHandler", 59 | "src": "Configure", 60 | }).Debugf("Event channel size is going to be %d", config.GetInt("internal.channelsize")) 61 | handler.eventChan = make(chan string, config.GetInt("internal.channelsize")) 62 | handler.flushChan = make(chan bool) 63 | handler.eventFilter = config.GetStringMap("filter") 64 | log.WithFields(log.Fields{ 65 | "module": "events.JSONFileEventHandler", 66 | "src": "Configure", 67 | }).Debug("Starting goroutines") 68 | go handler.startFlushTicker(config.GetDuration("internal.synctimer")) 69 | go handler.startEventWriter() 70 | return nil 71 | } 72 | 73 | // ProcessEvents takes a slice of events and passes them 74 | // to the internal processing 75 | func (handler *JSONFileEventHandler) ProcessEvents(events []*nraySchema.Event) { 76 | handler.waitgroup.Add(1) // register before spawning the goroutine so a concurrent Close cannot pass the Wait too early 77 | go func(events []*nraySchema.Event) { 78 | for _, event := range events { 79 | serialized, err := protomarshaller.MarshalToString(event) 80 | utils.CheckError(err, false) 81 | handler.eventChan <- serialized 82 | } 83 | handler.waitgroup.Done() 84 | }(events) 85 | } 86 | 87 | // ProcessEventStream takes a channel, reads the events and
sends them to the internal 88 | // processing where they are written. This function is useful for running in a dedicated 89 | // goroutine 90 | func (handler *JSONFileEventHandler) ProcessEventStream(eventStream <-chan *nraySchema.Event) { 91 | log.WithFields(log.Fields{ 92 | "module": "events.JSONFileEventHandler", 93 | "src": "ProcessEventStream", 94 | }).Debug("Processing events") 95 | for event := range eventStream { 96 | serialized, err := protomarshaller.MarshalToString(event) 97 | utils.CheckError(err, false) 98 | handler.eventChan <- serialized 99 | } 100 | } 101 | 102 | // Close waits until the events are written and closes the file descriptor 103 | func (handler *JSONFileEventHandler) Close() error { 104 | log.WithFields(log.Fields{ 105 | "module": "events.JSONFileEventHandler", 106 | "src": "Close", 107 | }).Println("Closing EventHandler") 108 | handler.waitgroup.Wait() 109 | for len(handler.eventChan) > 0 { // Give time to flush events still in queue 110 | time.Sleep(1 * time.Second) 111 | } 112 | close(handler.eventChan) 113 | 114 | err := handler.filedescriptor.Close() 115 | return err 116 | } 117 | 118 | func (handler *JSONFileEventHandler) startFlushTicker(interval time.Duration) { 119 | log.WithFields(log.Fields{ 120 | "module": "events.JSONFileEventHandler", 121 | "src": "startFlushTicker", 122 | }).Debug("Flush ticker started") 123 | ticker := time.NewTicker(interval) 124 | for range ticker.C { 125 | handler.flushChan <- true 126 | } 127 | } 128 | 129 | func (handler *JSONFileEventHandler) startEventWriter() { 130 | log.WithFields(log.Fields{ 131 | "module": "events.JSONFileEventHandler", 132 | "src": "startEventWriter", 133 | }).Debug("Starting event writer") 134 | for { 135 | select { 136 | case event, more := <-handler.eventChan: 137 | if more { 138 | handler.filedescriptor.Write([]byte(event)) 139 | handler.filedescriptor.Write([]byte{'\n'}) 140 | } else { 141 | handler.filedescriptor.Write([]byte{'\n'}) 142 | return 143 | } 144 | case <-handler.flushChan: 145 | handler.filedescriptor.Sync() 146 | } 147 | } 148 | } 149 | -------------------------------------------------------------------------------- /scanner/scanner.go: -------------------------------------------------------------------------------- 1 | package scanner 2 | 3 | import ( 4 | "context" 5 | "sync" 6 | "sync/atomic" 7 | "time" 8 | 9 | nraySchema "github.com/nray-scanner/nray/schemas" 10 | "github.com/nray-scanner/nray/utils" 11 | log "github.com/sirupsen/logrus" 12 | ) 13 | 14 | var internalTimeOffset time.Duration 15 | 16 | func currentTime() time.Time { 17 | return time.Now().Add(internalTimeOffset) 18 | } 19 | 20 | // RunNodeScannerLoop orchestrates the actual scanning. It knows when to request work and how the 21 | // scanning components communicate with each other 22 | // pause is only used for controlling the scanning, so pause/continue can be supported. Reading/writing to it has to be synchronized 23 | // The workbatch channel is used to send workbatches 24 | // Data sent to dataChan will be picked by the nodes send/recv loop and transferred to the server, 25 | // so it is used for sending requests for more work or reporting results 26 | // TODO: Scan options 27 | func RunNodeScannerLoop(controller *ScanController, workBatchChan <-chan *nraySchema.MoreWorkReply, dataChan chan<- *nraySchema.NrayNodeMessage) { 28 | var tcpscanner = &TCPScanner{} 29 | var udpscanner = &UDPScanner{} 30 | tcpscanner.Configure(controller.scannerConfig.Sub("tcp")) // TODO: actual configuration and create struct via New() 31 | udpscanner.Configure(controller.scannerConfig.Sub("udp")) 32 | for { 33 | // if the scan is paused, sleep 2 seconds before checking again 34 | if controller.Pause.GetValue() { 35 | time.Sleep(2 * time.Second) 36 | continue 37 | } 38 | 39 | // Get more work 40 | log.WithFields(log.Fields{ 41 | "module": "scanner.scanner", 42 | "src": "RunNodeScannerLoop", 43 | }).Info("Requesting new work batch") 44 | dataChan <- requestBatch(controller.nodeID) 45 | 46 | // Get the work 47 | workBatch := <-workBatchChan 48 | if workBatch.Batchid == 0 { // Server has no work yet, sleep 2s 49 | time.Sleep(time.Second * 2) 50 | continue 51 | } 52 | 53 | controller.Refresh() // Resets internal channels and starts housekeeping goroutines 54 | 55 | // Spin up workers 56 | // Each worker has access to ScanController's work queue 57 | // Work queue contains functions that are fully prepared, this means they 58 | // have full state regarding targets, timeouts, configuration, where and how 59 | // to report. Workers are just here to control the level of concurrency 60 | var wg sync.WaitGroup 61 | log.WithFields(log.Fields{ 62 | "module": "scanner.scanner", 63 | "src": "RunNodeScannerLoop", 64 | }).Debugf("Starting workers: %d", controller.scannerConfig.GetInt("workers")) 65 | for i := 0; i < controller.scannerConfig.GetInt("workers"); i++ { 66 | wg.Add(1) 67 | go func(queue <-chan func()) { 68 | for queuedTask := range queue { 69 | atomic.AddInt64(&controller.scansRunning, 1) 70 | controller.ratelimiter.Wait(context.TODO()) 71 | queuedTask() 72 | atomic.AddInt64(&controller.scansRunning, -1) 73 | } 74 | wg.Done() 75 | }(controller.scanQueue) 76 | } 77 | 78 | for scanFunc := range PrepareScanFuncs(tcpscanner, udpscanner, workBatch, controller.portscanResultQueue) { 79 | controller.scanQueue <- scanFunc 80 | } 81 | 82 | go controller.waitForScanToFinishAndEventsToBeProcessed() 83 | 84 | // STEPS 85 | 86 | // 1: Register modules (in scannernode.go) 87 | // 2: Implement abstract port scanning 88 | // 2.1: message format should also support stuff like networks (for ZMAP) 89 | // 2.2: which port scanner to choose is defined in the controller (see comment above) 90 | // 3: Port scan results are sent to controller 91 | // 4: Controller parses results and creates/forwards events to send them upstream 92 | // 5: Controller triggers higher level scanners to do their job 93 | // 6: Done when 94 | // 6.1: Port scanner is done AND 95 | // 6.2: No higher level scans are queued (queue should be empty) 96 | // 6.3: All higher level scans have been performed (use a semaphore for counting active tasks?)
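// All scan functions are queued at this point; the workers return once the controller closes the scan queue.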
97 | 98 | wg.Wait() 99 | controller.workersDone = true 100 | close(controller.portscanResultQueue) 101 | log.WithFields(log.Fields{ 102 | "module": "scanner.scanner", 103 | "src": "RunNodeScannerLoop", 104 | }).Info("Finished work batch, submitting results") 105 | dataChan <- reportResults(controller.nodeID, workBatch.Batchid, controller.getResults()) 106 | } 107 | } 108 | 109 | // build a MoreWorkRequest and wrap it in a node message 110 | func requestBatch(id string) *nraySchema.NrayNodeMessage { 111 | workRequest := nraySchema.MoreWorkRequest{ 112 | NodeID: id, 113 | } 114 | message := &nraySchema.NrayNodeMessage{ 115 | MessageContent: &nraySchema.NrayNodeMessage_MoreWork{ 116 | MoreWork: &workRequest, 117 | }, 118 | } 119 | return message 120 | } 121 | 122 | // send the collected results to the server 123 | func reportResults(nodeID string, batchID uint64, events []*nraySchema.Event) *nraySchema.NrayNodeMessage { 124 | workDone := nraySchema.WorkDone{ 125 | NodeID: nodeID, 126 | Batchid: batchID, 127 | Events: events, 128 | } 129 | message := &nraySchema.NrayNodeMessage{ 130 | MessageContent: &nraySchema.NrayNodeMessage_WorkDone{ 131 | WorkDone: &workDone, 132 | }, 133 | } 134 | return message 135 | } 136 | 137 | // PrepareScanFuncs returns a channel where scan functions are sent over 138 | // They are completely prepared and just have to be called 139 | func PrepareScanFuncs(tcpscanner *TCPScanner, udpscanner *UDPScanner, targetMsg *nraySchema.MoreWorkReply, results chan<- *PortscanResult) <-chan func() { 140 | scanFuncs := make(chan func(), 100) 141 | 142 | go func(targetMsg *nraySchema.MoreWorkReply, results chan<- *PortscanResult) { 143 | for _, target := range targetMsg.Targets.GetRhosts() { 144 | for _, targetTCPPort := range targetMsg.Targets.GetTcpports() { 145 | // Rebind the loop variables in a new scope so the closures capture the current values 146 | t := target 147 | port := targetTCPPort 148 | timeout := tcpscanner.timeout 149 | scanFuncs <- func() { 150 | result, err := TCPConnectIsOpen(t, port, timeout) 151 | utils.CheckError(err, false) 152 | results <- result 153 | } 154 | } 155 | for _, targetUDPPort := range targetMsg.Targets.GetUdpports() { 156 | t := target 157 | port := targetUDPPort 158 | scanFuncs <- func() { 159 | result, err := UDPProtoScan(t, port, *udpscanner) 160 | utils.CheckError(err, false) 161 | results <- result 162 | } 163 | } 164 | } 165 | close(scanFuncs) 166 | }(targetMsg, results) 167 | 168 | return scanFuncs 169 | } 170 | -------------------------------------------------------------------------------- /core/targetGeneration/standardTGBackend.go: -------------------------------------------------------------------------------- 1 | package targetgeneration 2 | 3 | import ( 4 | "bufio" 5 | "net" 6 | "os" 7 | "strings" 8 | 9 | "github.com/apparentlymart/go-cidr/cidr" 10 | "github.com/spf13/viper" 11 | 12 | "github.com/nray-scanner/nray/utils" 13 | log "github.com/sirupsen/logrus" 14 | ) 15 | 16 | // standardTGBackend is the default target generator.
17 | // It generates single domain targets as well as IP targets 18 | // derived from networks using the ZMap algorithm 19 | type standardTGBackend struct { 20 | rawConfig *viper.Viper 21 | rawTargetCount uint64 22 | rawTargets []string 23 | tcpPorts []uint16 24 | udpPorts []uint16 25 | maxHosts uint 26 | maxTCPPorts uint 27 | maxUDPPorts uint 28 | blacklist *NrayBlacklist 29 | } 30 | 31 | // configure is called to set up the generator 32 | func (generator *standardTGBackend) configure(conf *viper.Viper) error { 33 | conf = utils.ApplyDefaultTargetgeneratorStandardConfig(conf) 34 | generator.rawConfig = conf 35 | generator.rawTargets = conf.GetStringSlice("targets") 36 | generator.maxHosts = uint(conf.GetInt("maxHostsPerBatch")) 37 | generator.maxTCPPorts = uint(conf.GetInt("maxTcpPortsPerBatch")) 38 | generator.maxUDPPorts = uint(conf.GetInt("maxUdpPortsPerBatch")) 39 | 40 | if conf.IsSet("targetFile") && strings.Trim(conf.GetString("targetFile"), " ") != "" { 41 | file, err := os.Open(conf.GetString("targetFile")) 42 | utils.CheckError(err, false) 43 | defer file.Close() 44 | 45 | var targetHosts []string 46 | scanner := bufio.NewScanner(file) 47 | for scanner.Scan() { 48 | line := strings.TrimSpace(scanner.Text()) 49 | if line != "" { 50 | targetHosts = append(targetHosts, line) 51 | } 52 | } 53 | err = scanner.Err() 54 | utils.CheckError(err, false) 55 | for _, target := range targetHosts { 56 | generator.rawTargets = append(generator.rawTargets, target) 57 | } 58 | } 59 | 60 | generator.blacklist = NewBlacklist() 61 | for _, blacklistItem := range conf.GetStringSlice("blacklist") { 62 | _ = generator.blacklist.AddToBlacklist(blacklistItem) 63 | } 64 | if conf.IsSet("blacklistFile") && strings.Trim(conf.GetString("blacklistFile"), " ") != "" { 65 | file, err := os.Open(conf.GetString("blacklistFile")) 66 | utils.CheckError(err, false) 67 | defer file.Close() 68 | 69 | scanner := bufio.NewScanner(file) 70 | for scanner.Scan() { 71 | line := strings.TrimSpace(scanner.Text()) 72 | if line != "" { 73 | _ = generator.blacklist.AddToBlacklist(line) 74 | } 75 | } 76 | err = scanner.Err() 77 | utils.CheckError(err, false) 78 | } 79 | 80 | generator.tcpPorts = ParsePorts(conf.GetStringSlice("tcpports"), "tcp") 81 | generator.udpPorts = ParsePorts(conf.GetStringSlice("udpports"), "udp") 82 | 83 | // Count targets 84 | for _, rawTarget := range generator.rawTargets { 85 | if rawTarget == "" { 86 | continue 87 | } else if utils.Ipv4NetRegexpr.MatchString(rawTarget) { // An IPv4 network 88 | _, ipnet, err := net.ParseCIDR(rawTarget) 89 | utils.CheckError(err, true) 90 | generator.rawTargetCount += cidr.AddressCount(ipnet) 91 | } else if utils.Ipv4Regexpr.MatchString(rawTarget) { // An IPv4 address 92 | generator.rawTargetCount++ 93 | } else if utils.MayBeFQDN(rawTarget) { // Probably a FQDN 94 | generator.rawTargetCount++ 95 | } else { // unrecognized targets are not counted; receiveTargets logs them later 96 | } 97 | } 98 | return nil 99 | } 100 | 101 | // receiveTargets implements the interface stub and returns a channel with targets 102 | // All targets have been generated when the channel is closed 103 | func (generator *standardTGBackend) receiveTargets() <-chan AnyTargets { 104 | resultChan := make(chan AnyTargets, 10) // Keeping 10 Targets waiting should be sufficient 105 | 106 | // All targets are sent over this channel 107 | targets := make(chan string, 50) 108 | // Decides if input is an IP, net or domain and fills the target channel with target strings 109 | go func(targetChan chan<- string, rawTargets []string) { 110 | for _, rawTarget := range rawTargets {
111 | if rawTarget == "" { 112 | continue 113 | } else if utils.Ipv4NetRegexpr.MatchString(rawTarget) { // An IPv4 network 114 | _, ipnet, err := net.ParseCIDR(rawTarget) 115 | utils.CheckError(err, true) 116 | ipStream := GenerateIPStreamFromCIDR(ipnet, generator.blacklist) 117 | for ip := range ipStream { 118 | targets <- ip.String() 119 | } 120 | } else if utils.Ipv4Regexpr.MatchString(rawTarget) { // An IPv4 address 121 | if !generator.blacklist.IsIPBlacklisted(rawTarget) { 122 | targets <- rawTarget 123 | } 124 | } else if utils.MayBeFQDN(rawTarget) { // Probably a FQDN 125 | if !generator.blacklist.IsDNSNameBlacklisted(rawTarget) { 126 | targets <- rawTarget 127 | } 128 | } else { 129 | log.WithFields(log.Fields{ 130 | "module": "targetgeneration.standardTGBackend", 131 | "src": "receiveTargets", 132 | }).Debugf("This does not look like a valid target: %s", rawTarget) 133 | } 134 | } 135 | close(targets) 136 | }(targets, generator.rawTargets) 137 | 138 | // The idea is as follows: 139 | // 0. Do as long as the internal generator is creating targets: 140 | // 1. Get maxHosts many next targets from the internal generator 141 | // 2. Get a stream for TCP and UDP ports 142 | // 3. As long as both streams are not closed, do: 143 | // 4. Create new AnyTarget with hosts generated earlier included 144 | // 5. use the streams to fill TCP and UDP ports of AnyTarget object up to 145 | // maxTcp/maxUdpPorts or streams are closed (done in chunkPorts()) 146 | // 6. send the AnyTarget back 147 | // 7. When the host generator is done, close the stream 148 | go func(resultChan chan<- AnyTargets, targets <-chan string) { 149 | var stop bool 150 | for !stop { 151 | // Get the hosts 152 | hosts := make([]string, 0) 153 | 154 | for i := uint(0); i < generator.maxHosts; i++ { 155 | elem, ok := <-targets 156 | if !ok { // We're done, set stop mark, process remaining hosts and stop 157 | stop = true 158 | break 159 | } 160 | hosts = append(hosts, elem) 161 | } 162 | for _, target := range chunkPorts(hosts, generator.tcpPorts, generator.udpPorts, generator.maxTCPPorts, generator.maxUDPPorts) { 163 | resultChan <- target 164 | } 165 | } 166 | close(resultChan) 167 | }(resultChan, targets) 168 | 169 | return resultChan 170 | } 171 | 172 | func (generator *standardTGBackend) targetCount() (uint64, error) { 173 | allTargets := generator.rawTargetCount * uint64(len(generator.tcpPorts)+len(generator.udpPorts)) 174 | blacklistedCount := generator.blacklist.addressCount * uint64(len(generator.tcpPorts)+len(generator.udpPorts)) 175 | return allTargets - blacklistedCount, nil 176 | } 177 | -------------------------------------------------------------------------------- /core/targetGeneration/targetGenerator_test.go: -------------------------------------------------------------------------------- 1 | package targetgeneration 2 | 3 | import ( 4 | "fmt" 5 | "net" 6 | "sort" 7 | "testing" 8 | 9 | "github.com/apparentlymart/go-cidr/cidr" 10 | "github.com/nray-scanner/nray/utils" 11 | ) 12 | 13 | func TestReceiveTargets(t *testing.T) { 14 | g := standardTGBackend{ 15 | maxHosts: 192, 16 | maxTCPPorts: 50, 17 | maxUDPPorts: 50, 18 | } 19 | 20 | g.rawTargets = []string{"192.168.0.0/24"} 21 | g.tcpPorts = []uint16{21} 22 | ips := make([]string, 0) 23 | 24 | _, ipnet, _ := net.ParseCIDR(g.rawTargets[0]) 25 | targetChan := g.receiveTargets() 26 | for targets := range targetChan { 27 | for _, target := range targets.RemoteHosts { 28 | if !ipnet.Contains(net.ParseIP(target)) { 29 | t.Fail() 30 | } 31 | ips = append(ips, target) 32 
| } 33 | } 34 | 35 | if cidr.AddressCount(ipnet) != uint64(len(ips)) { 36 | t.Fail() 37 | } 38 | 39 | // A subnet should be included in a bigger net 40 | g.rawTargets = []string{"192.168.0.0/25"} 41 | 42 | ips = make([]string, 0) 43 | targetChan = g.receiveTargets() 44 | for targets := range targetChan { 45 | for _, target := range targets.RemoteHosts { 46 | if !ipnet.Contains(net.ParseIP(target)) { 47 | t.Fail() 48 | } 49 | ips = append(ips, target) 50 | } 51 | } 52 | _, blacklistnet, _ := net.ParseCIDR("192.168.0.0/25") 53 | 54 | if cidr.AddressCount(ipnet)-cidr.AddressCount(blacklistnet) != uint64(len(ips)) { 55 | t.Fail() 56 | } 57 | 58 | g.rawTargets = []string{"10.10.43.0/12"} 59 | g.tcpPorts = []uint16{21, 80, 443} 60 | _, ipnet, _ = net.ParseCIDR("10.10.43.0/12") 61 | ips = make([]string, 0) 62 | targetChan = g.receiveTargets() 63 | for targets := range targetChan { 64 | for _, target := range targets.RemoteHosts { 65 | if !ipnet.Contains(net.ParseIP(target)) { 66 | t.Fail() 67 | } 68 | ips = append(ips, target) 69 | } 70 | } 71 | if cidr.AddressCount(ipnet) != uint64(len(ips)) { 72 | t.Fail() 73 | } 74 | 75 | g.rawTargets = []string{"172.24.12.0/28"} 76 | g.tcpPorts = []uint16{8080} 77 | _, ipnet, _ = net.ParseCIDR("172.24.12.0/28") 78 | ips = make([]string, 0) 79 | targetChan = g.receiveTargets() 80 | for targets := range targetChan { 81 | for _, target := range targets.RemoteHosts { 82 | if target == "172.24.12.25" { 83 | t.Fail() 84 | } 85 | if !ipnet.Contains(net.ParseIP(target)) { 86 | t.Fail() 87 | } 88 | ips = append(ips, target) 89 | } 90 | 91 | } 92 | if cidr.AddressCount(ipnet) != uint64(len(ips)) { 93 | t.Fail() 94 | } 95 | 96 | g.rawTargets = []string{"127.0.0.1", "scanme.nmap.org", "honeypot.local", "www.google.com", "https://scanme.nmap.org:443/"} 97 | g.tcpPorts = []uint16{80, 443, 25} 98 | g.blacklist = NewBlacklist() 99 | g.blacklist.AddToBlacklist("honeypot.local") 100 | targetChan = g.receiveTargets() 101 | target := <-targetChan 102 | 103 | if target.RemoteHosts[0] != "127.0.0.1" || target.RemoteHosts[1] != "scanme.nmap.org" || target.RemoteHosts[2] != "www.google.com" { 104 | t.Fail() 105 | } 106 | if len(target.RemoteHosts) != 3 { 107 | t.Fail() 108 | } 109 | for _, port := range target.TCPPorts { 110 | if !(port == 80 || port == 443 || port == 25) { 111 | t.Fail() 112 | } 113 | } 114 | 115 | } 116 | 117 | func TestParsePorts(t *testing.T) { 118 | var results []uint16 119 | var expected []uint16 120 | 121 | // 22 122 | expected = []uint16{22} 123 | results = ParsePorts([]string{"22"}, "tcp") 124 | if len(results) != len(expected) { 125 | t.Fail() 126 | } 127 | for pos := range results { 128 | if results[pos] != expected[pos] { 129 | t.Fail() 130 | } 131 | } 132 | 133 | // 80,443 134 | expected = []uint16{80, 443} 135 | results = ParsePorts([]string{"80", "443"}, "tcp") 136 | sort.Slice(results, func(i, j int) bool { return results[i] < results[j] }) 137 | if len(results) != len(expected) { 138 | t.Fail() 139 | } 140 | for pos := range results { 141 | if results[pos] != expected[pos] { 142 | t.Fail() 143 | } 144 | } 145 | 146 | // "8080-8888" 147 | expected = []uint16{8080, 8081, 8082, 8083, 8084, 8085, 8086, 8087, 8088} 148 | results = ParsePorts([]string{"8080-8088"}, "tcp") 149 | sort.Slice(results, func(i, j int) bool { return results[i] < results[j] }) 150 | if len(results) != len(expected) { 151 | t.Fail() 152 | } 153 | for pos := range results { 154 | if results[pos] != expected[pos] { 155 | t.Fail() 156 | } 157 | } 158 | 159 | // "30-22" 160 | expected 
= []uint16{22, 23, 24, 25, 26, 27, 28, 29, 30} 161 | results = ParsePorts([]string{"30-22"}, "tcp") 162 | sort.Slice(results, func(i, j int) bool { return results[i] < results[j] }) 163 | if len(results) != len(expected) { 164 | t.Fail() 165 | } 166 | for pos := range results { 167 | if results[pos] != expected[pos] { 168 | t.Fail() 169 | } 170 | } 171 | 172 | // "top10" 173 | expected = []uint16{21, 22, 23, 25, 80, 110, 139, 443, 445, 3389} 174 | results = ParsePorts([]string{"top10"}, "tcp") 175 | sort.Slice(results, func(i, j int) bool { return results[i] < results[j] }) 176 | if len(results) != len(expected) { 177 | t.Fail() 178 | } 179 | for pos := range results { 180 | if results[pos] != expected[pos] { 181 | t.Fail() 182 | } 183 | } 184 | 185 | // misc tests 186 | expected = []uint16{21, 22, 23, 25, 80, 110, 139, 443, 445, 3389} 187 | results = ParsePorts([]string{"top10", "top5", "139", "443", "21-23"}, "tcp") 188 | sort.Slice(results, func(i, j int) bool { return results[i] < results[j] }) 189 | if len(results) != len(expected) { 190 | t.Fail() 191 | } 192 | for pos := range results { 193 | if results[pos] != expected[pos] { 194 | t.Fail() 195 | } 196 | } 197 | 198 | // errorchan 199 | expected = []uint16{21, 22, 23, 25, 80, 110, 139, 443, 445, 3389} 200 | results = ParsePorts([]string{"top10", "top5", "139", "443", "21-23", "lorem ipsum", "www.google.com"}, "tcp") 201 | sort.Slice(results, func(i, j int) bool { return results[i] < results[j] }) 202 | if len(results) != len(expected) { 203 | t.Fail() 204 | } 205 | for pos := range results { 206 | if results[pos] != expected[pos] { 207 | t.Fail() 208 | } 209 | } 210 | } 211 | 212 | func TestMayBeFQDN(t *testing.T) { 213 | // FQDN 214 | fqdns := []string{"www.google.com", "127.0.0.1", "localhost", "some.long.domain.local"} 215 | for _, element := range fqdns { 216 | if !utils.MayBeFQDN(element) { 217 | fmt.Printf("%s should be recognized as FQDN\n", element) 218 | t.Fail() 219 | } 220 | } 221 | 222 | // No FQDN 223 | notfqdns := []string{"https://www.google.com/", "localhost:8080", "http://localhost:8100"} 224 | for _, element := range notfqdns { 225 | if utils.MayBeFQDN(element) { 226 | fmt.Printf("%s should not be recognized as FQDN\n", element) 227 | t.Fail() 228 | } 229 | } 230 | } 231 | -------------------------------------------------------------------------------- /scanner/udp.go: -------------------------------------------------------------------------------- 1 | package scanner 2 | 3 | import ( 4 | "fmt" 5 | "math/rand" 6 | "net" 7 | "strconv" 8 | "strings" 9 | "time" 10 | 11 | "encoding/hex" 12 | 13 | "github.com/nray-scanner/nray/utils" 14 | log "github.com/sirupsen/logrus" 15 | "github.com/spf13/viper" 16 | ) 17 | 18 | // UDPScanner contains the configuration for this scanner 19 | type UDPScanner struct { 20 | fast bool 21 | timeout time.Duration 22 | payloads *map[uint32][]byte 23 | defaultPayload []byte 24 | } 25 | 26 | // UDPProtoScan uses the operating system's mechanism to open a 27 | // UDP connection to a given target IP address at a given port. 
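// A protocol-specific probe is sent when one is known for the port; otherwise the configured default payload is used (in fast mode, ports without a known probe are skipped with an error).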
28 | // Timeout specifies how long to wait before aborting the connection 29 | // attempt 30 | func UDPProtoScan(target string, port uint32, config UDPScanner) (*PortscanResult, error) { 31 | // Get proto payload 32 | payload, ok := (*config.payloads)[port] 33 | if !ok { 34 | if config.fast { 35 | return nil, fmt.Errorf("Fast UDP scanning enabled and no payload known for UDP port %d", port) 36 | } 37 | // Load default payload 38 | payload = config.defaultPayload 39 | } 40 | 41 | if target == "" { 42 | return nil, fmt.Errorf("target is empty") 43 | } 44 | // UDP is connectionless, so establishing the "connection" has the timeout applied for e.g. DNS resolution 45 | // In case of an IP address this should return immediately 46 | conn, err := net.DialTimeout("udp", fmt.Sprintf("%s:%d", target, port), config.timeout) 47 | if err != nil && strings.Contains(err.Error(), "socket: too many open files") { 48 | return nil, fmt.Errorf("Too many open files. You are running too many scan workers and the OS is limiting file descriptors. YOU ARE MISSING SCAN RESULTS. Scan with fewer workers") 49 | } 50 | if err != nil { 51 | utils.CheckError(err, false) 52 | return nil, nil 53 | } 54 | defer conn.Close() 55 | // This is the real timeout that is applied. We send a packet and wait for a response or receive an error in case of timeout 56 | conn.SetDeadline(time.Now().Add(config.timeout)) 57 | _, err = conn.Write(payload) 58 | if err != nil { 59 | log.WithFields(log.Fields{ 60 | "module": "scanner.udp", 61 | "src": "udpProtoScan", 62 | }).Warning(err.Error()) 63 | 64 | utils.CheckError(err, false) 65 | return nil, nil 66 | } 67 | // If reading throws an error, the port is closed (indicated by ICMP Type 3 Code 3 sent by target or by a timeout) 68 | buf := make([]byte, 1024) 69 | _, err = conn.Read(buf) 70 | if err != nil { 71 | return nil, nil 72 | } 73 | result := PortscanResult{ 74 | Target: target, 75 | Port: port, 76 | Open: true, 77 | Scantype: "udp", 78 | Timeout: config.timeout, 79 | } 80 | return &result, nil 81 | } 82 | 83 | // Configure sets relevant configuration on this scanner 84 | func (udpscan *UDPScanner) Configure(config *viper.Viper) { 85 | config = utils.ApplyDefaultScannerUDPConfig(config) 86 | udpscan.timeout = config.GetDuration("timeout") 87 | udpscan.fast = config.GetBool("fast") 88 | decoded := []byte(config.GetString("defaultHexPayload")) 89 | udpscan.defaultPayload = decoded 90 | p := make(map[uint32][]byte) 91 | udpscan.payloads = &p 92 | (*udpscan.payloads)[1604] = probePktCitrix() 93 | (*udpscan.payloads)[53] = probePktDNS() 94 | (*udpscan.payloads)[137] = probePktNetBios() 95 | (*udpscan.payloads)[123] = probePktNTP() 96 | (*udpscan.payloads)[524] = probePktDB2DISCO() 97 | (*udpscan.payloads)[5093] = probePktSentinel() 98 | (*udpscan.payloads)[1434] = probePktMSSQL() 99 | (*udpscan.payloads)[161] = probePktSNMPv2() 100 | (*udpscan.payloads)[111] = probePktPortmap() 101 | 102 | customPayloads := config.GetStringMapString("customHexPayloads") 103 | for customPayloadPort, customPayload := range customPayloads { 104 | p, err := strconv.ParseUint(customPayloadPort, 10, 32) 105 | utils.CheckError(err, false) 106 | (*udpscan.payloads)[uint32(p)] = []byte(customPayload) 107 | } 108 | } 109 | 110 | // https://github.com/rapid7/metasploit-framework/blob/eeed14d2a27759e369d48331b0959008a0b24df8/modules/auxiliary/scanner/discovery/udp_sweep.rb#L476 111 | func probePktCitrix() []byte { 112 | res, err := hex.DecodeString("1e00013002fda8e300000000000000000000000000000000000000000000") 113 |
utils.CheckError(err, false) 114 | return res 115 | } 116 | 117 | // https://github.com/rapid7/metasploit-framework/blob/eeed14d2a27759e369d48331b0959008a0b24df8/modules/auxiliary/scanner/discovery/udp_sweep.rb#L374 118 | func probePktDNS() []byte { 119 | // Not cryptographically relevant, so seeding with time should be OK 120 | r := rand.New(rand.NewSource(time.Now().UnixNano())) 121 | // DNS session ID is randomized 122 | dnsSessionID := make([]byte, 2) 123 | r.Read(dnsSessionID) 124 | // Ask for resolution of "VERSION.BIND" 125 | body := []byte("\x01\x00\x00\x01\x00\x00\x00\x00\x00\x00" + 126 | "\x07" + "VERSION" + 127 | "\x04" + "BIND" + 128 | "\x00\x00\x10\x00\x03") 129 | return append(dnsSessionID, body...) 130 | } 131 | 132 | // https://github.com/rapid7/metasploit-framework/blob/eeed14d2a27759e369d48331b0959008a0b24df8/modules/auxiliary/scanner/discovery/udp_sweep.rb#L384 133 | func probePktNetBios() []byte { 134 | // Not cryptographically relevant, so seeding with time should be OK 135 | r := rand.New(rand.NewSource(time.Now().UnixNano())) 136 | sessionID := make([]byte, 2) 137 | r.Read(sessionID) 138 | body := []byte("\x00\x00\x00\x01\x00\x00\x00\x00" + 139 | "\x00\x00\x20\x43\x4b\x41\x41\x41" + 140 | "\x41\x41\x41\x41\x41\x41\x41\x41" + 141 | "\x41\x41\x41\x41\x41\x41\x41\x41" + 142 | "\x41\x41\x41\x41\x41\x41\x41\x41" + 143 | "\x41\x41\x41\x00\x00\x21\x00\x01") 144 | return append(sessionID, body...) 145 | } 146 | 147 | // https://github.com/rapid7/metasploit-framework/blob/eeed14d2a27759e369d48331b0959008a0b24df8/modules/auxiliary/scanner/discovery/udp_sweep.rb#L417 148 | func probePktNTP() []byte { 149 | return []byte("\xe3\x00\x04\xfa\x00\x01\x00\x00\x00\x01\x00\x00\x00" + 150 | "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" + 151 | "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" + 152 | "\x00\xc5\x4f\x23\x4b\x71\xb1\x52\xf3") 153 | } 154 | 155 | // https://github.com/rapid7/metasploit-framework/blob/eeed14d2a27759e369d48331b0959008a0b24df8/modules/auxiliary/scanner/discovery/udp_sweep.rb#L471 156 | func probePktDB2DISCO() []byte { 157 | return []byte("DB2GETADDR\x00SQL05000\x00") 158 | } 159 | 160 | // https://github.com/rapid7/metasploit-framework/blob/eeed14d2a27759e369d48331b0959008a0b24df8/modules/auxiliary/scanner/discovery/udp_sweep.rb#L427 161 | func probePktSentinel() []byte { 162 | return []byte("\x7a\x00\x00\x00\x00\x00") 163 | } 164 | 165 | // https://github.com/rapid7/metasploit-framework/blob/eeed14d2a27759e369d48331b0959008a0b24df8/modules/auxiliary/scanner/discovery/udp_sweep.rb#L413 166 | func probePktMSSQL() []byte { 167 | return []byte("\x02") 168 | } 169 | 170 | // https://github.com/rapid7/metasploit-framework/blob/eeed14d2a27759e369d48331b0959008a0b24df8/modules/auxiliary/scanner/discovery/udp_sweep.rb#L451 171 | func probePktSNMPv2() []byte { 172 | // TODO: Go down the ASN.1 rabbit hole. 
Until then, the payload extracted from a network capture has to suffice 173 | return []byte("0)\x02\x01\x01\x04\x06public\xa0\x1c\x02\x04w]l\xb1\x02\x01\x00\x02\x01\x000\x0e0\x0c\x06\x08+\x06\x01\x02\x01\x01\x01\x00\x05\x00") 174 | } 175 | 176 | // https://github.com/rapid7/metasploit-framework/blob/eeed14d2a27759e369d48331b0959008a0b24df8/modules/auxiliary/scanner/discovery/udp_sweep.rb#L397 177 | func probePktPortmap() []byte { 178 | // Not cryptographically relevant, so seeding with time should be OK 179 | r := rand.New(rand.NewSource(time.Now().UnixNano())) 180 | XID := make([]byte, 4) 181 | r.Read(XID) 182 | payload := []byte("\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x86\xa0\x00\x00\x00\x02\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00") 183 | return append(XID, payload...) 184 | } 185 | -------------------------------------------------------------------------------- /core/scannernode.go: -------------------------------------------------------------------------------- 1 | package core 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | "time" 7 | 8 | "github.com/golang/protobuf/proto" 9 | 10 | "github.com/nray-scanner/nray/scanner" 11 | nraySchema "github.com/nray-scanner/nray/schemas" 12 | "github.com/nray-scanner/nray/utils" 13 | 14 | "github.com/spf13/viper" 15 | 16 | "github.com/shirou/gopsutil/cpu" 17 | "github.com/shirou/gopsutil/host" 18 | "github.com/shirou/gopsutil/process" 19 | log "github.com/sirupsen/logrus" 20 | 21 | // TCP transport for nanomsg 22 | _ "nanomsg.org/go/mangos/v2/transport/tcp" 23 | _ "nanomsg.org/go/mangos/v2/transport/tlstcp" 24 | ) 25 | 26 | // TODO: Get rid of globals / unify them to a struct 27 | var nodeID string 28 | var timeOffset time.Duration 29 | var scannerConfig *viper.Viper 30 | 31 | // These variables are currently hardcoded and should be configurable in the future 32 | const sendDeadline = 30 * time.Second 33 | const recvDeadline = 30 * time.Second 34 | const heartBeatTick = 5 * time.Second 35 | 36 | // NodeCmdArgs holds user data that is passed on to scanner node 37 | type NodeCmdArgs struct { 38 | Server string 39 | Port string 40 | Debug bool 41 | PreferredPool int32 42 | NodeName string 43 | UseTLS bool 44 | TLSIgnoreServerCertificate bool 45 | TLSCACertPath string 46 | TLSClientKeyPath string 47 | TLSClientCertPath string 48 | TLSServerSAN string 49 | } 50 | 51 | // RunNode is called by the main function of the node binary and gets everything up and running 52 | func RunNode(args NodeCmdArgs) { 53 | if args.Debug { 54 | log.SetLevel(log.DebugLevel) 55 | log.SetFormatter(&utils.Formatter{}) 56 | } 57 | // Bring sanity back... 
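// Node names longer than 32 characters are truncated.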
58 | if len(args.NodeName) > 32 { 59 | args.NodeName = args.NodeName[:32] 60 | log.WithFields(log.Fields{ 61 | "module": "core.scannernode", 62 | "src": "RunNode", 63 | }).Debugf("Truncating name to %s", args.NodeName) 64 | } 65 | if args.Server == "" { 66 | log.Printf("Server is not specified, using localhost") 67 | args.Server = "localhost" 68 | } 69 | if args.Port == "" { 70 | log.Printf("Port is not specified, using 8601") 71 | args.Port = "8601" 72 | } 73 | 74 | var socketConfig map[string]interface{} 75 | socketConfig, err := setupMangosClientTLSConfig(args.UseTLS, args.TLSIgnoreServerCertificate, args.TLSCACertPath, 76 | args.TLSClientCertPath, args.TLSClientKeyPath, args.TLSServerSAN) 77 | utils.CheckError(err, true) 78 | sock := initServerConnection(args.Server, args.Port, socketConfig) // establish network connection to server 79 | defer sock.Close() 80 | 81 | nodeID, timeOffset, err = registerNode(sock, args.NodeName, args.PreferredPool) // makes node known to server and sets nodeID and timeOffset 82 | utils.CheckError(err, true) 83 | 84 | // Everything sent to this channel will be sent to the server 85 | dataChan := make(chan *nraySchema.NrayNodeMessage, 10) 86 | log.WithFields(log.Fields{ 87 | "module": "core.scannernode", 88 | "src": "RunNode", 89 | }).Debugf("Node name is set to %s", args.NodeName) 90 | scanController := scanner.CreateScanController(nodeID, args.NodeName, timeOffset, scannerConfig) 91 | 92 | // JobBatches are sent here 93 | workBatchChan := make(chan *nraySchema.MoreWorkReply) 94 | 95 | // makeHeartbeats runs asynchronously in its own goroutine and sends regular heartbeats 96 | go makeHeartbeats(dataChan, heartBeatTick, timeOffset) 97 | 98 | // the actual scanning work happens here 99 | go scanner.RunNodeScannerLoop(scanController, workBatchChan, dataChan) 100 | 101 | // After the client is registered, this is the main program loop 102 | // that sends and receives messages and passes them to the appropriate 103 | // functions 104 | mainloop: 105 | for { 106 | // Get message from internal data channel and send it to server 107 | nextNodeMessage := <-dataChan 108 | marshalled, err := proto.Marshal(nextNodeMessage) 109 | utils.CheckError(err, false) 110 | err = sock.Send(marshalled) 111 | utils.CheckError(err, false) 112 | 113 | // Receive response 114 | msg, err := sock.Recv() 115 | utils.CheckError(err, false) 116 | 117 | // Unpack it 118 | skeleton := &nraySchema.NrayServerMessage{} 119 | err = proto.Unmarshal(msg, skeleton) 120 | utils.CheckError(err, false) 121 | 122 | // Depending on the content of the message, do something 123 | switch skeleton.MessageContent.(type) { 124 | case *nraySchema.NrayServerMessage_RegisteredNode: 125 | log.WithFields(log.Fields{ 126 | "module": "core.scannernode", 127 | "src": "RunNode", 128 | }).Debug("Register message") 129 | nodeID, timeOffset, scannerConfig = HandleRegisteredNode(skeleton.GetRegisteredNode()) 130 | case *nraySchema.NrayServerMessage_HeartbeatAck: 131 | log.WithFields(log.Fields{ 132 | "module": "core.scannernode", 133 | "src": "RunNode", 134 | }).Debug("Heartbeat ACK") 135 | scanning, running := HandleHeartbeatAck(skeleton.GetHeartbeatAck()) 136 | scanController.Pause.SetValue(!scanning || !running) 137 | if !running { 138 | message := &nraySchema.NrayNodeMessage{ 139 | MessageContent: &nraySchema.NrayNodeMessage_Goodbye{ 140 | Goodbye: &nraySchema.Goodbye{ 141 | NodeID: nodeID, 142 | }, 143 | }, 144 | } 145 | dataChan <- message 146 | } 147 | case *nraySchema.NrayServerMessage_JobBatch: 148 | b =
skeleton.GetJobBatch() 149 | log.WithFields(log.Fields{ 150 | "module": "core.scannernode", 151 | "src": "RunNode", 152 | }).Debugf("Job Batch with ID %d. It contains %d targets, %d tcp and %d udp ports", b.Batchid, len(b.GetTargets().GetRhosts()), len(b.GetTargets().GetTcpports()), len(b.GetTargets().GetUdpports())) 153 | workBatchChan <- skeleton.GetJobBatch() 154 | case *nraySchema.NrayServerMessage_WorkDoneAck: 155 | log.WithFields(log.Fields{ 156 | "module": "core.scannernode", 157 | "src": "RunNode", 158 | }).Debug("WorkDoneAck") 159 | case *nraySchema.NrayServerMessage_GoodbyeAck: 160 | log.WithFields(log.Fields{ 161 | "module": "core.scannernode", 162 | "src": "RunNode", 163 | }).Debug("GoodbyeAck") 164 | if skeleton.GetGoodbyeAck().Ok { 165 | log.Debug("Breaking mainloop") 166 | break mainloop 167 | } 168 | case *nraySchema.NrayServerMessage_NodeIsUnregistered: 169 | nodeID, timeOffset, err = registerNode(sock, args.NodeName, args.PreferredPool) 170 | utils.CheckError(err, true) 171 | if _, ok := nextNodeMessage.MessageContent.(*nraySchema.NrayNodeMessage_Heartbeat); !ok { 172 | dataChan <- nextNodeMessage // retransmit the last message unless it was a heartbeat 173 | } 174 | case nil: 175 | log.WithFields(log.Fields{ 176 | "module": "core.scannernode", 177 | "src": "RunNode", 178 | }).Warning("Message sent by server is empty. This should not happen, if you can reproduce this please file a bug report. Continuing operation...") 179 | default: 180 | log.WithFields(log.Fields{ 181 | "module": "core.scannernode", 182 | "src": "RunNode", 183 | }).Error("Cannot decode message sent by server") 184 | } 185 | } 186 | } 187 | 188 | func gatherEnvironmentInformation() *nraySchema.EnvironmentInformation { 189 | var err error 190 | var hostname, hostos, processname, username, cpumodelname string 191 | hostinfo, err := host.Info() 192 | utils.CheckError(err, false) 193 | if err != nil { 194 | hostname = "unknown" 195 | hostos = "unknown" 196 | } else { 197 | hostname = hostinfo.Hostname 198 | hostos = hostinfo.OS 199 | } 200 | 201 | pid := os.Getpid() 202 | proc, err := process.NewProcess(int32(pid)) 203 | utils.CheckError(err, false) 204 | processname, err = proc.Name() 205 | utils.CheckError(err, false) 206 | if err != nil { 207 | processname = "unknown" 208 | } 209 | username, err = proc.Username() 210 | utils.CheckError(err, false) 211 | if err != nil { 212 | username = "unknown" 213 | } 214 | cpuinfo, err := cpu.Info() 215 | utils.CheckError(err, false) 216 | if err != nil || len(cpuinfo) == 0 { 217 | cpumodelname = "unknown" 218 | } else { 219 | cpumodelname = cpuinfo[0].ModelName 220 | } 221 | 222 | message := &nraySchema.EnvironmentInformation{ 223 | Hostname: hostname, 224 | Os: hostos, 225 | Pid: fmt.Sprintf("%d", pid), 226 | Processname: processname, 227 | Username: username, 228 | Cpumodelname: cpumodelname, 229 | } 230 | return message 231 | } 232 | -------------------------------------------------------------------------------- /cmd/scan.go: -------------------------------------------------------------------------------- 1 | package cmd 2 | 3 | import ( 4 | "net" 5 | "strings" 6 | "sync" 7 | "time" 8 | 9 | "github.com/apparentlymart/go-cidr/cidr" 10 | "github.com/golang/protobuf/ptypes" 11 | log "github.com/sirupsen/logrus" 12 | 13 | targetgeneration "github.com/nray-scanner/nray/core/targetGeneration" 14 | "github.com/nray-scanner/nray/events" 15 | "github.com/nray-scanner/nray/scanner" 16 | nraySchema "github.com/nray-scanner/nray/schemas" 17 |
"github.com/nray-scanner/nray/utils" 18 | "github.com/spf13/cobra" 19 | "github.com/spf13/viper" 20 | ) 21 | 22 | var rawPorts string 23 | var rawTargets string 24 | var scanUDP bool 25 | var targetCount uint64 26 | var scannedCount uint64 27 | var timeout time.Duration 28 | var outputFile string 29 | var workers uint 30 | 31 | var scanCmd = &cobra.Command{ 32 | Use: "scan", 33 | Short: "Starts a scan with parameters provided on the command line", 34 | Long: `If you want to initiate a quick and dirty simple scan without 35 | creating a configuration and attaching scanner nodes, the simple scan 36 | is what you are looking for. Get the work done nmap-style like you are used to.`, 37 | Run: func(cmd *cobra.Command, args []string) { 38 | if rawTargets == "" { // scan from stdin 39 | 40 | } 41 | persistResults := true 42 | if outputFile == "" { 43 | persistResults = false 44 | } 45 | 46 | targetChan := parseTargets() 47 | parsedPorts := parsePorts() 48 | scanChan := prepareScan(targetChan, parsedPorts) 49 | resultChan := make(chan (*scanner.PortscanResult), 100) 50 | scanFuncs := prepareScanFuncs(scanChan, resultChan) 51 | 52 | var filechan chan *nraySchema.Event 53 | var logfile events.EventHandler 54 | if persistResults { 55 | config := viper.New() 56 | config.Set("filename", outputFile) 57 | config.Set("overwriteExisting", true) 58 | logfile := events.GetEventHandler("json-file") 59 | logfile.Configure(config) 60 | filechan := make(chan (*nraySchema.Event), 1000) 61 | go logfile.ProcessEventStream(filechan) 62 | } 63 | 64 | stdout := events.GetEventHandler("terminal") 65 | stdout.Configure(viper.New()) 66 | stdoutchan := make(chan (*nraySchema.Event), 1000) 67 | go stdout.ProcessEventStream(stdoutchan) 68 | 69 | go func(resultChan <-chan *scanner.PortscanResult) { 70 | for portscanResult := range resultChan { 71 | now, _ := ptypes.TimestampProto(time.Now()) 72 | data := &nraySchema.Event{ 73 | NodeID: "0", 74 | NodeName: "localscanner", 75 | Timestamp: now, 76 | Scannername: "local", 77 | EventData: &nraySchema.Event_Result{ 78 | Result: &nraySchema.ScanResult{ 79 | Target: portscanResult.Target, 80 | Port: portscanResult.Port, 81 | Result: &nraySchema.ScanResult_Portscan{ 82 | Portscan: &nraySchema.PortScanResult{ 83 | Scantype: portscanResult.Scantype, 84 | Target: portscanResult.Target, 85 | Port: portscanResult.Port, 86 | Open: portscanResult.Open, 87 | Timeout: uint32(portscanResult.Timeout / time.Millisecond), 88 | }, 89 | }, 90 | }, 91 | }, 92 | } 93 | if persistResults { 94 | filechan <- data 95 | } 96 | stdoutchan <- data 97 | } 98 | }(resultChan) 99 | startScan(scanFuncs, resultChan) 100 | 101 | utils.CheckError(stdout.Close(), false) 102 | if persistResults { 103 | utils.CheckError(logfile.Close(), false) 104 | } 105 | 106 | }, 107 | } 108 | 109 | func init() { 110 | rootCmd.AddCommand(scanCmd) 111 | scanCmd.PersistentFlags().StringVarP(&rawPorts, "ports", "p", "", "Ports to scan. 
A comma-separated list as well as ranges are supported.") 112 | scanCmd.PersistentFlags().StringVarP(&rawTargets, "targets", "t", "", "Targets to scan.") 113 | scanCmd.PersistentFlags().BoolVarP(&scanUDP, "udp", "u", false, "This flag switches to UDP scanning.") 114 | scanCmd.PersistentFlags().DurationVarP(&timeout, "timeout", "", 1000*time.Millisecond, "Timeout for TCP connect.") 115 | scanCmd.PersistentFlags().StringVarP(&outputFile, "output", "o", "", "The file to write json output") 116 | scanCmd.PersistentFlags().UintVarP(&workers, "workers", "w", 1000, "How many workers to use for scanning.") 117 | scanCmd.MarkFlagRequired("ports") 118 | scanCmd.MarkFlagRequired("targets") // remove once stdin scanning is implemented 119 | log.SetFormatter(&utils.Formatter{}) 120 | } 121 | 122 | func parseTargets() <-chan (string) { 123 | targetChan := make(chan (string), 500) 124 | go func(targets chan<- (string)) { 125 | for _, rawTarget := range strings.Split(rawTargets, ",") { 126 | if utils.Ipv4NetRegexpr.MatchString(rawTarget) { // An IPv4 network 127 | _, ipnet, err := net.ParseCIDR(rawTarget) 128 | utils.CheckError(err, true) 129 | targetCount += cidr.AddressCount(ipnet) 130 | ipStream := targetgeneration.GenerateIPStreamFromCIDR(ipnet, nil) 131 | for ip := range ipStream { 132 | targets <- ip.String() 133 | } 134 | } else if utils.Ipv4Regexpr.MatchString(rawTarget) { // An IPv4 address 135 | targetCount++ 136 | targets <- rawTarget 137 | } else if utils.MayBeFQDN(rawTarget) { // Probably a FQDN 138 | targetCount++ 139 | targets <- rawTarget 140 | } else { 141 | log.WithFields(log.Fields{ 142 | "module": "cmd.scan", 143 | "src": "parseTargets", 144 | }).Printf("This does not look like a valid target: %s", rawTarget) 145 | } 146 | } 147 | close(targets) 148 | }(targetChan) 149 | return targetChan 150 | } 151 | 152 | func parsePorts() []uint16 { 153 | if len(rawPorts) == 0 { 154 | log.Fatal("Port list is empty") 155 | } 156 | var parsedPorts []uint16 157 | if scanUDP { 158 | parsedPorts = targetgeneration.ParsePorts(strings.Split(rawPorts, ","), "udp") 159 | } else { 160 | parsedPorts = targetgeneration.ParsePorts(strings.Split(rawPorts, ","), "tcp") 161 | } 162 | return parsedPorts 163 | } 164 | 165 | func prepareScan(targetChan <-chan (string), ports []uint16) <-chan (*scanner.Target) { 166 | scanChan := make(chan (*scanner.Target), 1000) 167 | proto := "TCP" 168 | if scanUDP { 169 | proto = "UDP" 170 | } 171 | // Fill the channel asynchronously: a synchronous loop would block forever once targets x ports exceeds the buffer 172 | go func() { 173 | for hostToScan := range targetChan { 174 | for _, port := range ports { 175 | scanChan <- &scanner.Target{ 176 | Host: hostToScan, 177 | Protocol: proto, 178 | Port: uint32(port), 179 | } 180 | } 181 | } 182 | close(scanChan) 183 | }() 184 | return scanChan 185 | } 186 | 187 | func startScan(funcChan <-chan (func()), resultChan chan<- *scanner.PortscanResult) { 188 | var wg sync.WaitGroup 189 | log.WithFields(log.Fields{ 190 | "module": "cmd.scan", 191 | "src": "startScan", 192 | }).Printf("Starting workers: %d", workers) 193 | for i := uint(0); i < workers; i++ { 194 | wg.Add(1) 195 | go func(queue <-chan func()) { 196 | for queuedTask := range queue { 197 | queuedTask() 198 | } 199 | wg.Done() 200 | }(funcChan) 201 | } 202 | wg.Wait() 203 | close(resultChan) 204 | } 205 | 206 | // prepareScanFuncs returns a channel where scan functions are sent over 207 | // They are completely prepared and just have to be called 208 | // This is a lightweight version of the larger implementation when using 209 | // the full feature set 210 | func
prepareScanFuncs(targetChan <-chan (*scanner.Target), results chan<- *scanner.PortscanResult) <-chan func() { 211 | scannerconf := viper.New() 212 | scannerconf.Set("timeout", timeout) 213 | var tcpscanner = &scanner.TCPScanner{} 214 | tcpscanner.Configure(scannerconf) 215 | var udpscanner = &scanner.UDPScanner{} 216 | udpscanner.Configure(scannerconf) 217 | scanFuncs := make(chan func(), 100) 218 | 219 | go func(targetChan <-chan (*scanner.Target), results chan<- *scanner.PortscanResult) { 220 | for targetHost := range targetChan { 221 | if targetHost.Protocol == "UDP" { 222 | t := targetHost.Host 223 | port := targetHost.Port 224 | scanFuncs <- func() { 225 | result, err := scanner.UDPProtoScan(t, port, *udpscanner) 226 | utils.CheckError(err, false) 227 | if result != nil { 228 | results <- result 229 | } 230 | } 231 | } else { // Assume TCP default 232 | t := targetHost.Host 233 | port := targetHost.Port 234 | scanFuncs <- func() { 235 | result, err := scanner.TCPConnectIsOpen(t, port, timeout) 236 | utils.CheckError(err, false) 237 | if result != nil { 238 | results <- result 239 | } 240 | } 241 | } 242 | } 243 | close(scanFuncs) 244 | }(targetChan, results) 245 | 246 | return scanFuncs 247 | } 248 | -------------------------------------------------------------------------------- /go.sum: -------------------------------------------------------------------------------- 1 | github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA= 2 | github.com/apparentlymart/go-cidr v1.1.0 h1:2mAhrMoF+nhXqxTzSZMUzDHkLjmIHC+Zzn4tdgBZjnU= 3 | github.com/apparentlymart/go-cidr v1.1.0/go.mod h1:EBcsNrHc3zQeuaeCeCtQruQm+n9/YjEn/vI25Lg7Gwc= 4 | github.com/asergeyev/nradix v0.0.0-20170505151046-3872ab85bb56/go.mod h1:8BhOLuqtSuT5NZtZMwfvEibi09RO3u79uqfHZzfDTR4= 5 | github.com/asergeyev/nradix v0.0.0-20220715161825-e451993e425c h1:cN6WRmhJkh/u5bvf/XXjoqcHxljVKIz3Nt7q2dVJySo= 6 | github.com/asergeyev/nradix v0.0.0-20220715161825-e451993e425c/go.mod h1:8BhOLuqtSuT5NZtZMwfvEibi09RO3u79uqfHZzfDTR4= 7 | github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= 8 | github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 9 | github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= 10 | github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 11 | github.com/denisbrodbeck/machineid v1.0.1 h1:geKr9qtkB876mXguW2X6TU4ZynleN6ezuMSRhl4D7AQ= 12 | github.com/denisbrodbeck/machineid v1.0.1/go.mod h1:dJUwb7PTidGDeYyUBmXZ2GphQBbjJCrnectwCyxcUSI= 13 | github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= 14 | github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= 15 | github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k= 16 | github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= 17 | github.com/gdamore/optopia v0.2.0/go.mod h1:YKYEwo5C1Pa617H7NlPcmQXl+vG6YnSSNB44n8dNL0Q= 18 | github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= 19 | github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE= 20 | github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78= 21 | github.com/go-viper/mapstructure/v2 v2.3.0 h1:27XbWsHIqhbdR5TIC911OfYvgSaW93HM+dX7970Q7jk= 22 | github.com/go-viper/mapstructure/v2 v2.3.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= 23 | 
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= 24 | github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= 25 | github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= 26 | github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= 27 | github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= 28 | github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= 29 | github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= 30 | github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= 31 | github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= 32 | github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= 33 | github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= 34 | github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= 35 | github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= 36 | github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= 37 | github.com/pelletier/go-toml/v2 v2.2.4 h1:mye9XuhQ6gvn5h28+VilKrrPoQVanw5PMw/TB0t5Ec4= 38 | github.com/pelletier/go-toml/v2 v2.2.4/go.mod h1:2gIqNv+qfxSVS7cM2xJQKtLSTLUE9V8t9Stt+h56mCY= 39 | github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= 40 | github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 41 | github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= 42 | github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc= 43 | github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= 44 | github.com/sagikazarmark/locafero v0.9.0 h1:GbgQGNtTrEmddYDSAH9QLRyfAHY12md+8YFTqyMTC9k= 45 | github.com/sagikazarmark/locafero v0.9.0/go.mod h1:UBUyz37V+EdMS3hDF3QWIiVr/2dPrx49OMO0Bn0hJqk= 46 | github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI= 47 | github.com/shirou/gopsutil v3.21.11+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= 48 | github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= 49 | github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= 50 | github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo= 51 | github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0= 52 | github.com/spf13/afero v1.14.0 h1:9tH6MapGnn/j0eb0yIXiLjERO8RB6xIVZRDCX7PtqWA= 53 | github.com/spf13/afero v1.14.0/go.mod h1:acJQ8t0ohCGuMN3O+Pv0V0hgMxNYDlvdk+VTfyZmbYo= 54 | github.com/spf13/cast v1.9.2 h1:SsGfm7M8QOFtEzumm7UZrZdLLquNdzFYfIbEXntcFbE= 55 | github.com/spf13/cast v1.9.2/go.mod h1:jNfB8QC9IA6ZuY2ZjDp0KtFO2LZZlg4S/7bzP6qqeHo= 56 | github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo= 57 | github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0= 58 | github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o= 59 | github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= 60 | github.com/spf13/viper v1.20.1 h1:ZMi+z/lvLyPSCoNtFCpqjy0S4kPbirhpTMwl8BkW9X4= 61 | github.com/spf13/viper v1.20.1/go.mod h1:P9Mdzt1zoHIG8m2eZQinpiBjo6kCmZSKBClNNqjJvu4= 
62 | github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= 63 | github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= 64 | github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= 65 | github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= 66 | github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= 67 | github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= 68 | github.com/tklauser/go-sysconf v0.3.15 h1:VE89k0criAymJ/Os65CSn1IXaol+1wrsFHEB8Ol49K4= 69 | github.com/tklauser/go-sysconf v0.3.15/go.mod h1:Dmjwr6tYFIseJw7a3dRLJfsHAMXZ3nEnL/aZY+0IuI4= 70 | github.com/tklauser/numcpus v0.10.0 h1:18njr6LDBk1zuna922MgdjQuJFjrdppsZG60sHGfjso= 71 | github.com/tklauser/numcpus v0.10.0/go.mod h1:BiTKazU708GQTYF4mB+cmlpT2Is1gLk7XVuEeem8LsQ= 72 | github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0= 73 | github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= 74 | github.com/zmap/go-iptree v0.0.0-20210731043055-d4e632617837 h1:DjHnADS2r2zynZ3WkCFAQ+PNYngMSNceRROi0pO6c3M= 75 | github.com/zmap/go-iptree v0.0.0-20210731043055-d4e632617837/go.mod h1:9vp0bxqozzQwcjBwenEXfKVq8+mYbwHkQ1NF9Ap0DMw= 76 | go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= 77 | go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= 78 | golang.org/x/sys v0.0.0-20181128092732-4ed8d59d0b35/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 79 | golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 80 | golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 81 | golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 82 | golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw= 83 | golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= 84 | golang.org/x/text v0.26.0 h1:P42AVeLghgTYr4+xUnTRKDMqpar+PtX7KWuNQL21L8M= 85 | golang.org/x/text v0.26.0/go.mod h1:QK15LZJUUQVJxhz7wXgxSy/CJaTFjd0G+YLonydOVQA= 86 | golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ= 87 | golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= 88 | google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY= 89 | google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= 90 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 91 | gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= 92 | gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= 93 | gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 94 | gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= 95 | gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 96 | nanomsg.org/go/mangos/v2 v2.0.8 h1:Nnc5gCNPd8sSyxgfMTdlKK020p4nxLAxcQrhLVnjGQ8= 97 | nanomsg.org/go/mangos/v2 v2.0.8/go.mod h1:gngxudWUZkxqHN+8n/2y9gWZPcwmSbliFYJsYG8mbKs= 98 | -------------------------------------------------------------------------------- 
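An aside on prepareScanFuncs further above, which wraps each incoming target in a ready-to-run closure and queues it on a channel: a minimal worker pool draining such a channel could look like the sketch below. runWorkers and the worker count n are invented for the illustration and are not part of this repository.

package main // hypothetical sketch, not a file of this repository

import "sync"

// runWorkers executes queued scan closures with n concurrent workers.
func runWorkers(scanFuncs <-chan func(), n int) {
	var wg sync.WaitGroup
	for i := 0; i < n; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			// Runs until the producer closes the channel.
			for scan := range scanFuncs {
				scan()
			}
		}()
	}
	wg.Wait()
}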
/core/type_pool.go:
--------------------------------------------------------------------------------
1 | package core
2 | 
3 | import (
4 | "fmt"
5 | "sync"
6 | "time"
7 | 
8 | targetgeneration "github.com/nray-scanner/nray/core/targetGeneration"
9 | log "github.com/sirupsen/logrus"
10 | )
11 | 
12 | // Pool is a container holding nodes and
13 | // the work those nodes still have to do
14 | type Pool struct {
15 | nodeLock sync.RWMutex
16 | nodes map[string]*Node
17 | TargetChan <-chan targetgeneration.AnyTargets
18 | targetGenerationErrorStream chan error
19 | jobArea []*Job
20 | jobAreaLock sync.Mutex
21 | jobGenerationDone bool
22 | jobGenerationDoneLock sync.RWMutex
23 | CountTargets uint64
24 | CountWorkDone uint64
25 | poolLock sync.RWMutex
26 | }
27 | 
28 | // Returns a pointer to a newly allocated pool
29 | func initPool(statusInterval time.Duration) *Pool {
30 | p := &Pool{
31 | nodes: make(map[string]*Node, 0),
32 | TargetChan: make(chan targetgeneration.AnyTargets, 1024),
33 | targetGenerationErrorStream: make(chan error, 100),
34 | jobArea: make([]*Job, 0),
35 | jobGenerationDone: false,
36 | }
37 | go p.printProgress(statusInterval)
38 | return p
39 | }
40 | 
41 | func (p *Pool) getCurrentPoolSize() int {
42 | p.nodeLock.RLock()
43 | defer p.nodeLock.RUnlock()
44 | poolSize := len(p.nodes)
45 | return poolSize
46 | }
47 | 
48 | // Returns a pointer to the node with the given ID - if the node doesn't exist, the second return value is false
49 | func (p *Pool) getNodeFromID(searchID string) (*Node, bool) {
50 | p.nodeLock.RLock()
51 | defer p.nodeLock.RUnlock()
52 | node, exists := p.nodes[searchID]
53 | return node, exists
54 | }
55 | 
56 | // Adds a new node to the pool
57 | func (p *Pool) addNodeToPool(newNodeID string, newNodeName string, newNodeMetaInfo string, newNodeRegisterTime time.Time) {
58 | var finalNodeName string
59 | // if no name is presented, take the node ID as name
60 | if newNodeName == "" {
61 | finalNodeName = newNodeID
62 | } else {
63 | finalNodeName = newNodeName
64 | }
65 | newNode := Node{
66 | ID: newNodeID,
67 | Name: finalNodeName,
68 | MetaInfo: newNodeMetaInfo,
69 | LastHeartbeat: newNodeRegisterTime,
70 | }
71 | p.nodeLock.Lock()
72 | defer p.nodeLock.Unlock()
73 | p.nodes[newNodeID] = &newNode
74 | }
75 | 
76 | // Removes a node from the pool
77 | func (p *Pool) removeNodeFromPool(nodeID string, kill bool) bool {
78 | if !kill {
79 | if p.NodeHasOpenJobs(nodeID) {
80 | return false
81 | }
82 | } else {
83 | p.jobAreaLock.Lock()
84 | defer p.jobAreaLock.Unlock()
85 | for _, job := range p.jobArea {
86 | if job.nodeIDWorkingOnJob == nodeID {
87 | job.nodeIDWorkingOnJob = ""
88 | job.state = waiting
89 | }
90 | }
91 | }
92 | // Don't use getNodeFromID() here since atomic locking is required for the whole operation
93 | // Otherwise other goroutines might modify the map between
94 | // looking up the node and deleting it
95 | p.nodeLock.Lock()
96 | defer p.nodeLock.Unlock()
97 | delete(p.nodes, nodeID)
98 | return true
99 | }
100 | 
101 | // Returns a list of nodes that are expired
102 | func (p *Pool) getExpiredNodeIDs(expiryTime time.Duration) []string {
103 | expiredNodeIDs := make([]string, 0)
104 | p.nodeLock.RLock()
105 | defer p.nodeLock.RUnlock()
106 | for _, node := range p.nodes {
107 | if time.Since(node.LastHeartbeat) > expiryTime {
108 | expiredNodeIDs = append(expiredNodeIDs, node.ID)
109 | }
110 | }
111 | return expiredNodeIDs
112 | }
113 | 
114 | // Updates the last-heartbeat timestamp of a node
115 | func (p *Pool)
updateHeartbeatTimer(nodeID string, lastHeartbeatReceived time.Time) { 116 | p.nodeLock.Lock() 117 | defer p.nodeLock.Unlock() 118 | node, exists := p.nodes[nodeID] 119 | if exists { 120 | node.heartBeatLock.Lock() 121 | defer node.heartBeatLock.Unlock() 122 | node.LastHeartbeat = lastHeartbeatReceived 123 | } 124 | } 125 | 126 | // Supposed to run in a dedicated goroutine 127 | func removeExpiredNodes(pool *Pool, checkInterval time.Duration, maxExpiryTime time.Duration) { 128 | ticker := time.NewTicker(checkInterval) 129 | for range ticker.C { 130 | expiredNodes := pool.getExpiredNodeIDs(maxExpiryTime) 131 | for _, nodeID := range expiredNodes { 132 | log.WithFields(log.Fields{ 133 | "module": "core.type_pool", 134 | "src": "removeExpiredNodes", 135 | }).Debugf("Removing node %s from pool", nodeID) 136 | pool.removeNodeFromPool(nodeID, true) 137 | } 138 | } 139 | } 140 | 141 | func (p *Pool) removeJobFromJobArea(nodeID string, jobIDToDelete uint64) error { 142 | p.jobAreaLock.Lock() 143 | defer p.jobAreaLock.Unlock() 144 | posOfJobToDelete := -1 145 | workDoneCount := uint64(0) 146 | // Locate Job 147 | for pos, job := range p.jobArea { 148 | if job.id == jobIDToDelete && job.nodeIDWorkingOnJob == nodeID { 149 | posOfJobToDelete = pos 150 | workDoneCount = job.workItems.TargetCount() 151 | break 152 | } 153 | } 154 | 155 | if posOfJobToDelete == -1 { 156 | return fmt.Errorf("Couldn't find the job to delete") 157 | } 158 | 159 | // Enter the madness 160 | // https://github.com/golang/go/wiki/SliceTricks 161 | p.jobArea[posOfJobToDelete] = p.jobArea[len(p.jobArea)-1] 162 | p.jobArea[len(p.jobArea)-1] = nil 163 | p.jobArea = p.jobArea[:len(p.jobArea)-1] 164 | 165 | p.poolLock.Lock() 166 | p.CountWorkDone += workDoneCount 167 | p.poolLock.Unlock() 168 | return nil 169 | } 170 | 171 | // GetJobForNode returns the next job for a given node ID 172 | func (p *Pool) GetJobForNode(nodeID string) *Job { 173 | p.jobAreaLock.Lock() 174 | defer p.jobAreaLock.Unlock() 175 | for _, job := range p.jobArea { 176 | if job.nodeIDWorkingOnJob == nodeID { 177 | return job 178 | 179 | } 180 | } 181 | for _, job := range p.jobArea { 182 | if job.nodeIDWorkingOnJob == "" { 183 | job.nodeIDWorkingOnJob = nodeID 184 | job.state = inProgress 185 | return job 186 | } 187 | } 188 | return nil 189 | } 190 | 191 | // GetNumberOfWaitingJobs returns how many jobs are currently open 192 | func (p *Pool) GetNumberOfWaitingJobs() int { 193 | p.jobAreaLock.Lock() 194 | defer p.jobAreaLock.Unlock() 195 | // Count waiting jobs 196 | waitingJobs := 0 197 | for _, job := range p.jobArea { 198 | if job.state == waiting { 199 | waitingJobs++ 200 | } 201 | } 202 | return waitingJobs 203 | } 204 | 205 | // GetNumberOfAllJobs returns the length of the JobArea. 
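//
// (Aside, not part of the original comments: a job moves from waiting to
// inProgress when GetJobForNode hands it to a node, and it is removed again by
// removeJobFromJobArea once the node reports the batch as done. The swap-delete
// above is the SliceTricks pattern: overwrite the hole with the last element,
// nil the freed slot so the garbage collector can reclaim it, then shrink the
// slice by one.)
//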
If it is
206 | // 0, we can likely stop all nodes and the server
207 | func (p *Pool) GetNumberOfAllJobs() int {
208 | p.jobAreaLock.Lock()
209 | defer p.jobAreaLock.Unlock()
210 | return len(p.jobArea)
211 | }
212 | 
213 | // AddJobToJobArea adds a new job to this pool's job queue
214 | func (p *Pool) AddJobToJobArea(job *Job) {
215 | p.jobAreaLock.Lock()
216 | defer p.jobAreaLock.Unlock()
217 | p.jobArea = append(p.jobArea, job)
218 | }
219 | 
220 | // SetJobGenerationDone sets the flag that job generation is done
221 | func (p *Pool) SetJobGenerationDone() {
222 | p.jobGenerationDoneLock.Lock()
223 | defer p.jobGenerationDoneLock.Unlock()
224 | p.jobGenerationDone = true
225 | }
226 | 
227 | // IsJobGenerationDone queries the flag indicating that all jobs were generated
228 | func (p *Pool) IsJobGenerationDone() bool {
229 | p.jobGenerationDoneLock.RLock()
230 | defer p.jobGenerationDoneLock.RUnlock()
231 | return p.jobGenerationDone
232 | }
233 | 
234 | // StopNode pauses a single node identified by its ID
235 | func (p *Pool) StopNode(nodeID string) {
236 | p.nodeLock.Lock()
237 | defer p.nodeLock.Unlock()
238 | for _, node := range p.nodes {
239 | if node.ID == nodeID {
240 | node.setStop(true)
241 | }
242 | }
243 | }
244 | 
245 | // StopAllNodes pauses all nodes in this pool
246 | func (p *Pool) StopAllNodes() {
247 | p.nodeLock.Lock()
248 | defer p.nodeLock.Unlock()
249 | for _, node := range p.nodes {
250 | node.setStop(true)
251 | }
252 | }
253 | 
254 | // ResumeNode resumes a single node identified by its ID
255 | func (p *Pool) ResumeNode(nodeID string) {
256 | p.nodeLock.Lock()
257 | defer p.nodeLock.Unlock()
258 | for _, node := range p.nodes {
259 | if node.ID == nodeID {
260 | node.setStop(false)
261 | }
262 | }
263 | }
264 | 
265 | // ResumeAllNodes resumes all nodes in this pool
266 | func (p *Pool) ResumeAllNodes() {
267 | p.nodeLock.Lock()
268 | defer p.nodeLock.Unlock()
269 | for _, node := range p.nodes {
270 | node.setStop(false)
271 | }
272 | }
273 | 
274 | // NodeHasOpenJobs returns true if the node did not finish
275 | // all of its jobs, false otherwise
276 | func (p *Pool) NodeHasOpenJobs(nodeID string) bool {
277 | p.jobAreaLock.Lock()
278 | defer p.jobAreaLock.Unlock()
279 | for _, job := range p.jobArea {
280 | if job.nodeIDWorkingOnJob == nodeID {
281 | return true
282 | }
283 | }
284 | return false
285 | }
286 | 
287 | // NodesEmpty returns true if there are no nodes left in the pool
288 | func (p *Pool) NodesEmpty() bool {
289 | p.nodeLock.RLock()
290 | defer p.nodeLock.RUnlock()
291 | return len(p.nodes) == 0
292 | }
293 | 
294 | func (p *Pool) printProgress(pause time.Duration) {
295 | ticker := time.NewTicker(pause)
296 | for {
297 | <-ticker.C
298 | p.poolLock.RLock()
299 | done := p.CountWorkDone
300 | all := p.CountTargets
301 | p.poolLock.RUnlock()
302 | ratio := float32(0)
303 | if all != 0 && all >= done {
304 | ratio = float32(done) / float32(all)
305 | }
306 | log.WithFields(log.Fields{
307 | "module": "core.type_pool",
308 | "src": "printProgress",
309 | }).Infof("All: %d; TODO: %d; Done: %d (%.2f%%)", all, all-done, done, ratio*100)
310 | }
311 | }
312 | 
313 | // SetTargetCount sets the target count in a goroutine-safe manner
314 | func (p *Pool) SetTargetCount(targetCount uint64) {
315 | p.poolLock.Lock()
316 | defer p.poolLock.Unlock()
317 | p.CountTargets = targetCount
318 | }
319 | 
--------------------------------------------------------------------------------
/core/messageStuff.go:
-------------------------------------------------------------------------------- 1 | package core 2 | 3 | import ( 4 | "bytes" 5 | "crypto/rand" 6 | "encoding/hex" 7 | "fmt" 8 | "os" 9 | "time" 10 | 11 | targetgeneration "github.com/nray-scanner/nray/core/targetGeneration" 12 | "github.com/spf13/viper" 13 | 14 | "github.com/denisbrodbeck/machineid" 15 | "github.com/golang/protobuf/proto" 16 | "github.com/golang/protobuf/ptypes" 17 | nraySchema "github.com/nray-scanner/nray/schemas" 18 | "github.com/nray-scanner/nray/utils" 19 | log "github.com/sirupsen/logrus" 20 | mangos "nanomsg.org/go/mangos/v2" 21 | ) 22 | 23 | func generateRandomNodeID() string { 24 | bytes := make([]byte, 8) 25 | _, err := rand.Read(bytes) 26 | utils.CheckError(err, true) 27 | return hex.EncodeToString(bytes) 28 | } 29 | 30 | // Handles incoming registration requests from nodes 31 | // 1. Check if node is already registered 32 | // 2. If not, generate ID, register it and prepare answer with current time (for node sync) 33 | func handleNodeRegister(message *nraySchema.NodeRegister, considerClientPoolPreference bool, allowMultipleNodesPerHost bool) *nraySchema.RegisteredNode { 34 | var nodeIDReply string 35 | // The node already exists and multiple nodes are not allowed. Send empty string 36 | if !allowMultipleNodesPerHost && CurrentConfig.getNodeFromID(message.GetMachineID()) != nil { 37 | nodeIDReply = "" 38 | log.WithFields(log.Fields{ 39 | "module": "core.messageStuff", 40 | "src": "handleNodeRegister", 41 | }).Debugf("Node with ID %s is already registered, refusing", message.GetMachineID()) 42 | } else { 43 | var newNodeID string 44 | if allowMultipleNodesPerHost { 45 | for { 46 | newNodeID = generateRandomNodeID() 47 | // Make sure that the ID is not already assigned 48 | if CurrentConfig.getNodeFromID(newNodeID) == nil { 49 | break 50 | } 51 | } 52 | } else { 53 | newNodeID = message.GetMachineID() 54 | } 55 | var targetPool *Pool 56 | if considerClientPoolPreference && CurrentConfig.getPool(int(message.GetPreferredPool())) != nil { 57 | targetPool = CurrentConfig.getPool(int(message.GetPreferredPool())) 58 | log.WithFields(log.Fields{ 59 | "module": "core.messageStuff", 60 | "src": "handleNodeRegister", 61 | }).Debugf("Assigned node %s to pool %d", newNodeID, int(message.GetPreferredPool())) 62 | } else { 63 | targetPool = CurrentConfig.getSmallestPool() 64 | } 65 | targetPool.addNodeToPool(newNodeID, message.GetPreferredNodeName(), "", time.Now()) 66 | nodeIDReply = newNodeID 67 | log.WithFields(log.Fields{ 68 | "module": "core.messageStuff", 69 | "src": "handleNodeRegister", 70 | }).Debugf("New node %s registered successfully", newNodeID) 71 | } 72 | registeredNode := &nraySchema.RegisteredNode{ 73 | NodeID: nodeIDReply, 74 | ServerClock: ptypes.TimestampNow(), 75 | } 76 | return registeredNode 77 | } 78 | 79 | // HandleRegisteredNode extracts the assigned scanner ID 80 | // as well as the clock offset 81 | func HandleRegisteredNode(registeredNode *nraySchema.RegisteredNode) (string, time.Duration, *viper.Viper) { 82 | nodeID := registeredNode.GetNodeID() 83 | log.WithFields(log.Fields{ 84 | "module": "core.messageStuff", 85 | "src": "HandleRegisteredNode", 86 | }).Infof("Got ID: %s", nodeID) 87 | if nodeID == "" { 88 | log.WithFields(log.Fields{ 89 | "module": "core.messageStuff", 90 | "src": "HandleRegisteredNode", 91 | }).Error("Aborting, server refused to give an ID. 
Is there another instance running on this system?")
92 | os.Exit(1)
93 | }
94 | serverTime, err := ptypes.Timestamp(registeredNode.GetServerClock())
95 | utils.CheckError(err, true)
96 | timeOffset := time.Until(serverTime)
97 | rawConfig := registeredNode.GetScannerconfig()
98 | scannerConfig := viper.New()
99 | scannerConfig.SetConfigType("json")
100 | err = scannerConfig.ReadConfig(bytes.NewBuffer(rawConfig))
101 | utils.CheckError(err, false)
102 | return nodeID, timeOffset, scannerConfig
103 | }
104 | 
105 | func handleHeartbeat(heartbeat *nraySchema.Heartbeat) *nraySchema.HeartbeatAck {
106 | id := heartbeat.NodeID
107 | timestamp, err := ptypes.Timestamp(heartbeat.BeatTime)
108 | utils.CheckError(err, false)
109 | // The timestamp mustn't be older than 10 seconds
110 | diff := time.Since(timestamp)
111 | if diff.Seconds() > 10 {
112 | log.WithFields(log.Fields{
113 | "module": "core.messageStuff",
114 | "src": "handleHeartbeat",
115 | }).Debug("Received too old heartbeat, temporarily stopping scanner")
116 | return &nraySchema.HeartbeatAck{
117 | Running: true,
118 | Scanning: false,
119 | }
120 | }
121 | pool := CurrentConfig.getPoolFromNodeID(id)
122 | if pool == nil {
123 | log.WithFields(log.Fields{
124 | "module": "core.messageStuff",
125 | "src": "handleHeartbeat",
126 | }).Error("Pool is nil, this should not happen. Probably I'm going to die right now.")
127 | }
128 | pool.updateHeartbeatTimer(id, timestamp)
129 | node := CurrentConfig.getNodeFromID(id)
130 | log.WithFields(log.Fields{
131 | "module": "core.messageStuff",
132 | "src": "handleHeartbeat",
133 | }).Debugf("Received heartbeat %v from node %s", timestamp, node.Name)
134 | // No more jobs, stop node
135 | if pool.IsJobGenerationDone() && pool.GetNumberOfWaitingJobs() == 0 {
136 | node.setStop(true)
137 | }
138 | 
139 | return &nraySchema.HeartbeatAck{
140 | Running: !node.getStop(),
141 | Scanning: !node.scanPaused,
142 | }
143 | }
144 | 
145 | func handleMoreWorkRequest(moreWork *nraySchema.MoreWorkRequest) string {
146 | return moreWork.NodeID
147 | }
148 | 
149 | func createMoreWorkMsg(targets targetgeneration.AnyTargets, jobID uint64) []byte {
150 | t := &nraySchema.ScanTargets{
151 | Rhosts: targets.RemoteHosts,
152 | Tcpports: targets.TCPPorts,
153 | Udpports: targets.UDPPorts,
154 | }
155 | moreWork := &nraySchema.MoreWorkReply{
156 | Batchid: jobID,
157 | Targets: t,
158 | }
159 | serverMessage := &nraySchema.NrayServerMessage{
160 | MessageContent: &nraySchema.NrayServerMessage_JobBatch{
161 | JobBatch: moreWork,
162 | },
163 | }
164 | marshalled, err := proto.Marshal(serverMessage)
165 | utils.CheckError(err, false)
166 | return marshalled
167 | }
168 | 
169 | func createUnregisteredMessage(nodeID string) *nraySchema.NrayServerMessage {
170 | return &nraySchema.NrayServerMessage{
171 | MessageContent: &nraySchema.NrayServerMessage_NodeIsUnregistered{
172 | NodeIsUnregistered: &nraySchema.Unregistered{
173 | NodeID: nodeID,
174 | },
175 | },
176 | }
177 | }
178 | 
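//
// (Aside, illustrative: makeHeartbeats below stamps every beat with
// time.Now().Add(timeOffset), the clock offset measured against the server at
// registration time. handleHeartbeat above rejects beats that appear older
// than 10 seconds, so this normalization keeps nodes with skewed clocks from
// having their scanners paused on every heartbeat.)
//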
179 | // makeHeartbeats sends a heartbeat message every heartBeatTick to the specified channel
180 | func makeHeartbeats(dataChan chan<- *nraySchema.NrayNodeMessage, heartBeatTick time.Duration, timeOffset time.Duration) {
181 | ticker := time.NewTicker(heartBeatTick)
182 | for range ticker.C {
183 | // Add the offset to have timestamps aligned to the server's clock
184 | normalizedTime, err := ptypes.TimestampProto(time.Now().Add(timeOffset))
185 | utils.CheckError(err, false)
186 | heartbeat := nraySchema.Heartbeat{
187 | NodeID: nodeID,
188 | BeatTime: normalizedTime,
189 | }
190 | msg := &nraySchema.NrayNodeMessage{
191 | MessageContent: &nraySchema.NrayNodeMessage_Heartbeat{
192 | Heartbeat: &heartbeat,
193 | },
194 | }
195 | dataChan <- msg
196 | }
197 | }
198 | 
199 | // Generate an already serialized NodeRegister message
200 | func generateNodeRegister(nodeName string, preferredPool int32) []byte {
201 | // the machineid is supposed to be a unique machine
202 | // identifier, so the server is able to reject multiple
203 | // instances running on the same machine
204 | id, err := machineid.ProtectedID("nray-scanner")
205 | utils.CheckError(err, false)
206 | if err != nil || len(id) < 8 {
207 | log.WithFields(log.Fields{
208 | "module": "core.messagestuff",
209 | "src": "generateNodeRegister",
210 | }).Warningf("Some error occurred during generation of the node ID. Falling back to random node IDs.")
211 | id = generateRandomNodeID()
212 | }
213 | envInfo := gatherEnvironmentInformation()
214 | event := &nraySchema.Event{
215 | NodeID: id[0:8],
216 | NodeName: nodeName,
217 | Scannername: "node_environment",
218 | EventData: &nraySchema.Event_Environment{
219 | Environment: envInfo,
220 | },
221 | Timestamp: ptypes.TimestampNow(),
222 | }
223 | 
224 | node := nraySchema.NodeRegister{
225 | MachineID: id[0:8],
226 | PreferredNodeName: nodeName,
227 | PreferredPool: preferredPool,
228 | Envinfo: event,
229 | }
230 | nodeMessage := &nraySchema.NrayNodeMessage{
231 | MessageContent: &nraySchema.NrayNodeMessage_NodeRegister{
232 | NodeRegister: &node,
233 | },
234 | }
235 | msg, err := proto.Marshal(nodeMessage)
236 | utils.CheckError(err, true)
237 | return msg
238 | }
239 | 
240 | // HandleHeartbeatAck unpacks the message and returns the values
241 | func HandleHeartbeatAck(heartbeatAck *nraySchema.HeartbeatAck) (bool, bool) {
242 | return heartbeatAck.Scanning, heartbeatAck.Running
243 | }
244 | 
245 | // Register a node at the server.
The node generates a unique ID
246 | // that identifies the machine so the server can reject multiple
247 | // instances on the same machine
248 | func registerNode(sock mangos.Socket, nodeName string, preferredPool int32) (string, time.Duration, error) {
249 | err := sock.Send(generateNodeRegister(nodeName, preferredPool))
250 | utils.CheckError(err, true)
251 | msg, err := sock.Recv()
252 | utils.CheckError(err, true)
253 | // Unpack it
254 | skeleton := &nraySchema.NrayServerMessage{}
255 | err = proto.Unmarshal(msg, skeleton)
256 | utils.CheckError(err, false)
257 | 
258 | // Depending on the content of the message, do something
259 | switch skeleton.MessageContent.(type) {
260 | case *nraySchema.NrayServerMessage_RegisteredNode:
261 | nodeID, timeOffset, scannerConfig = HandleRegisteredNode(skeleton.GetRegisteredNode())
262 | return nodeID, timeOffset, nil
263 | case nil:
264 | return "", 0, fmt.Errorf("Received an empty message, expected a RegisteredNode message")
265 | default:
266 | return "", 0, fmt.Errorf("Expected a RegisteredNode message, got a different message type")
267 | }
268 | }
269 | 
--------------------------------------------------------------------------------
/scanner/types.go:
--------------------------------------------------------------------------------
1 | package scanner
2 | 
3 | import (
4 | "fmt"
5 | "sync"
6 | "sync/atomic"
7 | "time"
8 | 
9 | "github.com/golang/protobuf/ptypes"
10 | "golang.org/x/time/rate"
11 | 
12 | nraySchema "github.com/nray-scanner/nray/schemas"
13 | "github.com/nray-scanner/nray/utils"
14 | "github.com/spf13/viper"
15 | )
16 | 
17 | // PauseIndicator is a type that can be used to indicate that a scanner should pause. Concurrency safe.
18 | type PauseIndicator struct {
19 | scannerShouldPause bool
20 | lock sync.RWMutex
21 | }
22 | 
23 | // SetValue applies the new value
24 | func (pi *PauseIndicator) SetValue(value bool) {
25 | pi.lock.Lock()
26 | defer pi.lock.Unlock()
27 | pi.scannerShouldPause = value
28 | }
29 | 
30 | // GetValue returns the currently set value
31 | func (pi *PauseIndicator) GetValue() bool {
32 | pi.lock.RLock()
33 | defer pi.lock.RUnlock()
34 | return pi.scannerShouldPause
35 | }
36 | 
37 | // Target represents one specific target, meaning a service that
38 | // is reachable given a protocol (TCP/UDP), a destination (FQDN or IP)
39 | // and a port
40 | type Target struct {
41 | Protocol string
42 | Host string
43 | Port uint32
44 | }
45 | 
46 | // ScanTargets abstracts over different types of target notations.
47 | // For regular scans the interface is sufficient, providing a
48 | // stream of targets, whereas more specific implementations
49 | // may expose data like networks or port ranges directly to
50 | // be more efficient (e.g.
when feeding to ZMap)
51 | type ScanTargets interface {
52 | getTargetGenerator() <-chan *Target // return a channel where Targets are supplied
53 | }
54 | 
55 | // StandardTargets is the default target implementation that allows for an arbitrary
56 | // *single* host and multiple ports
57 | type StandardTargets struct {
58 | protocol string
59 | // url may be a DNS name or IPv4 address WITHOUT protocol or port, encoded as string
60 | url string
61 | targetPorts []uint32
62 | }
63 | 
64 | func (st *StandardTargets) getTargetGenerator() <-chan *Target {
65 | targetChan := make(chan *Target, len(st.targetPorts))
66 | go func(proto string, url string, ports []uint32, channel chan<- *Target) {
67 | for _, port := range ports {
68 | channel <- &Target{
69 | Protocol: proto,
70 | Host: url,
71 | Port: port,
72 | }
73 | }
74 | close(channel) // close the channel so consumers ranging over it terminate
75 | }(st.protocol, st.url, st.targetPorts, targetChan)
76 | return targetChan
77 | }
78 | // ScanController holds most information required to keep everything running
79 | type ScanController struct {
80 | controllerLock sync.RWMutex
81 | nodeID string
82 | nodeName string
83 | timeOffset time.Duration
84 | scannerConfig *viper.Viper
85 | // A map containing functions taking a proto, a host and a port that return
86 | // a function (closure) that can directly be called. The idea is that each scanner
87 | // may register itself e.g. for tcp/80 with a function taking those arguments.
88 | // If tcp/80 is discovered to be open, the function will be called and a closure
89 | // containing all relevant scanning information is returned. This closure can then
90 | // be queued in a channel and picked up by the workers, which simply call the
91 | // function, triggering the scan with the wrapped target information.
92 | Subscriptions map[string][]func(proto string, host string, port uint, results chan<- *nraySchema.Event) func()
93 | subscriptionLock sync.RWMutex
94 | Pause *PauseIndicator
95 | scanQueue chan func()
96 | eventQueue chan *nraySchema.Event
97 | portscanResultQueue chan *PortscanResult
98 | results []*nraySchema.Event
99 | resultsLock sync.Mutex
100 | workersDone bool
101 | ratelimiter *rate.Limiter
102 | scansRunning int64
103 | }
104 | 
105 | // CreateScanController initialises a new ScanController
106 | func CreateScanController(nodeID string, nodeName string, timeOffset time.Duration, scannerConfig *viper.Viper) *ScanController {
107 | if nodeName == "" {
108 | nodeName = nodeID
109 | }
110 | scannerConfig = utils.ApplyDefaultScannerConfig(scannerConfig)
111 | sc := &ScanController{
112 | nodeID: nodeID,
113 | nodeName: nodeName,
114 | timeOffset: timeOffset,
115 | scannerConfig: scannerConfig,
116 | Subscriptions: make(map[string][]func(string, string, uint, chan<- *nraySchema.Event) func()),
117 | scanQueue: make(chan func(), 1000),
118 | eventQueue: make(chan *nraySchema.Event, 1000),
119 | portscanResultQueue: make(chan *PortscanResult, 1000),
120 | results: make([]*nraySchema.Event, 0),
121 | workersDone: false,
122 | Pause: &PauseIndicator{scannerShouldPause: false},
123 | ratelimiter: rate.NewLimiter(rate.Inf, 1),
124 | scansRunning: 0,
125 | }
126 | return sc
127 | }
128 | 
129 | // Refresh cleans the state for each workBatch.
130 | // This is mainly required because termination of each run 131 | // depends heavily on closing internal channels 132 | func (controller *ScanController) Refresh() { 133 | controller.controllerLock.Lock() 134 | controller.scanQueue = make(chan func(), 1000) 135 | controller.eventQueue = make(chan *nraySchema.Event, 1000) 136 | controller.portscanResultQueue = make(chan *PortscanResult, 1000) 137 | controller.results = make([]*nraySchema.Event, 0) 138 | if controller.scannerConfig.GetString("ratelimit") == "none" { 139 | controller.ratelimiter.SetLimit(rate.Inf) 140 | } else { 141 | controller.ratelimiter.SetLimit(rate.Limit(controller.scannerConfig.GetFloat64("ratelimit"))) 142 | } 143 | controller.controllerLock.Unlock() 144 | go controller.processPortScanEvents() 145 | go controller.processEventsToResults() 146 | } 147 | 148 | // Subscribe is called by protocol scanners to get notified in case interesting ports are open 149 | func (controller *ScanController) Subscribe(key string, function func(string, string, uint, chan<- *nraySchema.Event) func()) { 150 | controller.subscriptionLock.Lock() 151 | defer controller.subscriptionLock.Unlock() 152 | if controller.Subscriptions[key] == nil { 153 | controller.Subscriptions[key] = make([]func(string, string, uint, chan<- *nraySchema.Event) func(), 0) 154 | } 155 | controller.Subscriptions[key] = append(controller.Subscriptions[key], function) 156 | } 157 | 158 | // notifies higher layer scanners that are interested, e.g. if a scanner registered 159 | // for "tcp/80" and such a target is found, the scan function of the higher level 160 | // scanner is prepared and queued here 161 | func (controller *ScanController) notify(proto string, host string, port uint) { 162 | controller.subscriptionLock.RLock() 163 | defer controller.subscriptionLock.RUnlock() 164 | key := fmt.Sprintf("%s/%d", proto, port) 165 | //log.Debug(key) 166 | if functions := controller.Subscriptions[key]; functions != nil { 167 | for _, f := range functions { 168 | controller.scanQueue <- f(proto, host, port, controller.eventQueue) 169 | } 170 | } 171 | } 172 | 173 | // processPortScanEvents must run concurrently to a scan in its own goroutine 174 | // It reads the events generated by the port scanner, notifies the higher layer 175 | // scanners and wraps the port scan results into events 176 | func (controller *ScanController) processPortScanEvents() { 177 | for portscanResult := range controller.portscanResultQueue { 178 | if portscanResult == nil { 179 | continue 180 | } 181 | // Create Event 182 | timestamp, _ := ptypes.TimestampProto(currentTime()) 183 | eventData := &nraySchema.Event_Result{ 184 | Result: &nraySchema.ScanResult{ 185 | Target: portscanResult.Target, 186 | Port: portscanResult.Port, 187 | Result: &nraySchema.ScanResult_Portscan{ 188 | Portscan: &nraySchema.PortScanResult{ 189 | Scantype: portscanResult.Scantype, 190 | Target: portscanResult.Target, 191 | Port: portscanResult.Port, 192 | Open: portscanResult.Open, 193 | Timeout: uint32(portscanResult.Timeout / time.Millisecond), 194 | }, 195 | }, 196 | }, 197 | } 198 | event := &nraySchema.Event{ 199 | NodeID: controller.nodeID, 200 | NodeName: controller.nodeName, 201 | EventData: eventData, 202 | Scannername: "native-portscanner", 203 | Timestamp: timestamp, 204 | } 205 | controller.eventQueue <- event 206 | 207 | // Notify others 208 | if portscanResult.Scantype == "tcpconnect" && portscanResult.Open { 209 | //log.Debug("Notifying: %s:%d", portscanResult.Target, portscanResult.Port) 210 | 
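// notify below hands the open port to every scanner that subscribed for the
// key "tcp/<port>" (see Subscribe above). Purely as an illustration and not
// part of the original source, a hypothetical scanner interested in open
// tcp/80 would have registered roughly like this:
//
//	controller.Subscribe("tcp/80", func(proto string, host string, port uint, results chan<- *nraySchema.Event) func() {
//		return func() {
//			// probe host:port here and push any findings into results
//		}
//	})
//
// The closure returned by the subscription is queued on scanQueue and
// executed by the scan workers.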
controller.notify("tcp", portscanResult.Target, uint(portscanResult.Port)) 211 | } 212 | if portscanResult.Scantype == "udp" && portscanResult.Open { 213 | //log.Debug("Notifying: %s:%d", portscanResult.Target, portscanResult.Port) 214 | controller.notify("udp", portscanResult.Target, uint(portscanResult.Port)) 215 | } 216 | } 217 | close(controller.eventQueue) 218 | } 219 | 220 | func (controller *ScanController) processEventsToResults() { 221 | controller.controllerLock.RLock() 222 | controller.resultsLock.Lock() 223 | defer controller.controllerLock.RUnlock() 224 | defer controller.resultsLock.Unlock() 225 | for event := range controller.eventQueue { 226 | controller.results = append(controller.results, event) 227 | } 228 | } 229 | 230 | func (controller *ScanController) getResults() []*nraySchema.Event { 231 | controller.controllerLock.RLock() 232 | controller.resultsLock.Lock() 233 | defer controller.controllerLock.RUnlock() 234 | defer controller.resultsLock.Unlock() 235 | return controller.results 236 | } 237 | 238 | // The only way to find out if a scan is finished is to check if all queues are empty 239 | // Call this function after the workers have finished 240 | func (controller *ScanController) waitForScanToFinishAndEventsToBeProcessed() { 241 | ctr := 0 242 | for { 243 | // Are there any workers having jobs? If no, increment ctr 244 | if atomic.LoadInt64(&controller.scansRunning) == 0 { 245 | ctr++ 246 | } else { // Still work, reset ctr 247 | ctr = 0 248 | } 249 | if ctr == 5 { // No work for last 5 probes in 50 ms interval; we're probably done; close chans 250 | close(controller.scanQueue) 251 | break 252 | } 253 | 254 | //log.Debug("sq: %d\t prq: %d\t eq: %d", len(controller.scanQueue), len(controller.portscanResultQueue), len(controller.eventQueue)) 255 | 256 | // Check loop runs only all 100 Milliseconds to give producers the chance of filling the queue 257 | // BTW, weird things happen if you remove this, so don't... 258 | time.Sleep(100 * time.Millisecond) 259 | } 260 | } 261 | 262 | // TCPPortScanner is the interface all TCP Port Scanners must adhere to 263 | type TCPPortScanner interface { 264 | Configure(config *viper.Viper) 265 | PrepareScanFuncs(targetMsg *nraySchema.MoreWorkReply, results chan<- *PortscanResult) <-chan func() 266 | } 267 | 268 | // ProtocolScanner is the interface all scanners of higher level protocols must adhere to 269 | // There is no explicit scan method because scanners register themselves for targets of interest 270 | // and are called if something is found 271 | type ProtocolScanner interface { 272 | Configure(config *viper.Viper, nodeID string, nodeName string) 273 | Register(scanctrl *ScanController) 274 | } 275 | -------------------------------------------------------------------------------- /core/targetGeneration/targetGenerator.go: -------------------------------------------------------------------------------- 1 | package targetgeneration 2 | 3 | import ( 4 | "math" 5 | "math/rand" 6 | "net" 7 | "regexp" 8 | "strconv" 9 | "strings" 10 | "time" 11 | 12 | "github.com/apparentlymart/go-cidr/cidr" 13 | "github.com/nray-scanner/nray/utils" 14 | log "github.com/sirupsen/logrus" 15 | "github.com/spf13/viper" 16 | ) 17 | 18 | // AnyTargets is the most abstract type holding information 19 | // regarding targets. Any number of hosts, networks, ports etc. 
20 | // is allowed
21 | type AnyTargets struct {
22 | RemoteHosts []string
23 | TCPPorts []uint32
24 | UDPPorts []uint32
25 | }
26 | 
27 | // TargetCount returns the number of targets, meaning individual ports on individual systems
28 | func (at *AnyTargets) TargetCount() uint64 {
29 | return uint64(len(at.RemoteHosts) * (len(at.TCPPorts) + len(at.UDPPorts)))
30 | }
31 | 
32 | // TargetGenerator is the type that unifies all backends and provides
33 | // central access to all generated targets
34 | type TargetGenerator struct {
35 | targetChannels []<-chan AnyTargets
36 | targetChan chan AnyTargets
37 | targetCount uint64
38 | }
39 | 
40 | // Init takes the target generation subtree of the configuration
41 | // and sets up the TargetGenerator to receive targets from its backends
42 | func (tg *TargetGenerator) Init(config *viper.Viper) {
43 | tg.targetChan = make(chan AnyTargets, config.GetInt("buffersize"))
44 | 
45 | backend := &standardTGBackend{}
46 | // Supply config
47 | err := backend.configure(config.Sub("standard"))
48 | utils.CheckError(err, true)
49 | tg.targetCount, err = backend.targetCount()
50 | utils.CheckError(err, false)
51 | // Append channel to slice holding all channels that are sending work
52 | tg.targetChannels = append(tg.targetChannels, backend.receiveTargets())
53 | go tg.zipChannels()
54 | }
55 | 
56 | // GetTargetChan is used to expose a read-only channel to the core
57 | func (tg *TargetGenerator) GetTargetChan() <-chan AnyTargets {
58 | return tg.targetChan
59 | }
60 | 
61 | // TargetCount returns the total target count of this target generator.
62 | func (tg *TargetGenerator) TargetCount() uint64 {
63 | return tg.targetCount
64 | }
65 | 
66 | // zipChannels reads from all channels supplying targets and sends work over a single
67 | // channel that is consumed by the core.
68 | // It is supposed to be called only once, as a goroutine, from tg.Init()
69 | // Closed channels are removed from the slice.
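//
// (Worked example for TargetCount above, with illustrative values: two hosts
// with three TCP ports and one UDP port each yield 2 * (3 + 1) = 8 individual
// targets, which is the figure the server's progress output counts against.)
//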
70 | // If the slice becomes empty, the target channel the core reads from is closed
71 | func (tg *TargetGenerator) zipChannels() {
72 | outer:
73 | for len(tg.targetChannels) > 0 {
74 | for pos, channel := range tg.targetChannels {
75 | elem, ok := <-channel
76 | // Channel is closed, remove from slice
77 | if !ok {
78 | // Don't ask, see https://github.com/golang/go/wiki/SliceTricks
79 | copy(tg.targetChannels[pos:], tg.targetChannels[pos+1:])
80 | tg.targetChannels[len(tg.targetChannels)-1] = nil // or the zero value of T
81 | tg.targetChannels = tg.targetChannels[:len(tg.targetChannels)-1]
82 | 
83 | // After modifying the slice that is currently iterated over it may be wise to start over from the beginning
84 | continue outer
85 | }
86 | tg.targetChan <- elem
87 | }
88 | }
89 | close(tg.targetChan)
90 | }
91 | 
92 | // targetGeneratorBackend is the interface that has to be implemented in order to
93 | // supply targets for the TargetGenerator
94 | type targetGeneratorBackend interface {
95 | configure(*viper.Viper) error
96 | receiveTargets() <-chan AnyTargets
97 | targetCount() (uint64, error)
98 | }
99 | 
100 | // Taken from https://www.rosettacode.org/wiki/Remove_duplicate_elements#Map_solution
101 | func uniq(list []uint16) []uint16 {
102 | uniqueSet := make(map[uint16]bool, len(list))
103 | for _, x := range list {
104 | uniqueSet[x] = true
105 | }
106 | result := make([]uint16, 0, len(uniqueSet))
107 | for x := range uniqueSet {
108 | result = append(result, x)
109 | }
110 | return result
111 | }
112 | 
113 | // GetNmapTopTCPPorts returns an array containing the topN TCP ports
114 | func GetNmapTopTCPPorts(topN uint) []uint16 {
115 | if topN > uint(len(TopTCPPorts)) {
116 | topN = uint(len(TopTCPPorts))
117 | }
118 | return TopTCPPorts[0:uint(topN)]
119 | }
120 | 
121 | // GetNmapTopUDPPorts returns an array containing the topN UDP ports
122 | func GetNmapTopUDPPorts(topN uint) []uint16 {
123 | if topN > uint(len(TopUDPPorts)) {
124 | topN = uint(len(TopUDPPorts))
125 | }
126 | return TopUDPPorts[0:uint(topN)]
127 | }
128 | 
129 | // GenerateIPStreamFromCIDR uses the ZMap algorithm to expand a CIDR network.
130 | // A blacklist may be specified and hosts contained in it are omitted.
131 | // Returns a stream of hosts, which is closed when the network has been completely expanded.
132 | func GenerateIPStreamFromCIDR(ipnet *net.IPNet, blacklist *NrayBlacklist) <-chan net.IP {
133 | if blacklist == nil {
134 | blacklist = NewBlacklist()
135 | }
136 | // size is arbitrary, 50 should be enough to avoid draining the channel during operation
137 | returnChan := make(chan net.IP, 50)
138 | 
139 | // Generate targets asynchronously
140 | go func(returnChan chan<- net.IP, ipnet *net.IPNet, blacklist *NrayBlacklist) {
141 | // Set up parameters for the sharding algorithm
142 | // There is a first and a current number that are mapped to the n-th IP in the network
143 | // A loop calls next() to generate a new currNum and sends it to the work chan
144 | // until currNum equals first again - then there was a complete run through the circle and
145 | // the algorithm is done
146 | var firstNum, currNum uint64
147 | group := getGroup(cidr.AddressCount(ipnet))
148 | // Masscan and ZMap support user-controlled seeds, using the current time should be enough
149 | // until somebody comes up with the requirement to manually seed.
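// Illustrative note, not from the original source: makeCycle constructs a
// cyclic group at least as large as the address count, so repeated next()
// calls visit every index exactly once, in pseudorandom order, before
// wrapping around to the start; indices that fall outside the actual
// network yield a nil host from cidr.Host below and are simply skipped.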
150 | cycle := makeCycle(group, time.Now().UTC().UnixNano())
151 | firstNum = first(&cycle)
152 | currNum = firstNum
153 | // Don't always start with 1
154 | next(&cycle, &currNum)
155 | // Remember to fix firstNum for the break condition later
156 | firstNum = currNum
157 | 
158 | // Generation happens here
159 | for {
160 | nextHost, _ := cidr.Host(ipnet, int(currNum))
161 | if nextHost != nil && !blacklist.IsIPBlacklisted(nextHost.String()) {
162 | returnChan <- nextHost
163 | }
164 | next(&cycle, &currNum)
165 | if currNum == 0 { // we had 0, so stop now
166 | break
167 | }
168 | if currNum == firstNum { // we did a full run through the cycle, but 0 is still missing
169 | currNum = 0
170 | }
171 | }
172 | log.WithFields(log.Fields{
173 | "module": "targetgeneration.targetGenerator",
174 | "src": "GenerateIPStreamFromCIDR",
175 | }).Debug("Closing returnChan")
176 | close(returnChan)
177 | }(returnChan, ipnet, blacklist)
178 | 
179 | return returnChan
180 | }
181 | 
182 | // GeneratePortStream takes a list of ports and returns them in arbitrary order over a channel
183 | func GeneratePortStream(ports []uint16) <-chan uint16 {
184 | // size is arbitrary, 50 should be enough to avoid draining the channel during operation
185 | returnChan := make(chan uint16, 50)
186 | 
187 | // Shuffle slice
188 | r := rand.New(rand.NewSource(time.Now().UnixNano()))
189 | r.Shuffle(len(ports), func(i, j int) {
190 | ports[i], ports[j] = ports[j], ports[i]
191 | })
192 | 
193 | // ports are sent back over the channel asynchronously
194 | go func(returnChan chan<- uint16, ports []uint16) {
195 | for _, port := range ports {
196 | returnChan <- port
197 | }
198 | close(returnChan)
199 | }(returnChan, ports)
200 | 
201 | return returnChan
202 | }
203 | 
204 | // ParsePorts takes a list of port specifications supplied by the user
205 | // and tries to parse them into a slice of uint16s.
206 | // Entries that cannot be parsed are logged and skipped
207 | func ParsePorts(rawPorts []string, proto string) []uint16 {
208 | ports := make([]uint16, 0)
209 | portRangeRegexpr := regexp.MustCompile(utils.RegexPortRange)
210 | topPortsRegexpr := regexp.MustCompile(utils.RegexTopPorts)
211 | thousandNumberRegexp := regexp.MustCompile(utils.RegexThousandNumber)
212 | for _, candidate := range rawPorts {
213 | // A single port
214 | parsed, err := strconv.ParseUint(candidate, 10, 32)
215 | if err == nil && parsed <= math.MaxUint16 {
216 | ports = append(ports, uint16(parsed))
217 | continue
218 | } else if portRangeRegexpr.MatchString(candidate) { // A port range. Split, sort, flatten.
219 | splitted := strings.Split(candidate, "-")
220 | first, err := strconv.ParseUint(splitted[0], 10, 32)
221 | if err == nil && first <= math.MaxUint16 {
222 | second, err := strconv.ParseUint(splitted[1], 10, 32)
223 | if err == nil && second <= math.MaxUint16 {
224 | var start, end uint16
225 | if first <= second {
226 | start = uint16(first)
227 | end = uint16(second)
228 | } else {
229 | start = uint16(second)
230 | end = uint16(first)
231 | }
232 | for i := start; i <= end; i++ {
233 | ports = append(ports, i)
234 | if i == math.MaxUint16 { // Otherwise there is a nasty overflow causing a memory leak until you get killed by OOM
235 | break
236 | }
237 | }
238 | continue
239 | }
240 | }
241 | } else if topPortsRegexpr.MatchString(candidate) { // A toplist
242 | topN, err := strconv.ParseUint(thousandNumberRegexp.FindString(candidate), 10, 32)
243 | utils.CheckError(err, true)
244 | if proto == "udp" {
245 | ports = append(ports, GetNmapTopUDPPorts(uint(topN))...)
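// (Illustrative examples for ParsePorts, assuming the regexes in utils behave
// as their names suggest: "443" yields [443], "8000-8002" yields
// [8000 8001 8002], "top100" expands via the toplists above to the 100 most
// common ports for the given proto, and "all" enumerates 1-65535; duplicates
// are removed by uniq() at the end.)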
246 | } else { 247 | ports = append(ports, GetNmapTopTCPPorts(uint(topN))...) 248 | } 249 | continue 250 | } else if candidate == "all" { 251 | for i := uint16(1); i <= math.MaxUint16; i++ { 252 | ports = append(ports, i) 253 | if i == math.MaxUint16 { // Otherwise there is a nasty overflow causing a memory leak until you get killed by OOM 254 | break 255 | } 256 | } 257 | } else { 258 | log.Warningf("Can't parse port list %s, skipping.", candidate) 259 | } 260 | } 261 | 262 | return uniq(ports) 263 | } 264 | 265 | // chunkPorts creates a slice of AnyTargets that contain all provided hosts with the specified port chunkings 266 | func chunkPorts(hosts []string, tcpports []uint16, udpports []uint16, maxTCPPorts uint, maxUDPPorts uint) []AnyTargets { 267 | targets := make([]AnyTargets, 0) 268 | 269 | // Get fresh port streams 270 | tcpPortStream := GeneratePortStream(tcpports) 271 | udpPortStream := GeneratePortStream(udpports) 272 | 273 | // As long as both port streams are not consumed, create new AnyTargets containing the 274 | // host list and the targets. 275 | for tcpPortStream != nil || udpPortStream != nil { 276 | tcpPorts := make([]uint32, 0) 277 | udpPorts := make([]uint32, 0) 278 | for numTCPPort := uint(0); numTCPPort < maxTCPPorts; numTCPPort++ { 279 | if tcpPortStream == nil { 280 | break 281 | } 282 | tcpPort, ok := <-tcpPortStream 283 | if !ok { 284 | tcpPortStream = nil 285 | break 286 | } 287 | tcpPorts = append(tcpPorts, uint32(tcpPort)) 288 | } 289 | for numUDPPort := uint(0); numUDPPort < maxUDPPorts; numUDPPort++ { 290 | if udpPortStream == nil { 291 | break 292 | } 293 | udpPort, ok := <-udpPortStream 294 | if !ok { 295 | udpPortStream = nil 296 | break 297 | } 298 | udpPorts = append(udpPorts, uint32(udpPort)) 299 | } 300 | if len(tcpPorts) == 0 && len(udpPorts) == 0 { 301 | continue 302 | } 303 | newTarget := AnyTargets{ 304 | RemoteHosts: hosts, 305 | TCPPorts: tcpPorts, 306 | UDPPorts: udpPorts, 307 | } 308 | targets = append(targets, newTarget) 309 | } 310 | return targets 311 | } 312 | -------------------------------------------------------------------------------- /core/server.go: -------------------------------------------------------------------------------- 1 | package core 2 | 3 | import ( 4 | "crypto/rand" 5 | "crypto/tls" 6 | "crypto/x509" 7 | "encoding/json" 8 | "fmt" 9 | "io/ioutil" 10 | "os" 11 | "os/signal" 12 | "strconv" 13 | "time" 14 | 15 | "github.com/nray-scanner/nray/events" 16 | 17 | "github.com/golang/protobuf/proto" 18 | log "github.com/sirupsen/logrus" 19 | 20 | targetgeneration "github.com/nray-scanner/nray/core/targetGeneration" 21 | nraySchema "github.com/nray-scanner/nray/schemas" 22 | "github.com/nray-scanner/nray/utils" 23 | "github.com/spf13/viper" 24 | 25 | // TCP transport for nanomsg 26 | _ "nanomsg.org/go/mangos/v2/transport/tcp" 27 | _ "nanomsg.org/go/mangos/v2/transport/tlstcp" 28 | 29 | mangos "nanomsg.org/go/mangos/v2" 30 | ) 31 | 32 | // CurrentConfig is the initialized struct containing 33 | // the configuration 34 | var CurrentConfig GlobalConfig 35 | var externalConfig *viper.Viper 36 | 37 | // InitGlobalServerConfig initializes the GlobalConfig 38 | // from the values provided by viper 39 | func InitGlobalServerConfig(config *viper.Viper) error { 40 | externalConfig = config 41 | if externalConfig.GetBool("debug") { 42 | log.SetLevel(log.DebugLevel) 43 | log.SetFormatter(&utils.Formatter{ 44 | HideKeys: true, 45 | }) 46 | } 47 | 48 | // Init ports 49 | portsToListen := externalConfig.GetStringSlice("listen") 50 | if 
len(portsToListen) == 0 { // a nil slice also has length 0
51 | return fmt.Errorf("No port to bind to was given")
52 | }
53 | portList := make([]uint32, 0)
54 | for _, port := range portsToListen {
55 | val, err := strconv.ParseUint(port, 10, 32)
56 | utils.CheckError(err, false)
57 | portList = append(portList, uint32(val))
58 | }
59 | 
60 | // Init host configuration
61 | host := externalConfig.GetString("host")
62 | CurrentConfig = GlobalConfig{ListenPorts: portList, ListenHost: host}
63 | 
64 | // Init TLS
65 | if externalConfig.GetBool("TLS.enabled") {
66 | cert, err := tls.LoadX509KeyPair(externalConfig.GetString("TLS.cert"), externalConfig.GetString("TLS.key"))
67 | utils.CheckError(err, true)
68 | CurrentConfig.TLSConfig = &tls.Config{Certificates: []tls.Certificate{cert}}
69 | CurrentConfig.TLSConfig.Rand = rand.Reader
70 | CurrentConfig.TLSConfig.BuildNameToCertificate()
71 | if externalConfig.GetBool("TLS.forceClientAuth") {
72 | caCert, err := ioutil.ReadFile(externalConfig.GetString("TLS.CA"))
73 | utils.CheckError(err, true)
74 | caCertPool := x509.NewCertPool()
75 | caCertPool.AppendCertsFromPEM(caCert)
76 | CurrentConfig.TLSConfig.ClientAuth = tls.RequireAndVerifyClientCert
77 | CurrentConfig.TLSConfig.ClientCAs = caCertPool
78 | }
79 | }
80 | 
81 | // Init pool configuration
82 | CurrentConfig.Pools = make([]*Pool, externalConfig.GetInt("pools"))
83 | 
84 | // Init event handlers
85 | CurrentConfig.EventHandlers = make([]events.EventHandler, 0)
86 | for _, eventHandlerName := range events.RegisteredHandlers {
87 | configPath := fmt.Sprintf("events.%s", eventHandlerName)
88 | if externalConfig.IsSet(configPath) {
89 | handler := events.GetEventHandler(eventHandlerName)
90 | err := handler.Configure(externalConfig.Sub(configPath))
91 | utils.CheckError(err, true)
92 | CurrentConfig.EventHandlers = append(CurrentConfig.EventHandlers, handler)
93 | }
94 | }
95 | return nil
96 | }
97 | 
98 | // Start starts the core
99 | func Start() {
100 | server(CurrentConfig)
101 | }
102 | 
103 | // Most of the core magic happens here. It's long, but don't be afraid
104 | func server(currentConfig GlobalConfig) {
105 | // Create node pools
106 | initPools()
107 | 
108 | // Initialise the message queue and bind to TCP ports
109 | sock := createRepSock(currentConfig.ListenHost, currentConfig.ListenPorts, currentConfig.TLSConfig)
110 | defer sock.Close()
111 | 
112 | // Handle Ctrl+C events
113 | startSignalInterruptHandler()
114 | 
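//
// Aside, illustrative: the keys consumed by InitGlobalServerConfig above map
// to a server configuration roughly like the following YAML. The values here
// are invented; nray-conf.yaml in the repository root is the authoritative
// example.
//
//	debug: false
//	listen: ["8601"]
//	host: "0.0.0.0"
//	TLS:
//	  enabled: false
//	pools: 1
//	considerClientPoolPreference: true
//	allowMultipleNodesPerHost: true
//	statusPrintInterval: 30s
//	internal:
//	  nodeExpiryTime: 30
//	  nodeExpiryCheckInterval: 10
//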
115 | // Main Loop. Receives data from nodes, processes it and sends replies
116 | mainloop:
117 | for {
118 | msg, err := sock.Recv()
119 | utils.CheckError(err, false)
120 | skeleton := &nraySchema.NrayNodeMessage{}
121 | err = proto.Unmarshal(msg, skeleton)
122 | utils.CheckError(err, false)
123 | 
124 | // TODO: Move this into its own function
125 | // All incoming messages are processed here
126 | switch skeleton.MessageContent.(type) {
127 | case *nraySchema.NrayNodeMessage_NodeRegister:
128 | registeredNode := handleNodeRegister(skeleton.GetNodeRegister(), externalConfig.GetBool("considerClientPoolPreference"), externalConfig.GetBool("allowMultipleNodesPerHost"))
129 | for _, handler := range currentConfig.EventHandlers {
130 | handler.ProcessEvents([]*nraySchema.Event{skeleton.GetNodeRegister().Envinfo})
131 | }
132 | if externalConfig.IsSet("scannerconfig") {
133 | registeredNode.Scannerconfig, err = json.Marshal(externalConfig.Sub("scannerconfig").AllSettings())
134 | } else {
135 | registeredNode.Scannerconfig = nil
136 | }
137 | utils.CheckError(err, false)
138 | serverMessage := &nraySchema.NrayServerMessage{
139 | MessageContent: &nraySchema.NrayServerMessage_RegisteredNode{
140 | RegisteredNode: registeredNode,
141 | },
142 | }
143 | SendMessage(sock, serverMessage)
144 | case *nraySchema.NrayNodeMessage_Heartbeat:
145 | if alreadyRegistered := checkNodeIDIsRegistered(skeleton.GetHeartbeat().NodeID); !alreadyRegistered {
146 | SendMessage(sock, createUnregisteredMessage(skeleton.GetHeartbeat().NodeID))
147 | } else {
148 | heartBeatAck := handleHeartbeat(skeleton.GetHeartbeat())
149 | serverMessage := &nraySchema.NrayServerMessage{
150 | MessageContent: &nraySchema.NrayServerMessage_HeartbeatAck{
151 | HeartbeatAck: heartBeatAck,
152 | },
153 | }
154 | SendMessage(sock, serverMessage)
155 | }
156 | case *nraySchema.NrayNodeMessage_MoreWork:
157 | if alreadyRegistered := checkNodeIDIsRegistered(skeleton.GetMoreWork().NodeID); !alreadyRegistered {
158 | SendMessage(sock, createUnregisteredMessage(skeleton.GetMoreWork().NodeID))
159 | } else { // the rep socket permits exactly one reply per request, so only known nodes get work
160 | nodeID := handleMoreWorkRequest(skeleton.GetMoreWork())
161 | var marshalled []byte
162 | for _, pool := range currentConfig.Pools {
163 | node, exists := pool.getNodeFromID(nodeID)
164 | if exists {
165 | log.WithFields(log.Fields{
166 | "module": "core.server",
167 | "src": "server",
168 | }).Debugf("Request for more work by node %s", node.Name)
169 | newJob := pool.GetJobForNode(nodeID)
170 | if newJob == nil {
171 | // Currently no jobs available :(
172 | marshalled = createMoreWorkMsg(targetgeneration.AnyTargets{}, 0)
173 | } else {
174 | marshalled = createMoreWorkMsg(newJob.workItems, newJob.id)
175 | }
176 | }
177 | }
178 | err = sock.Send(marshalled)
179 | utils.CheckError(err, false)
180 | }
181 | case *nraySchema.NrayNodeMessage_WorkDone:
182 | if alreadyRegistered := checkNodeIDIsRegistered(skeleton.GetWorkDone().NodeID); !alreadyRegistered {
183 | SendMessage(sock, createUnregisteredMessage(skeleton.GetWorkDone().NodeID))
184 | } else {
185 | currentConfig.LogEvents(skeleton.GetWorkDone().Events)
186 | nodeID := skeleton.GetWorkDone().NodeID
187 | poolOfNode := currentConfig.getPoolFromNodeID(nodeID)
188 | err := poolOfNode.removeJobFromJobArea(nodeID, skeleton.GetWorkDone().Batchid)
189 | utils.CheckError(err, false)
190 | serverMessage := &nraySchema.NrayServerMessage{
191 | MessageContent: &nraySchema.NrayServerMessage_WorkDoneAck{
192 | WorkDoneAck: &nraySchema.WorkDoneAck{},
193 | },
194 | }
195 | SendMessage(sock, serverMessage)
196 | }
197 | case
*nraySchema.NrayNodeMessage_Goodbye: 198 | if alreadyRegistered := checkNodeIDIsRegistered(skeleton.GetGoodbye().NodeID); !alreadyRegistered { 199 | SendMessage(sock, createUnregisteredMessage(skeleton.GetGoodbye().NodeID)) 200 | } else { 201 | nodeID := skeleton.GetGoodbye().NodeID 202 | for _, pool := range currentConfig.Pools { 203 | _, exists := pool.getNodeFromID(nodeID) 204 | var success bool 205 | if exists { 206 | success = pool.removeNodeFromPool(nodeID, false) 207 | serverMessage := &nraySchema.NrayServerMessage{ 208 | MessageContent: &nraySchema.NrayServerMessage_GoodbyeAck{ 209 | GoodbyeAck: &nraySchema.GoodbyeAck{ 210 | Ok: success, 211 | }, 212 | }, 213 | } 214 | SendMessage(sock, serverMessage) 215 | break 216 | } 217 | } 218 | } 219 | case nil: 220 | log.WithFields(log.Fields{ 221 | "module": "core.server", 222 | "src": "server", 223 | }).Warning("Message sent by node is empty") 224 | default: 225 | log.WithFields(log.Fields{ 226 | "module": "core.server", 227 | "src": "server", 228 | }).Error("Cannot decode message sent by node") 229 | } 230 | 231 | // If the Job queue is empty and job generation is done, stop all nodes 232 | poolsStillRunning := false 233 | for _, pool := range currentConfig.Pools { 234 | if !pool.IsJobGenerationDone() || pool.GetNumberOfAllJobs() > 0 { 235 | poolsStillRunning = true 236 | } else { 237 | pool.StopAllNodes() 238 | } 239 | } 240 | if poolsStillRunning { 241 | continue mainloop 242 | } 243 | // "Fix" rare situations where server is stopped before node received the message to shut down 244 | time.Sleep(500 * time.Millisecond) 245 | // After all nodes are stopped ... 246 | for _, pool := range currentConfig.Pools { 247 | if !pool.NodesEmpty() { 248 | continue mainloop 249 | } 250 | } 251 | // "Fix" rare situations where server is stopped before node received the message to shut down 252 | time.Sleep(500 * time.Millisecond) 253 | 254 | log.WithFields(log.Fields{ 255 | "module": "core.server", 256 | "src": "server", 257 | }).Info("Closing event handlers") 258 | // ... and event handlers are closed ... 259 | currentConfig.CloseEventHandlers() 260 | // ... 
finally stop the server by ending its main loop 261 | break mainloop 262 | } 263 | } 264 | 265 | // SendMessage marshals the given server message and sends it on the socket 266 | func SendMessage(sock mangos.Socket, message *nraySchema.NrayServerMessage) { 267 | marshalled, err := proto.Marshal(message) 268 | utils.CheckError(err, false) 269 | err = sock.Send(marshalled) 270 | utils.CheckError(err, false) 271 | } 272 | 273 | func initPools() { 274 | statusInterval := externalConfig.GetDuration("statusPrintInterval") 275 | for i := 0; i < externalConfig.GetInt("pools"); i++ { 276 | CurrentConfig.Pools[i] = initPool(statusInterval) 277 | } 278 | 279 | // Create goroutines that clean up pools regularly 280 | nodeExpiryTime := time.Duration(externalConfig.GetInt("internal.nodeExpiryTime")) * time.Second 281 | nodeExpiryCheckInterval := time.Duration(externalConfig.GetInt("internal.nodeExpiryCheckInterval")) * time.Second 282 | for _, pool := range CurrentConfig.Pools { 283 | go removeExpiredNodes(pool, nodeExpiryCheckInterval, nodeExpiryTime) 284 | } 285 | 286 | for _, pool := range CurrentConfig.Pools { 287 | // Each pool has a target generator 288 | targetGenerator := targetgeneration.TargetGenerator{} 289 | targetGenerator.Init(externalConfig.Sub("targetgenerator")) 290 | pool.TargetChan = targetGenerator.GetTargetChan() 291 | pool.SetTargetCount(targetGenerator.TargetCount()) 292 | 293 | // This goroutine creates jobs for each pool 294 | go func(p *Pool) { 295 | log.WithFields(log.Fields{ 296 | "module": "core.server", 297 | "src": "initPools", 298 | }).Debug("Started job creation goroutine") 299 | for { 300 | waitingJobs := p.GetNumberOfWaitingJobs() 301 | // If there are fewer than 50 jobs, create new ones. I doubt anybody ever performs a scan at a scale where 50 is too few 302 | if waitingJobs < 50 { 303 | nextTarget, ok := <-p.TargetChan 304 | if ok { 305 | nextJob := createJob(nextTarget) 306 | p.AddJobToJobArea(&nextJob) 307 | } else { 308 | p.SetJobGenerationDone() 309 | return 310 | } 311 | } else { 312 | time.Sleep(1 * time.Second) 313 | } 314 | } 315 | }(pool) 316 | } 317 | } 318 | 319 | func checkNodeIDIsRegistered(nodeID string) bool { 320 | return CurrentConfig.getNodeFromID(nodeID) != nil 321 | } 322 | 323 | func startSignalInterruptHandler() { 324 | // Make a channel to receive interrupt signals ("Ctrl+C") 325 | interruptSignals := make(chan os.Signal, 1) 326 | signal.Notify(interruptSignals, os.Interrupt) 327 | // A goroutine is constantly watching the channel for any incoming signals 328 | go func(c chan os.Signal) { 329 | // Count the signals: 330 | // 0: all good 331 | // 1: send stop to nodes 332 | // 2: warn user that waiting for the nodes to shut down may be a good idea and to be patient. Really. 333 | // 3: user REALLY wants to exit the server, let's do him a favor... 334 | ctr := 0 335 | for sig := range c { 336 | ctr++ 337 | log.WithFields(log.Fields{ 338 | "module": "core.server", 339 | "src": "startSignalInterruptHandler", 340 | }).Warningf("Caught signal %s", sig) 341 | if ctr == 1 { 342 | go func() { 343 | for _, pool := range CurrentConfig.Pools { 344 | pool.StopAllNodes() 345 | waitingTillAllNodesAreGone: 346 | for { 347 | log.WithFields(log.Fields{ 348 | "module": "core.server", 349 | "src": "startSignalInterruptHandler", 350 | }).Warning("Stopping all nodes, this may take a few seconds.
Please be patient.") 351 | for _, pool := range CurrentConfig.Pools { 352 | if !pool.NodesEmpty() { 353 | // Don't go wild on printing 354 | time.Sleep(1 * time.Second) 355 | continue waitingTillAllNodesAreGone 356 | } 357 | } 358 | log.WithFields(log.Fields{ 359 | "module": "core.server", 360 | "src": "startSignalInterruptHandler", 361 | }).Info("All nodes stopped. Now stopping event handlers") 362 | CurrentConfig.CloseEventHandlers() 363 | log.WithFields(log.Fields{ 364 | "module": "core.server", 365 | "src": "startSignalInterruptHandler", 366 | }).Info("Event handlers stopped. Exiting now.") 367 | os.Exit(1) 368 | } 369 | } 370 | }() 371 | } else if ctr == 2 { 372 | log.WithFields(log.Fields{ 373 | "module": "core.server", 374 | "src": "startSignalInterruptHandler", 375 | }).Warning("So you really want to exit, I got it... Do you really want to leave zombie scanners around?!") 376 | } else if ctr == 3 { 377 | log.WithFields(log.Fields{ 378 | "module": "core.server", 379 | "src": "startSignalInterruptHandler", 380 | }).Warning("You're the boss...") 381 | os.Exit(1) 382 | } else { 383 | log.WithFields(log.Fields{ 384 | "module": "core.server", 385 | "src": "startSignalInterruptHandler", 386 | }).Error("How did you manage to get here?!") 387 | os.Exit(1) 388 | } 389 | } 390 | }(interruptSignals) 391 | } 392 | -------------------------------------------------------------------------------- /schemas/events.pb.go: -------------------------------------------------------------------------------- 1 | // Code generated by protoc-gen-go. DO NOT EDIT. 2 | // source: schemas/events.proto 3 | 4 | package nraySchema 5 | 6 | import ( 7 | fmt "fmt" 8 | proto "github.com/golang/protobuf/proto" 9 | _struct "github.com/golang/protobuf/ptypes/struct" 10 | timestamp "github.com/golang/protobuf/ptypes/timestamp" 11 | math "math" 12 | ) 13 | 14 | // Reference imports to suppress errors if they are not otherwise used. 15 | var _ = proto.Marshal 16 | var _ = fmt.Errorf 17 | var _ = math.Inf 18 | 19 | // This is a compile-time assertion to ensure that this generated file 20 | // is compatible with the proto package it is being compiled against. 21 | // A compilation error at this line likely means your copy of the 22 | // proto package needs to be updated. 
23 | const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package 24 | 25 | // Event is a container for everything that happens 26 | //at a node and should later on be handled by EventHandlers 27 | type Event struct { 28 | NodeID string `protobuf:"bytes,1,opt,name=nodeID,proto3" json:"nodeID,omitempty"` 29 | NodeName string `protobuf:"bytes,2,opt,name=nodeName,proto3" json:"nodeName,omitempty"` 30 | Timestamp *timestamp.Timestamp `protobuf:"bytes,3,opt,name=timestamp,proto3" json:"timestamp,omitempty"` 31 | Scannername string `protobuf:"bytes,6,opt,name=scannername,proto3" json:"scannername,omitempty"` 32 | // Types that are valid to be assigned to EventData: 33 | // *Event_Environment 34 | // *Event_Result 35 | EventData isEvent_EventData `protobuf_oneof:"EventData"` 36 | XXX_NoUnkeyedLiteral struct{} `json:"-"` 37 | XXX_unrecognized []byte `json:"-"` 38 | XXX_sizecache int32 `json:"-"` 39 | } 40 | 41 | func (m *Event) Reset() { *m = Event{} } 42 | func (m *Event) String() string { return proto.CompactTextString(m) } 43 | func (*Event) ProtoMessage() {} 44 | func (*Event) Descriptor() ([]byte, []int) { 45 | return fileDescriptor_3ab30010df94cd8f, []int{0} 46 | } 47 | 48 | func (m *Event) XXX_Unmarshal(b []byte) error { 49 | return xxx_messageInfo_Event.Unmarshal(m, b) 50 | } 51 | func (m *Event) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { 52 | return xxx_messageInfo_Event.Marshal(b, m, deterministic) 53 | } 54 | func (m *Event) XXX_Merge(src proto.Message) { 55 | xxx_messageInfo_Event.Merge(m, src) 56 | } 57 | func (m *Event) XXX_Size() int { 58 | return xxx_messageInfo_Event.Size(m) 59 | } 60 | func (m *Event) XXX_DiscardUnknown() { 61 | xxx_messageInfo_Event.DiscardUnknown(m) 62 | } 63 | 64 | var xxx_messageInfo_Event proto.InternalMessageInfo 65 | 66 | func (m *Event) GetNodeID() string { 67 | if m != nil { 68 | return m.NodeID 69 | } 70 | return "" 71 | } 72 | 73 | func (m *Event) GetNodeName() string { 74 | if m != nil { 75 | return m.NodeName 76 | } 77 | return "" 78 | } 79 | 80 | func (m *Event) GetTimestamp() *timestamp.Timestamp { 81 | if m != nil { 82 | return m.Timestamp 83 | } 84 | return nil 85 | } 86 | 87 | func (m *Event) GetScannername() string { 88 | if m != nil { 89 | return m.Scannername 90 | } 91 | return "" 92 | } 93 | 94 | type isEvent_EventData interface { 95 | isEvent_EventData() 96 | } 97 | 98 | type Event_Environment struct { 99 | Environment *EnvironmentInformation `protobuf:"bytes,7,opt,name=environment,proto3,oneof"` 100 | } 101 | 102 | type Event_Result struct { 103 | Result *ScanResult `protobuf:"bytes,8,opt,name=result,proto3,oneof"` 104 | } 105 | 106 | func (*Event_Environment) isEvent_EventData() {} 107 | 108 | func (*Event_Result) isEvent_EventData() {} 109 | 110 | func (m *Event) GetEventData() isEvent_EventData { 111 | if m != nil { 112 | return m.EventData 113 | } 114 | return nil 115 | } 116 | 117 | func (m *Event) GetEnvironment() *EnvironmentInformation { 118 | if x, ok := m.GetEventData().(*Event_Environment); ok { 119 | return x.Environment 120 | } 121 | return nil 122 | } 123 | 124 | func (m *Event) GetResult() *ScanResult { 125 | if x, ok := m.GetEventData().(*Event_Result); ok { 126 | return x.Result 127 | } 128 | return nil 129 | } 130 | 131 | // XXX_OneofWrappers is for the internal use of the proto package. 
132 | func (*Event) XXX_OneofWrappers() []interface{} { 133 | return []interface{}{ 134 | (*Event_Environment)(nil), 135 | (*Event_Result)(nil), 136 | } 137 | } 138 | 139 | type ScanResult struct { 140 | Target string `protobuf:"bytes,4,opt,name=target,proto3" json:"target,omitempty"` 141 | Port uint32 `protobuf:"varint,5,opt,name=port,proto3" json:"port,omitempty"` 142 | // Types that are valid to be assigned to Result: 143 | // *ScanResult_Portscan 144 | // *ScanResult_Zgrabscan 145 | Result isScanResult_Result `protobuf_oneof:"result"` 146 | XXX_NoUnkeyedLiteral struct{} `json:"-"` 147 | XXX_unrecognized []byte `json:"-"` 148 | XXX_sizecache int32 `json:"-"` 149 | } 150 | 151 | func (m *ScanResult) Reset() { *m = ScanResult{} } 152 | func (m *ScanResult) String() string { return proto.CompactTextString(m) } 153 | func (*ScanResult) ProtoMessage() {} 154 | func (*ScanResult) Descriptor() ([]byte, []int) { 155 | return fileDescriptor_3ab30010df94cd8f, []int{1} 156 | } 157 | 158 | func (m *ScanResult) XXX_Unmarshal(b []byte) error { 159 | return xxx_messageInfo_ScanResult.Unmarshal(m, b) 160 | } 161 | func (m *ScanResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { 162 | return xxx_messageInfo_ScanResult.Marshal(b, m, deterministic) 163 | } 164 | func (m *ScanResult) XXX_Merge(src proto.Message) { 165 | xxx_messageInfo_ScanResult.Merge(m, src) 166 | } 167 | func (m *ScanResult) XXX_Size() int { 168 | return xxx_messageInfo_ScanResult.Size(m) 169 | } 170 | func (m *ScanResult) XXX_DiscardUnknown() { 171 | xxx_messageInfo_ScanResult.DiscardUnknown(m) 172 | } 173 | 174 | var xxx_messageInfo_ScanResult proto.InternalMessageInfo 175 | 176 | func (m *ScanResult) GetTarget() string { 177 | if m != nil { 178 | return m.Target 179 | } 180 | return "" 181 | } 182 | 183 | func (m *ScanResult) GetPort() uint32 { 184 | if m != nil { 185 | return m.Port 186 | } 187 | return 0 188 | } 189 | 190 | type isScanResult_Result interface { 191 | isScanResult_Result() 192 | } 193 | 194 | type ScanResult_Portscan struct { 195 | Portscan *PortScanResult `protobuf:"bytes,8,opt,name=portscan,proto3,oneof"` 196 | } 197 | 198 | type ScanResult_Zgrabscan struct { 199 | Zgrabscan *ZGrab2ScanResult `protobuf:"bytes,9,opt,name=zgrabscan,proto3,oneof"` 200 | } 201 | 202 | func (*ScanResult_Portscan) isScanResult_Result() {} 203 | 204 | func (*ScanResult_Zgrabscan) isScanResult_Result() {} 205 | 206 | func (m *ScanResult) GetResult() isScanResult_Result { 207 | if m != nil { 208 | return m.Result 209 | } 210 | return nil 211 | } 212 | 213 | func (m *ScanResult) GetPortscan() *PortScanResult { 214 | if x, ok := m.GetResult().(*ScanResult_Portscan); ok { 215 | return x.Portscan 216 | } 217 | return nil 218 | } 219 | 220 | func (m *ScanResult) GetZgrabscan() *ZGrab2ScanResult { 221 | if x, ok := m.GetResult().(*ScanResult_Zgrabscan); ok { 222 | return x.Zgrabscan 223 | } 224 | return nil 225 | } 226 | 227 | // XXX_OneofWrappers is for the internal use of the proto package. 
228 | func (*ScanResult) XXX_OneofWrappers() []interface{} { 229 | return []interface{}{ 230 | (*ScanResult_Portscan)(nil), 231 | (*ScanResult_Zgrabscan)(nil), 232 | } 233 | } 234 | 235 | // EnvironmentInformation tells the server 236 | //under which circumstances nodes are running 237 | type EnvironmentInformation struct { 238 | Hostname string `protobuf:"bytes,1,opt,name=hostname,proto3" json:"hostname,omitempty"` 239 | Os string `protobuf:"bytes,2,opt,name=os,proto3" json:"os,omitempty"` 240 | Pid string `protobuf:"bytes,3,opt,name=pid,proto3" json:"pid,omitempty"` 241 | Processname string `protobuf:"bytes,4,opt,name=processname,proto3" json:"processname,omitempty"` 242 | Username string `protobuf:"bytes,5,opt,name=username,proto3" json:"username,omitempty"` 243 | Cpumodelname string `protobuf:"bytes,6,opt,name=cpumodelname,proto3" json:"cpumodelname,omitempty"` 244 | XXX_NoUnkeyedLiteral struct{} `json:"-"` 245 | XXX_unrecognized []byte `json:"-"` 246 | XXX_sizecache int32 `json:"-"` 247 | } 248 | 249 | func (m *EnvironmentInformation) Reset() { *m = EnvironmentInformation{} } 250 | func (m *EnvironmentInformation) String() string { return proto.CompactTextString(m) } 251 | func (*EnvironmentInformation) ProtoMessage() {} 252 | func (*EnvironmentInformation) Descriptor() ([]byte, []int) { 253 | return fileDescriptor_3ab30010df94cd8f, []int{2} 254 | } 255 | 256 | func (m *EnvironmentInformation) XXX_Unmarshal(b []byte) error { 257 | return xxx_messageInfo_EnvironmentInformation.Unmarshal(m, b) 258 | } 259 | func (m *EnvironmentInformation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { 260 | return xxx_messageInfo_EnvironmentInformation.Marshal(b, m, deterministic) 261 | } 262 | func (m *EnvironmentInformation) XXX_Merge(src proto.Message) { 263 | xxx_messageInfo_EnvironmentInformation.Merge(m, src) 264 | } 265 | func (m *EnvironmentInformation) XXX_Size() int { 266 | return xxx_messageInfo_EnvironmentInformation.Size(m) 267 | } 268 | func (m *EnvironmentInformation) XXX_DiscardUnknown() { 269 | xxx_messageInfo_EnvironmentInformation.DiscardUnknown(m) 270 | } 271 | 272 | var xxx_messageInfo_EnvironmentInformation proto.InternalMessageInfo 273 | 274 | func (m *EnvironmentInformation) GetHostname() string { 275 | if m != nil { 276 | return m.Hostname 277 | } 278 | return "" 279 | } 280 | 281 | func (m *EnvironmentInformation) GetOs() string { 282 | if m != nil { 283 | return m.Os 284 | } 285 | return "" 286 | } 287 | 288 | func (m *EnvironmentInformation) GetPid() string { 289 | if m != nil { 290 | return m.Pid 291 | } 292 | return "" 293 | } 294 | 295 | func (m *EnvironmentInformation) GetProcessname() string { 296 | if m != nil { 297 | return m.Processname 298 | } 299 | return "" 300 | } 301 | 302 | func (m *EnvironmentInformation) GetUsername() string { 303 | if m != nil { 304 | return m.Username 305 | } 306 | return "" 307 | } 308 | 309 | func (m *EnvironmentInformation) GetCpumodelname() string { 310 | if m != nil { 311 | return m.Cpumodelname 312 | } 313 | return "" 314 | } 315 | 316 | // TCPScanResult contains the outcome of 317 | //a TCP scan against a single port on a single host 318 | type PortScanResult struct { 319 | Target string `protobuf:"bytes,1,opt,name=target,proto3" json:"target,omitempty"` 320 | Port uint32 `protobuf:"varint,2,opt,name=port,proto3" json:"port,omitempty"` 321 | Open bool `protobuf:"varint,3,opt,name=open,proto3" json:"open,omitempty"` 322 | Scantype string `protobuf:"bytes,4,opt,name=scantype,proto3" json:"scantype,omitempty"` 323 | Timeout 
uint32 `protobuf:"varint,5,opt,name=timeout,proto3" json:"timeout,omitempty"` 324 | XXX_NoUnkeyedLiteral struct{} `json:"-"` 325 | XXX_unrecognized []byte `json:"-"` 326 | XXX_sizecache int32 `json:"-"` 327 | } 328 | 329 | func (m *PortScanResult) Reset() { *m = PortScanResult{} } 330 | func (m *PortScanResult) String() string { return proto.CompactTextString(m) } 331 | func (*PortScanResult) ProtoMessage() {} 332 | func (*PortScanResult) Descriptor() ([]byte, []int) { 333 | return fileDescriptor_3ab30010df94cd8f, []int{3} 334 | } 335 | 336 | func (m *PortScanResult) XXX_Unmarshal(b []byte) error { 337 | return xxx_messageInfo_PortScanResult.Unmarshal(m, b) 338 | } 339 | func (m *PortScanResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { 340 | return xxx_messageInfo_PortScanResult.Marshal(b, m, deterministic) 341 | } 342 | func (m *PortScanResult) XXX_Merge(src proto.Message) { 343 | xxx_messageInfo_PortScanResult.Merge(m, src) 344 | } 345 | func (m *PortScanResult) XXX_Size() int { 346 | return xxx_messageInfo_PortScanResult.Size(m) 347 | } 348 | func (m *PortScanResult) XXX_DiscardUnknown() { 349 | xxx_messageInfo_PortScanResult.DiscardUnknown(m) 350 | } 351 | 352 | var xxx_messageInfo_PortScanResult proto.InternalMessageInfo 353 | 354 | func (m *PortScanResult) GetTarget() string { 355 | if m != nil { 356 | return m.Target 357 | } 358 | return "" 359 | } 360 | 361 | func (m *PortScanResult) GetPort() uint32 { 362 | if m != nil { 363 | return m.Port 364 | } 365 | return 0 366 | } 367 | 368 | func (m *PortScanResult) GetOpen() bool { 369 | if m != nil { 370 | return m.Open 371 | } 372 | return false 373 | } 374 | 375 | func (m *PortScanResult) GetScantype() string { 376 | if m != nil { 377 | return m.Scantype 378 | } 379 | return "" 380 | } 381 | 382 | func (m *PortScanResult) GetTimeout() uint32 { 383 | if m != nil { 384 | return m.Timeout 385 | } 386 | return 0 387 | } 388 | 389 | type ZGrab2ScanResult struct { 390 | JsonResult *_struct.Value `protobuf:"bytes,1,opt,name=jsonResult,proto3" json:"jsonResult,omitempty"` 391 | XXX_NoUnkeyedLiteral struct{} `json:"-"` 392 | XXX_unrecognized []byte `json:"-"` 393 | XXX_sizecache int32 `json:"-"` 394 | } 395 | 396 | func (m *ZGrab2ScanResult) Reset() { *m = ZGrab2ScanResult{} } 397 | func (m *ZGrab2ScanResult) String() string { return proto.CompactTextString(m) } 398 | func (*ZGrab2ScanResult) ProtoMessage() {} 399 | func (*ZGrab2ScanResult) Descriptor() ([]byte, []int) { 400 | return fileDescriptor_3ab30010df94cd8f, []int{4} 401 | } 402 | 403 | func (m *ZGrab2ScanResult) XXX_Unmarshal(b []byte) error { 404 | return xxx_messageInfo_ZGrab2ScanResult.Unmarshal(m, b) 405 | } 406 | func (m *ZGrab2ScanResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { 407 | return xxx_messageInfo_ZGrab2ScanResult.Marshal(b, m, deterministic) 408 | } 409 | func (m *ZGrab2ScanResult) XXX_Merge(src proto.Message) { 410 | xxx_messageInfo_ZGrab2ScanResult.Merge(m, src) 411 | } 412 | func (m *ZGrab2ScanResult) XXX_Size() int { 413 | return xxx_messageInfo_ZGrab2ScanResult.Size(m) 414 | } 415 | func (m *ZGrab2ScanResult) XXX_DiscardUnknown() { 416 | xxx_messageInfo_ZGrab2ScanResult.DiscardUnknown(m) 417 | } 418 | 419 | var xxx_messageInfo_ZGrab2ScanResult proto.InternalMessageInfo 420 | 421 | func (m *ZGrab2ScanResult) GetJsonResult() *_struct.Value { 422 | if m != nil { 423 | return m.JsonResult 424 | } 425 | return nil 426 | } 427 | 428 | func init() { 429 | proto.RegisterType((*Event)(nil), "nraySchema.Event") 430 | 
proto.RegisterType((*ScanResult)(nil), "nraySchema.ScanResult") 431 | proto.RegisterType((*EnvironmentInformation)(nil), "nraySchema.EnvironmentInformation") 432 | proto.RegisterType((*PortScanResult)(nil), "nraySchema.PortScanResult") 433 | proto.RegisterType((*ZGrab2ScanResult)(nil), "nraySchema.ZGrab2ScanResult") 434 | } 435 | 436 | func init() { proto.RegisterFile("schemas/events.proto", fileDescriptor_3ab30010df94cd8f) } 437 | 438 | var fileDescriptor_3ab30010df94cd8f = []byte{ 439 | // 489 bytes of a gzipped FileDescriptorProto 440 | 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x52, 0x4f, 0x6b, 0xdb, 0x4e, 441 | 0x10, 0x8d, 0x14, 0xdb, 0x91, 0xc6, 0xbf, 0x5f, 0x08, 0x4b, 0x31, 0x42, 0x04, 0x6a, 0x74, 0xf2, 442 | 0x49, 0x2e, 0x29, 0x94, 0x1c, 0x7a, 0x0a, 0x49, 0x9b, 0xf4, 0x50, 0xca, 0xa6, 0xf4, 0xd0, 0xdb, 443 | 0x5a, 0xde, 0x38, 0x2e, 0xd6, 0x8e, 0xd8, 0x5d, 0x05, 0xd2, 0x0f, 0xd0, 0x8f, 0xd1, 0xcf, 0x51, 444 | 0xfa, 0xe9, 0xca, 0x8e, 0xfe, 0xad, 0x4d, 0x4e, 0xda, 0xa7, 0x79, 0xf3, 0x66, 0xf6, 0xbd, 0x85, 445 | 0x57, 0xa6, 0x78, 0x94, 0xa5, 0x30, 0x4b, 0xf9, 0x24, 0x95, 0x35, 0x79, 0xa5, 0xd1, 0x22, 0x03, 446 | 0xa5, 0xc5, 0xf3, 0x3d, 0x55, 0xd2, 0xd7, 0x1b, 0xc4, 0xcd, 0x4e, 0x2e, 0xa9, 0xb2, 0xaa, 0x1f, 447 | 0x96, 0x76, 0x5b, 0x4a, 0x63, 0x45, 0x59, 0x35, 0xe4, 0xf4, 0xfc, 0x90, 0x60, 0xac, 0xae, 0x0b, 448 | 0xdb, 0x54, 0xb3, 0xdf, 0x21, 0x8c, 0x6f, 0x9c, 0x36, 0x9b, 0xc1, 0x44, 0xe1, 0x5a, 0xde, 0x5d, 449 | 0x27, 0xc1, 0x3c, 0x58, 0xc4, 0xbc, 0x45, 0x2c, 0x85, 0xc8, 0x9d, 0x3e, 0x8b, 0x52, 0x26, 0x21, 450 | 0x55, 0x7a, 0xcc, 0x2e, 0x21, 0xee, 0xc7, 0x25, 0xc7, 0xf3, 0x60, 0x31, 0xbd, 0x48, 0xf3, 0x66, 451 | 0x5e, 0xde, 0xcd, 0xcb, 0xbf, 0x76, 0x0c, 0x3e, 0x90, 0xd9, 0x1c, 0xa6, 0xa6, 0x10, 0x4a, 0x49, 452 | 0xad, 0x9c, 0xf0, 0x84, 0x84, 0xfd, 0x5f, 0xec, 0x03, 0x4c, 0xa5, 0x7a, 0xda, 0x6a, 0x54, 0xa5, 453 | 0x54, 0x36, 0x39, 0x21, 0xf5, 0x2c, 0x1f, 0xae, 0x9e, 0xdf, 0x0c, 0xe5, 0x3b, 0xf5, 0x80, 0xba, 454 | 0x14, 0x76, 0x8b, 0xea, 0xf6, 0x88, 0xfb, 0x8d, 0xec, 0x0d, 0x4c, 0xb4, 0x34, 0xf5, 0xce, 0x26, 455 | 0x11, 0x49, 0xcc, 0x7c, 0x89, 0xfb, 0x42, 0x28, 0x4e, 0xd5, 0xdb, 0x23, 0xde, 0xf2, 0xae, 0xa6, 456 | 0x10, 0x93, 0x25, 0xd7, 0xc2, 0x8a, 0xec, 0x6f, 0x00, 0x30, 0xb0, 0x9c, 0x4b, 0x56, 0xe8, 0x8d, 457 | 0xb4, 0xc9, 0xa8, 0x71, 0xa9, 0x41, 0x8c, 0xc1, 0xa8, 0x42, 0x6d, 0x93, 0xf1, 0x3c, 0x58, 0xfc, 458 | 0xcf, 0xe9, 0xcc, 0x2e, 0x21, 0x72, 0x5f, 0x77, 0xa9, 0x76, 0x76, 0xea, 0xcf, 0xfe, 0x82, 0xda, 459 | 0xee, 0xcd, 0xef, 0xd9, 0xec, 0x3d, 0xc4, 0x3f, 0x37, 0x5a, 0xac, 0xa8, 0x35, 0xa6, 0xd6, 0x73, 460 | 0xbf, 0xf5, 0xfb, 0x47, 0x2d, 0x56, 0x17, 0x7b, 0xcd, 0x43, 0xc3, 0x55, 0xd4, 0xdd, 0x38, 0xfb, 461 | 0x13, 0xc0, 0xec, 0x65, 0x97, 0x5c, 0xac, 0x8f, 0x68, 0x2c, 0xb9, 0xdf, 0x04, 0xde, 0x63, 0x76, 462 | 0x0a, 0x21, 0x9a, 0x36, 0xec, 0x10, 0x0d, 0x3b, 0x83, 0xe3, 0x6a, 0xbb, 0xa6, 0x80, 0x63, 0xee, 463 | 0x8e, 0x2e, 0xbe, 0x4a, 0x63, 0x21, 0x8d, 0x21, 0x81, 0xc6, 0x0b, 0xff, 0x97, 0xd3, 0xaf, 0x4d, 464 | 0x9b, 0xee, 0xb8, 0xd1, 0xef, 0x30, 0xcb, 0xe0, 0xbf, 0xa2, 0xaa, 0x4b, 0x5c, 0xcb, 0x9d, 0x97, 465 | 0xfe, 0xde, 0xbf, 0xec, 0x57, 0x00, 0xa7, 0xfb, 0x0e, 0x79, 0xde, 0x07, 0x2f, 0x7a, 0x1f, 0x7a, 466 | 0xde, 0x33, 0x18, 0x61, 0x25, 0x15, 0xed, 0x1c, 0x71, 0x3a, 0xbb, 0x95, 0x9c, 0x3f, 0xf6, 0xb9, 467 | 0xea, 0x36, 0xee, 0x31, 0x4b, 0xe0, 0xc4, 0x3d, 0x4e, 0xac, 0xbb, 0x08, 0x3b, 0x98, 0x7d, 0x82, 468 | 0xb3, 0x43, 0xbb, 0xd9, 0x3b, 0x80, 0x1f, 0x06, 0x5b, 0x44, 0xdb, 0xb8, 0x77, 0x75, 0xf8, 0xf0, 469 | 0xbf, 0x89, 0x5d, 0x2d, 0xb9, 0xc7, 0x5c, 0x4d, 
0xa8, 0xf6, 0xf6, 0x5f, 0x00, 0x00, 0x00, 0xff, 470 | 0xff, 0x3e, 0x8b, 0x35, 0x95, 0xd7, 0x03, 0x00, 0x00, 471 | } 472 | -------------------------------------------------------------------------------- /utils/defaultConfig_test.go: -------------------------------------------------------------------------------- 1 | package utils_test 2 | 3 | import ( 4 | "testing" 5 | "time" 6 | 7 | "github.com/nray-scanner/nray/utils" 8 | "github.com/spf13/viper" 9 | ) 10 | 11 | func TestApplyDefaultConfig(t *testing.T) { 12 | var result *viper.Viper 13 | 14 | // Test passing nil to the function 15 | result = utils.ApplyDefaultConfig(nil) 16 | if !result.IsSet("debug") || result.GetBool("debug") != false { 17 | t.Errorf("Test failed: Passing nil to config") 18 | } 19 | if !result.IsSet("listen") { 20 | t.Errorf("Test failed: Passing nil to config") 21 | } 22 | if !result.IsSet("host") || result.GetString("host") != "127.0.0.1" { 23 | t.Errorf("Test failed: Passing nil to config") 24 | } 25 | if !result.IsSet("TLS.enabled") || result.GetBool("TLS.enabled") != false { 26 | t.Errorf("Test failed: Passing nil to config") 27 | } 28 | if !result.IsSet("TLS.CA") || result.GetString("TLS.CA") != "" { 29 | t.Errorf("Test failed: Passing nil to config") 30 | } 31 | if !result.IsSet("TLS.cert") || result.GetString("TLS.cert") != "" { 32 | t.Errorf("Test failed: Passing nil to config") 33 | } 34 | if !result.IsSet("TLS.key") || result.GetString("TLS.key") != "" { 35 | t.Errorf("Test failed: Passing nil to config") 36 | } 37 | if !result.IsSet("TLS.forceClientAuth") || result.GetBool("TLS.forceClientAuth") != false { 38 | t.Errorf("Test failed: Passing nil to config") 39 | } 40 | if !result.IsSet("statusPrintInterval") || result.GetDuration("statusPrintInterval") != 15*time.Second { 41 | t.Errorf("Test failed: Passing nil to config") 42 | } 43 | if !result.IsSet("pools") || result.GetUint("pools") != 1 { 44 | t.Errorf("Test failed: Passing nil to config") 45 | } 46 | if !result.IsSet("considerClientPoolPreference") || result.GetBool("considerClientPoolPreference") != true { 47 | t.Errorf("Test failed: Passing nil to config") 48 | } 49 | if !result.IsSet("internal.nodeExpiryTime") || result.GetUint("internal.nodeExpiryTime") != 30 { 50 | t.Errorf("Test failed: Passing nil to config") 51 | } 52 | if !result.IsSet("internal.nodeExpiryCheckInterval") || result.GetUint("internal.nodeExpiryCheckInterval") != 10 { 53 | t.Errorf("Test failed: Passing nil to config") 54 | } 55 | if !result.IsSet("targetgenerator.bufferSize") || result.GetUint("targetgenerator.bufferSize") != 5 { 56 | t.Errorf("Test failed: Passing nil to config") 57 | } 58 | 59 | // Test passing an empty viper to the function 60 | emptyViper := viper.New() 61 | result = utils.ApplyDefaultConfig(emptyViper) 62 | if !result.IsSet("debug") || result.GetBool("debug") != false { 63 | t.Errorf("Test failed: Passing empty viper to config") 64 | } 65 | if !result.IsSet("listen") { 66 | t.Errorf("Test failed: Passing empty viper to config") 67 | } 68 | if !result.IsSet("host") || result.GetString("host") != "127.0.0.1" { 69 | t.Errorf("Test failed: Passing empty viper to config") 70 | } 71 | if !result.IsSet("TLS.enabled") || result.GetBool("TLS.enabled") != false { 72 | t.Errorf("Test failed: Passing empty viper to config") 73 | } 74 | if !result.IsSet("TLS.CA") || result.GetString("TLS.CA") != "" { 75 | t.Errorf("Test failed: Passing empty viper to config") 76 | } 77 | if !result.IsSet("TLS.cert") || result.GetString("TLS.cert") != "" { 78 | t.Errorf("Test failed: 
Passing empty viper to config") 79 | } 80 | if !result.IsSet("TLS.key") || result.GetString("TLS.key") != "" { 81 | t.Errorf("Test failed: Passing empty viper to config") 82 | } 83 | if !result.IsSet("TLS.forceClientAuth") || result.GetBool("TLS.forceClientAuth") != false { 84 | t.Errorf("Test failed: Passing empty viper to config") 85 | } 86 | if !result.IsSet("statusPrintInterval") || result.GetDuration("statusPrintInterval") != 15*time.Second { 87 | t.Errorf("Test failed: Passing empty viper to config") 88 | } 89 | if !result.IsSet("pools") || result.GetUint("pools") != 1 { 90 | t.Errorf("Test failed: Passing empty viper to config") 91 | } 92 | if !result.IsSet("considerClientPoolPreference") || result.GetBool("considerClientPoolPreference") != true { 93 | t.Errorf("Test failed: Passing empty viper to config") 94 | } 95 | if !result.IsSet("internal.nodeExpiryTime") || result.GetUint("internal.nodeExpiryTime") != 30 { 96 | t.Errorf("Test failed: Passing empty viper to config") 97 | } 98 | if !result.IsSet("internal.nodeExpiryCheckInterval") || result.GetUint("internal.nodeExpiryCheckInterval") != 10 { 99 | t.Errorf("Test failed: Passing empty viper to config") 100 | } 101 | if !result.IsSet("targetgenerator.bufferSize") || result.GetUint("targetgenerator.bufferSize") != 5 { 102 | t.Errorf("Test failed: Passing empty viper to config") 103 | } 104 | 105 | // Pass a viper with a value explicitly set. The value mustn't change. 106 | viperWithValue := viper.New() 107 | viperWithValue.Set("debug", true) 108 | viperWithValue.Set("host", "0.0.0.0") 109 | viperWithValue.Set("pools", 5) 110 | 111 | result = utils.ApplyDefaultConfig(viperWithValue) 112 | if !result.IsSet("debug") || result.GetBool("debug") != true { 113 | t.Errorf("Test failed: Passing changed value to config") 114 | } 115 | if !result.IsSet("listen") { 116 | t.Errorf("Test failed: Passing changed value to config") 117 | } 118 | if !result.IsSet("host") || result.GetString("host") != "0.0.0.0" { 119 | t.Errorf("Test failed: Passing changed value to config") 120 | } 121 | if !result.IsSet("TLS.enabled") || result.GetBool("TLS.enabled") != false { 122 | t.Errorf("Test failed: Passing changed value to config") 123 | } 124 | if !result.IsSet("TLS.CA") || result.GetString("TLS.CA") != "" { 125 | t.Errorf("Test failed: Passing changed value to config") 126 | } 127 | if !result.IsSet("TLS.cert") || result.GetString("TLS.cert") != "" { 128 | t.Errorf("Test failed: Passing changed value to config") 129 | } 130 | if !result.IsSet("TLS.key") || result.GetString("TLS.key") != "" { 131 | t.Errorf("Test failed: Passing changed value to config") 132 | } 133 | if !result.IsSet("TLS.forceClientAuth") || result.GetBool("TLS.forceClientAuth") != false { 134 | t.Errorf("Test failed: Passing changed value to config") 135 | } 136 | if !result.IsSet("pools") || result.GetUint("pools") != 5 { 137 | t.Errorf("Test failed: Passing changed value to config") 138 | } 139 | if !result.IsSet("considerClientPoolPreference") || result.GetBool("considerClientPoolPreference") != true { 140 | t.Errorf("Test failed: Passing changed value to config") 141 | } 142 | if !result.IsSet("internal.nodeExpiryTime") || result.GetUint("internal.nodeExpiryTime") != 30 { 143 | t.Errorf("Test failed: Passing changed value to config") 144 | } 145 | if !result.IsSet("internal.nodeExpiryCheckInterval") || result.GetUint("internal.nodeExpiryCheckInterval") != 10 { 146 | t.Errorf("Test failed: Passing changed value to config") 147 | } 148 | if !result.IsSet("targetgenerator.bufferSize") 
|| result.GetUint("targetgenerator.bufferSize") != 5 { 149 | t.Errorf("Test failed: Passing changed value to config") 150 | } 151 | } 152 | 153 | func TestApplyDefaultTargetgeneratorStandardConfig(t *testing.T) { 154 | var result *viper.Viper 155 | 156 | // Test passing nil to the function 157 | result = utils.ApplyDefaultTargetgeneratorStandardConfig(nil) 158 | if !result.IsSet("enabled") || result.GetBool("enabled") != false { 159 | t.Errorf("Test failed: Passing nil to config") 160 | } 161 | if !result.IsSet("targets") { 162 | t.Errorf("Test failed: Passing nil to config") 163 | } 164 | if !result.IsSet("targetFile") || result.GetString("targetFile") != "" { 165 | t.Errorf("Test failed: Passing nil to config") 166 | } 167 | if !result.IsSet("tcpports") { 168 | t.Errorf("Test failed: Passing nil to config") 169 | } 170 | if !result.IsSet("udpports") { 171 | t.Errorf("Test failed: Passing nil to config") 172 | } 173 | if !result.IsSet("blacklist") { 174 | t.Errorf("Test failed: Passing nil to config") 175 | } 176 | if !result.IsSet("blacklistFile") || result.GetString("blacklistFile") != "" { 177 | t.Errorf("Test failed: Passing nil to config") 178 | } 179 | if !result.IsSet("maxHostsPerBatch") || result.GetUint("maxHostsPerBatch") != 150 { 180 | t.Errorf("Test failed: Passing nil to config") 181 | } 182 | if !result.IsSet("maxTcpPortsPerBatch") || result.GetUint("maxTcpPortsPerBatch") != 25 { 183 | t.Errorf("Test failed: Passing nil to config") 184 | } 185 | if !result.IsSet("maxUdpPortsPerBatch") || result.GetUint("maxUdpPortsPerBatch") != 25 { 186 | t.Errorf("Test failed: Passing nil to config") 187 | } 188 | 189 | // Test passing an empty viper to the function 190 | emptyViper := viper.New() 191 | result = utils.ApplyDefaultTargetgeneratorStandardConfig(emptyViper) 192 | if !result.IsSet("enabled") || result.GetBool("enabled") != false { 193 | t.Errorf("Test failed: Passing empty viper to config") 194 | } 195 | if !result.IsSet("targets") { 196 | t.Errorf("Test failed: Passing empty viper to config") 197 | } 198 | if !result.IsSet("targetFile") || result.GetString("targetFile") != "" { 199 | t.Errorf("Test failed: Passing empty viper to config") 200 | } 201 | if !result.IsSet("tcpports") { 202 | t.Errorf("Test failed: Passing empty viper to config") 203 | } 204 | if !result.IsSet("udpports") { 205 | t.Errorf("Test failed: Passing empty viper to config") 206 | } 207 | if !result.IsSet("blacklist") { 208 | t.Errorf("Test failed: Passing empty viper to config") 209 | } 210 | if !result.IsSet("blacklistFile") || result.GetString("blacklistFile") != "" { 211 | t.Errorf("Test failed: Passing empty viper to config") 212 | } 213 | if !result.IsSet("maxHostsPerBatch") || result.GetUint("maxHostsPerBatch") != 150 { 214 | t.Errorf("Test failed: Passing empty viper to config") 215 | } 216 | if !result.IsSet("maxTcpPortsPerBatch") || result.GetUint("maxTcpPortsPerBatch") != 25 { 217 | t.Errorf("Test failed: Passing empty viper to config") 218 | } 219 | if !result.IsSet("maxUdpPortsPerBatch") || result.GetUint("maxUdpPortsPerBatch") != 25 { 220 | t.Errorf("Test failed: Passing empty viper to config") 221 | } 222 | 223 | // Pass a viper with a value explicitly set. The value mustn't change. 
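// Note that this includes zero values: "maxUdpPortsPerBatch" is explicitly
// set to 0 below and must keep that 0 instead of falling back to the default of 25.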
224 | viperWithValue := viper.New() 225 | viperWithValue.Set("enabled", true) 226 | viperWithValue.Set("maxHostsPerBatch", 100) 227 | viperWithValue.Set("maxTcpPortsPerBatch", 50) 228 | viperWithValue.Set("maxUdpPortsPerBatch", 0) 229 | result = utils.ApplyDefaultTargetgeneratorStandardConfig(viperWithValue) 230 | if !result.IsSet("enabled") || result.GetBool("enabled") != true { 231 | t.Errorf("Test failed: Passing changed value to config") 232 | } 233 | if !result.IsSet("targets") { 234 | t.Errorf("Test failed: Passing changed value to config") 235 | } 236 | if !result.IsSet("targetFile") || result.GetString("targetFile") != "" { 237 | t.Errorf("Test failed: Passing changed value to config") 238 | } 239 | if !result.IsSet("tcpports") { 240 | t.Errorf("Test failed: Passing changed value to config") 241 | } 242 | if !result.IsSet("udpports") { 243 | t.Errorf("Test failed: Passing changed value to config") 244 | } 245 | if !result.IsSet("blacklist") { 246 | t.Errorf("Test failed: Passing changed value to config") 247 | } 248 | if !result.IsSet("blacklistFile") || result.GetString("blacklistFile") != "" { 249 | t.Errorf("Test failed: Passing changed value to config") 250 | } 251 | if !result.IsSet("maxHostsPerBatch") || result.GetUint("maxHostsPerBatch") != 100 { 252 | t.Errorf("Test failed: Passing changed value to config") 253 | } 254 | if !result.IsSet("maxTcpPortsPerBatch") || result.GetUint("maxTcpPortsPerBatch") != 50 { 255 | t.Errorf("Test failed: Passing changed value to config") 256 | } 257 | if !result.IsSet("maxUdpPortsPerBatch") || result.GetUint("maxUdpPortsPerBatch") != 0 { 258 | t.Errorf("Test failed: Passing changed value to config") 259 | } 260 | } 261 | 262 | func TestApplyDefaultScannerConfig(t *testing.T) { 263 | var result *viper.Viper 264 | 265 | // Test passing nil to the function 266 | result = utils.ApplyDefaultScannerConfig(nil) 267 | if !result.IsSet("workers") || result.GetUint("workers") != 250 { 268 | t.Errorf("Test failed: Passing nil to config") 269 | } 270 | if !result.IsSet("ratelimit") || result.GetString("ratelimit") != "none" { 271 | t.Errorf("Test failed: Passing nil to config") 272 | } 273 | 274 | // Test passing an empty viper to the function 275 | emptyViper := viper.New() 276 | result = utils.ApplyDefaultScannerConfig(emptyViper) 277 | if !result.IsSet("workers") || result.GetUint("workers") != 250 { 278 | t.Errorf("Test failed: Passing empty viper to config") 279 | } 280 | if !result.IsSet("ratelimit") || result.GetString("ratelimit") != "none" { 281 | t.Errorf("Test failed: Passing empty viper to config") 282 | } 283 | 284 | // Pass a viper with a value explicitly set. The value mustn't change. 
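// "ratelimit" defaults to the string "none", but an explicit override may be
// numeric, which is why the assertion below reads it back via GetFloat64.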
285 | viperWithValue := viper.New() 286 | viperWithValue.Set("workers", 1000) 287 | viperWithValue.Set("ratelimit", 25) 288 | result = utils.ApplyDefaultScannerConfig(viperWithValue) 289 | if !result.IsSet("workers") || result.GetUint("workers") != 1000 { 290 | t.Errorf("Test failed: Passing changed value to config") 291 | } 292 | if !result.IsSet("ratelimit") || result.GetFloat64("ratelimit") != 25 { 293 | t.Errorf("Test failed: Passing changed value to config") 294 | } 295 | 296 | } 297 | 298 | func TestApplyDefaultScannerTCPConfig(t *testing.T) { 299 | var result *viper.Viper 300 | 301 | // Test passing nil to the function 302 | result = utils.ApplyDefaultScannerTCPConfig(nil) 303 | if !result.IsSet("timeout") || result.GetDuration("timeout") != (2500*time.Millisecond) { 304 | t.Errorf("Test failed: Passing nil to config") 305 | } 306 | 307 | // Test passing an empty viper to the function 308 | emptyViper := viper.New() 309 | result = utils.ApplyDefaultScannerTCPConfig(emptyViper) 310 | if !result.IsSet("timeout") || result.GetDuration("timeout") != (2500*time.Millisecond) { 311 | t.Errorf("Test failed: Passing empty viper to config") 312 | } 313 | 314 | // Pass a viper with a value explicitly set. The value mustn't change. 315 | viperWithValue := viper.New() 316 | viperWithValue.Set("timeout", "1500ms") 317 | result = utils.ApplyDefaultScannerTCPConfig(viperWithValue) 318 | if !result.IsSet("timeout") || result.GetDuration("timeout") != (1500*time.Millisecond) { 319 | t.Errorf("Test failed: Passing changed value to config") 320 | } 321 | } 322 | 323 | func TestApplyDefaultScannerUDPConfig(t *testing.T) { 324 | var result *viper.Viper 325 | 326 | // Test passing nil to the function 327 | result = utils.ApplyDefaultScannerUDPConfig(nil) 328 | if !result.IsSet("timeout") || result.GetDuration("timeout") != (2500*time.Millisecond) { 329 | t.Errorf("Test failed: Passing nil to config") 330 | } 331 | if !result.IsSet("fast") || result.GetBool("fast") != false { 332 | t.Errorf("Test failed: Passing nil to config") 333 | } 334 | if !result.IsSet("defaultHexPayload") || result.GetString("defaultHexPayload") != "\x6e\x72\x61\x79" { 335 | t.Errorf("Test failed: Passing nil to config") 336 | } 337 | 338 | // Test passing an empty viper to the function 339 | emptyViper := viper.New() 340 | result = utils.ApplyDefaultScannerUDPConfig(emptyViper) 341 | if !result.IsSet("timeout") || result.GetDuration("timeout") != (2500*time.Millisecond) { 342 | t.Errorf("Test failed: Passing empty viper to config") 343 | } 344 | if !result.IsSet("fast") || result.GetBool("fast") != false { 345 | t.Errorf("Test failed: Passing empty viper to config") 346 | } 347 | if !result.IsSet("defaultHexPayload") || result.GetString("defaultHexPayload") != "\x6e\x72\x61\x79" { 348 | t.Errorf("Test failed: Passing empty viper to config") 349 | } 350 | 351 | // Pass a viper with a value explicitly set. The value mustn't change. 
352 | viperWithValue := viper.New() 353 | viperWithValue.Set("timeout", "1500ms") 354 | viperWithValue.Set("fast", true) 355 | result = utils.ApplyDefaultScannerUDPConfig(viperWithValue) 356 | if !result.IsSet("timeout") || result.GetDuration("timeout") != (1500*time.Millisecond) { 357 | t.Errorf("Test failed: Passing changed value to config") 358 | } 359 | if !result.IsSet("fast") || result.GetBool("fast") != true { 360 | t.Errorf("Test failed: Passing changed value to config") 361 | } 362 | if !result.IsSet("defaultHexPayload") || result.GetString("defaultHexPayload") != "\x6e\x72\x61\x79" { 363 | t.Errorf("Test failed: Passing changed value to config") 364 | } 365 | } 366 | 367 | func TestApplyDefaultEventTerminalConfig(t *testing.T) { 368 | var result *viper.Viper 369 | 370 | // Test passing nil to the function 371 | result = utils.ApplyDefaultEventTerminalConfig(nil) 372 | if !result.IsSet("internal.channelsize") || result.GetUint("internal.channelsize") != 1000 { 373 | t.Errorf("Test failed: Passing nil to config") 374 | } 375 | 376 | // Test passing an empty viper to the function 377 | emptyViper := viper.New() 378 | result = utils.ApplyDefaultEventTerminalConfig(emptyViper) 379 | if !result.IsSet("internal.channelsize") || result.GetUint("internal.channelsize") != 1000 { 380 | t.Errorf("Test failed: Passing empty viper to config") 381 | } 382 | 383 | // Pass a viper with a value explicitly set. The value mustn't change. 384 | viperWithValue := viper.New() 385 | viperWithValue.Set("internal.channelsize", 1500) 386 | result = utils.ApplyDefaultEventTerminalConfig(viperWithValue) 387 | if !result.IsSet("internal.channelsize") || result.GetUint("internal.channelsize") != 1500 { 388 | t.Errorf("Test failed: Passing changed value to config") 389 | } 390 | } 391 | 392 | func TestApplyDefaultEventJSONFileConfig(t *testing.T) { 393 | var result *viper.Viper 394 | 395 | // Test passing nil to the function 396 | result = utils.ApplyDefaultEventJSONFileConfig(nil) 397 | if !result.IsSet("filename") || result.GetString("filename") != "nray-output.json" { 398 | t.Errorf("Test failed: Passing nil to config") 399 | } 400 | if !result.IsSet("overwriteExisting") || result.GetBool("overwriteExisting") != false { 401 | t.Errorf("Test failed: Passing nil to config") 402 | } 403 | if !result.IsSet("internal.channelsize") || result.GetUint("internal.channelsize") != 10000 { 404 | t.Errorf("Test failed: Passing nil to config") 405 | } 406 | if !result.IsSet("internal.synctimer") || result.GetDuration("internal.synctimer") != 10*time.Second { 407 | t.Errorf("Test failed: Passing nil to config") 408 | } 409 | 410 | // Test passing an empty viper to the function 411 | emptyViper := viper.New() 412 | result = utils.ApplyDefaultEventJSONFileConfig(emptyViper) 413 | if !result.IsSet("filename") || result.GetString("filename") != "nray-output.json" { 414 | t.Errorf("Test failed: Passing empty viper to config") 415 | } 416 | if !result.IsSet("overwriteExisting") || result.GetBool("overwriteExisting") != false { 417 | t.Errorf("Test failed: Passing empty viper to config") 418 | } 419 | if !result.IsSet("internal.channelsize") || result.GetUint("internal.channelsize") != 10000 { 420 | t.Errorf("Test failed: Passing empty viper to config") 421 | } 422 | if !result.IsSet("internal.synctimer") || result.GetDuration("internal.synctimer") != 10*time.Second { 423 | t.Errorf("Test failed: Passing empty viper to config") 424 | } 425 | 426 | // Pass a viper with a value explicitly set. The value mustn't change. 
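// Overrides may be partial: only "filename" and "overwriteExisting" are set
// below, so "internal.channelsize" and "internal.synctimer" must still
// receive their defaults.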
427 | viperWithValue := viper.New() 428 | viperWithValue.Set("filename", "top25.json") 429 | viperWithValue.Set("overwriteExisting", true) 430 | result = utils.ApplyDefaultEventJSONFileConfig(viperWithValue) 431 | if !result.IsSet("filename") || result.GetString("filename") != "top25.json" { 432 | t.Errorf("Test failed: Passing changed value to config") 433 | } 434 | if !result.IsSet("overwriteExisting") || result.GetBool("overwriteExisting") != true { 435 | t.Errorf("Test failed: Passing changed value to config") 436 | } 437 | if !result.IsSet("internal.channelsize") || result.GetUint("internal.channelsize") != 10000 { 438 | t.Errorf("Test failed: Passing changed value to config") 439 | } 440 | if !result.IsSet("internal.synctimer") || result.GetDuration("internal.synctimer") != 10*time.Second { 441 | t.Errorf("Test failed: Passing changed value to config") 442 | } 443 | } 444 | --------------------------------------------------------------------------------
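The tests above pin down the contract of the Apply* helpers: keys the caller sets explicitly win, and every key left unset falls back to a documented default. A minimal sketch of how a caller might compose this with a user-supplied configuration file — assuming a config file named nray-conf.yaml next to the binary; the wrapper main package and its output are illustrative only, not part of the repository:

package main

import (
	"fmt"

	"github.com/nray-scanner/nray/utils"
	"github.com/spf13/viper"
)

func main() {
	// Load whatever the user provided; keys set here take precedence.
	v := viper.New()
	v.SetConfigFile("nray-conf.yaml")
	if err := v.ReadInConfig(); err != nil {
		// No usable file: ApplyDefaultConfig below still yields a complete config.
		fmt.Println("using built-in defaults:", err)
	}

	// Fill every key the user left unset with its default.
	cfg := utils.ApplyDefaultConfig(v)
	fmt.Println("host:", cfg.GetString("host")) // "127.0.0.1" unless overridden
	fmt.Println("pools:", cfg.GetUint("pools")) // 1 unless overridden
}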