├── .cloud-graphrc.example.json
├── .editorconfig
├── .eslintignore
├── .eslintrc.json
├── .github
│   ├── ISSUE_TEMPLATE
│   │   ├── bug_report.md
│   │   └── feature_request.md
│   ├── SECURITY.md
│   ├── pull_request_template.md
│   └── workflows
│       ├── homebrew.yaml
│       ├── notify.yml
│       ├── pr-validator.yml
│       └── publish.yml
├── .gitignore
├── .npmignore
├── .prettierrc.json
├── .releaserc.yml
├── CHANGELOG.md
├── CONTRIBUTING.md
├── LICENSE
├── README.md
├── bin
│   ├── dev
│   ├── dev.cmd
│   ├── run
│   └── run.cmd
├── docs
│   └── images
│       ├── autoCloud.png
│       ├── exampleQueries.gif
│       ├── gqlAltair.png
│       ├── gqlPlayground.png
│       ├── init.gif
│       ├── install.gif
│       ├── launch.gif
│       ├── load.png
│       ├── logo.png
│       ├── scan.gif
│       └── voyager.png
├── examples
│   └── examples.txt
├── jest.config.js
├── package.json
├── release
│   ├── scripts
│   │   └── homebrew.js
│   └── templates
│       ├── cg-node.rb
│       └── cg.rb
├── src
│   ├── commands
│   │   ├── base.ts
│   │   ├── init.ts
│   │   ├── launch.ts
│   │   ├── load.ts
│   │   ├── operation.ts
│   │   ├── policy
│   │   │   ├── add.ts
│   │   │   ├── index.ts
│   │   │   ├── install.ts
│   │   │   ├── list.ts
│   │   │   ├── remove.ts
│   │   │   └── update.ts
│   │   ├── provider
│   │   │   ├── add.ts
│   │   │   ├── index.ts
│   │   │   ├── install.ts
│   │   │   ├── list.ts
│   │   │   ├── remove.ts
│   │   │   └── update.ts
│   │   ├── scan.ts
│   │   ├── serve.ts
│   │   ├── teardown.ts
│   │   └── update.ts
│   ├── index.ts
│   ├── manager
│   │   ├── index.ts
│   │   └── npm
│   │       └── index.ts
│   ├── reports
│   │   ├── index.ts
│   │   ├── rules-report.ts
│   │   └── scan-report.ts
│   ├── scripts
│   │   └── openChrome.applescript
│   ├── server
│   │   └── index.ts
│   ├── storage
│   │   ├── dgraph
│   │   │   ├── base.ts
│   │   │   ├── index.ts
│   │   │   └── utils.ts
│   │   ├── enums.ts
│   │   ├── index.ts
│   │   └── types.ts
│   ├── types
│   │   ├── cfonts
│   │   │   └── index.d.ts
│   │   └── index.ts
│   └── utils
│       ├── constants.ts
│       ├── data.ts
│       ├── flags.ts
│       ├── index.ts
│       ├── mutation.ts
│       ├── open.ts
│       └── questions.ts
├── test
│   ├── commands
│   │   ├── init.test.ts
│   │   ├── launch.test.ts
│   │   ├── serve.test.ts
│   │   └── teardown.test.ts
│   ├── helpers
│   │   ├── index.ts
│   │   ├── mocks.ts
│   │   └── types.ts
│   ├── tsconfig.json
│   └── utils
│       └── mutation.test.ts
├── tsconfig.json
├── tsconfig.tsbuildinfo
└── yarn.lock
/.cloud-graphrc.example.json:
--------------------------------------------------------------------------------
1 | {
2 | "aws": {
3 | "regions": "us-east-1,us-east-2,us-west-2",
4 | "resources": "alb,apiGatewayResource,apiGatewayRestApi,apiGatewayStage,appSync,asg,billing,cognitoIdentityPool,cognitoUserPool,cloudFormationStack,cloudFormationStackSet,cloudfront,cloudtrail,cloudwatch,dynamodb,ebs,ec2Instance,eip,elb,igw,kinesisFirehose,kinesisStream,kms,lambda,nacl,nat,networkInterface,route53HostedZone,route53Record,routeTable,sg,vpc,sqs,s3",
5 | "accounts": [
6 | {
7 | "profile": "default",
8 | "roleArn": "",
9 | "externalId": ""
10 | },
11 | {
12 | "profile": "master",
13 | "roleArn": "arn:aws:iam::123456678:role/my-readonly-role",
14 | "externalId": ""
15 | },
16 | {
17 | "profile": "master",
18 | "roleArn": "arn:aws:iam::123456678:role/my-readonly-role-with-external-id",
19 | "externalId": "my-external-id"
20 | }
21 | ]
22 | },
23 | "cloudGraph": {
24 | "storageConfig": {
25 | "host": "localhost",
26 | "port": "8997",
27 | "scheme": "http"
28 | },
29 | "versionLimit": 10,
30 | "queryEngine": "playground",
31 | "port": "5555",
32 | "plugins": {
33 | "policyPack": [
34 | {
35 | "name": "aws-cis-1.2",
36 | "providers": ["aws"]
37 | }
38 | ]
39 | }
40 | }
41 | }
42 |
--------------------------------------------------------------------------------
/.editorconfig:
--------------------------------------------------------------------------------
1 | root = true
2 |
3 | [*]
4 | indent_style = space
5 | indent_size = 2
6 | charset = utf-8
7 | trim_trailing_whitespace = true
8 | insert_final_newline = true
9 |
10 | [*.md]
11 | trim_trailing_whitespace = false
12 |
--------------------------------------------------------------------------------
/.eslintignore:
--------------------------------------------------------------------------------
1 | /lib
2 |
--------------------------------------------------------------------------------
/.eslintrc.json:
--------------------------------------------------------------------------------
1 | {
2 | "extends": [
3 | "@autocloud"
4 | ],
5 | "rules": {
6 | "no-console": "off"
7 | },
8 | "ignorePatterns": ["src/plugins/", "examples"]
9 | }
10 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/bug_report.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Bug report
3 | about: Create a report to help us improve CloudGraph
4 | title: ''
5 | labels: bug
6 | assignees: tyler-dunkel
7 |
8 | ---
9 |
10 | Thank you for filling out a bug report; we really appreciate any help in improving the CloudGraph CLI and providers!
11 |
12 | **Describe the bug**
13 | A clear and concise description of what the bug is.
14 |
15 | **To Reproduce**
16 | Steps to reproduce the behavior:
17 | 1. Run command '...' NOTE: please run the command in DEBUG mode for additional debugging info [e.g. `CG_DEBUG=5 cg scan aws`]
18 | 2. Run GraphQL query '....'
19 | 3. See error
20 |
21 | Please include the `cg-debug.log` file if applicable
22 |
23 | **Expected behavior**
24 | A clear and concise description of what you expected to happen.
25 |
26 | **Environment (please complete the following information):**
27 | - CLI version [e.g. `0.11.7`]
28 | - Provider versions [e.g. `aws@0.30.0`, `azure@0.15.1`]
29 | - Context [e.g. Local machine, EC2 Instance, Other]
30 |
31 | **Screenshots**
32 | If applicable, add screenshots to help explain your problem.
33 |
34 | **Additional context**
35 | Add any other context about the problem here.
36 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/feature_request.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Feature request
3 | about: Suggest a feature you would like to see CloudGraph implement
4 | title: ''
5 | labels: enhancement
6 | assignees: tyler-dunkel
7 |
8 | ---
9 |
10 | Thank you for taking the time to suggest a way the CloudGraph tool could improve!
11 |
12 | If this is for a larger feature request, please use our [Slack channel](https://cloudgraph-workspace.slack.com) so we can discuss and avoid duplicate work (we may already be working on it!).
13 |
14 | **Is your feature request related to a problem? Please describe.**
15 | A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
16 |
17 | **Describe the solution you'd like**
18 | A clear and concise description of what you want to happen.
19 |
20 | **Describe alternatives you've considered**
21 | A clear and concise description of any alternative solutions or features you've considered.
22 |
23 | **How would this be useful to you**
24 | Tell us what this feature would help you achieve in your workflow.
25 |
26 | **Additional context**
27 | Add any other context or screenshots about the feature request here.
28 |
--------------------------------------------------------------------------------
/.github/SECURITY.md:
--------------------------------------------------------------------------------
1 | # Security Policy
2 |
3 | 1. [Reporting security problems to CloudGraph](#reporting)
4 | 2. [Security Point of Contact](#contact)
5 | 3. [Incident Response Process](#process)
6 |
7 |
8 | ## Reporting security problems to CloudGraph
9 |
10 | **DO NOT CREATE AN ISSUE** to report a security problem. Instead, please
11 | send an email to security@autocloud.dev
12 |
13 |
14 | ## Security Point of Contact
15 |
16 | The security point of contact is Tyler Dunkel. Tyler responds to security
17 | incident reports as fast as possible, within one business day at the latest.
18 |
19 | In case Tyler does not respond within a reasonable time, the secondary point
20 | of contact is [Tyson Kunovsky](https://github.com/orgs/cloudgraphdev/people/kunovsky).
21 |
22 | If neither Tyler nor Tyson responds, please contact support@github.com,
23 | who can disable any access for the CloudGraph CLI tool until the security incident is resolved.
24 |
25 |
26 | ## Incident Response Process
27 |
28 | In case an incident is discovered or reported, CloudGraph will follow this
29 | process to contain, respond, and remediate:
30 |
31 | ### 1. Containment
32 |
33 | The first step is to find out the root cause, nature and scope of the incident.
34 |
35 | - Is it still ongoing? If yes, the first priority is to stop it.
36 | - Is the incident outside of our influence? If yes, the first priority is to contain it.
37 | - Find out who knows about the incident and who is affected.
38 | - Find out what data was potentially exposed.
39 |
40 | One way to immediately remove all access for CloudGraph is to uninstall CloudGraph globally and/or locally using
41 | `npm uninstall -g @cloudgraph/cli` && `npm uninstall @cloudgraph/cli`
42 |
43 | ### 2. Response
44 |
45 | After the initial assessment and containment to the best of our abilities, CloudGraph will
46 | document all actions taken in a response plan.
47 |
48 | CloudGraph will create an RCA (Root Cause Analysis) document in the [CloudGraph documentation site](https://docs.cloudgraph.dev/overview) that describes what happened and what was done to resolve it.
49 |
50 | ### 3. Remediation
51 |
52 | Once the incident is confirmed to be resolved, CloudGraph will summarize the lessons
53 | learned from the incident and create a list of actions CloudGraph will take to prevent
54 | it from happening again.
55 |
56 | ### Keep permissions to a minimum
57 |
58 | The CloudGraph CLI tool uses the least amount of access to limit the impact of possible
59 | security incidents, see [README - How It Works](https://github.com/cloudgraphdev/cli#how-it-works).
60 |
61 | ### Secure accounts with access
62 |
63 | The [CloudGraph GitHub Organization](https://github.com/cloudgraphdev) requires 2FA authorization
64 | for all members.
65 |
66 | ### Critical Updates And Security Notices
67 |
68 | We learn about critical software updates and security threats from these sources:
69 |
70 | 1. GitHub Security Alerts
71 | 2. [Snyk open source vulnerability detection](https://snyk.io/product/open-source-security-management/)
72 | 3. GitHub: https://githubstatus.com/ & [@githubstatus](https://twitter.com/githubstatus)
--------------------------------------------------------------------------------
/.github/pull_request_template.md:
--------------------------------------------------------------------------------
1 | ## Issue tracker links
2 |
3 | _Add links to any relevant tasks/stories/bugs/pagerduty/etc_
4 |
5 | *Example - dummy TODO project*
6 |
7 | [TODO-123](https://autoclouddev.atlassian.net/browse/TODO-123)
8 |
9 | ## Changes/solution
10 |
11 | _How does this change address the problem?_
12 |
13 | ## Testing
14 |
15 | _Describe how the testing was done, plus evidence, if not covered by automated tests_
16 |
17 | ## Notes and considerations
18 |
19 | _Add any additional notes and/or considerations_
20 |
21 | ## Dependencies
22 |
23 | _Add dependencies on any other PRs, if applicable_
24 |
--------------------------------------------------------------------------------
/.github/workflows/homebrew.yaml:
--------------------------------------------------------------------------------
1 | name: homebrew
2 |
3 | on:
4 | push:
5 | branches:
6 | - main
7 |
8 | jobs:
9 | homebrew:
10 | runs-on: ubuntu-latest
11 | env:
12 | NODE_AUTH_TOKEN: ${{secrets.NPM_TOKEN}}
13 | AWS_SDK_LOAD_CONFIG: true
14 | AWS_PROFILE: cloudgraph-iac
15 | NODE_ENV: "cicd"
16 | steps:
17 | - uses: actions/checkout@v3
18 | with:
19 | fetch-depth: 0
20 | persist-credentials: false
21 | token: ${{secrets.GH_TOKEN}}
22 |
23 | - uses: actions/setup-node@v3
24 | with:
25 | node-version: 16
26 | registry-url: "https://registry.npmjs.org"
27 | cache: yarn
28 |
29 | - name: Mkdir .aws
30 | run: mkdir -p ~/.aws
31 |
32 | - name: Set .aws/config
33 | run: |
34 | cat << EOF > ~/.aws/config
35 | [default]
36 | region=us-east-1
37 | output=json
38 |
39 | [profile cloudgraph-iac]
40 | role_arn = ${{ secrets.AWS_ROLE_ARN }}
41 | source_profile = default
42 | EOF
43 | - name: Set .aws/credentials
44 | run: |
45 | cat << EOF > ~/.aws/credentials
46 | [default]
47 | aws_access_key_id = ${{ secrets.AWS_ACCESS_KEY_ID }}
48 | aws_secret_access_key = ${{ secrets.AWS_SECRET_ACCESS_KEY }}
49 | EOF
50 |
51 | - name: Install Packages
52 | run: yarn install --prefer-offline --frozen-lockfile
53 |
54 | - name: Build
55 | run: yarn build
56 |
57 | - name: Add SSH key
58 | env:
59 | SSH_AUTH_SOCK: /tmp/ssh_agent.sock
60 | run: |
61 | mkdir -p ~/.ssh
62 | ssh-keyscan github.com >> ~/.ssh/known_hosts
63 | echo "${{ secrets.AUTODEPLOY_SSH_KEY }}" > ~/.ssh/github_actions
64 | chmod 600 ~/.ssh/github_actions
65 | ssh-agent -a $SSH_AUTH_SOCK > /dev/null
66 | ssh-add ~/.ssh/github_actions
67 |
68 | - name: Homebrew
69 | env:
70 | SSH_AUTH_SOCK: /tmp/ssh_agent.sock
71 | run: |
72 | git config --global user.email "no-reply@autocloud.dev"
73 | git config --global user.name "autocloud-deploy-bot"
74 | yarn homebrew
75 |
--------------------------------------------------------------------------------
/.github/workflows/notify.yml:
--------------------------------------------------------------------------------
1 | ---
2 | name: notify
3 |
4 | on:
5 | release:
6 | types: [published]
7 |
8 | jobs:
9 | notify:
10 | runs-on: ubuntu-latest
11 | steps:
12 | - uses: actions/checkout@v3
13 | - uses: actions/setup-node@v3
14 | with:
15 | node-version: 16
16 | - run: |
17 | GIT_COMMIT_TILE=$(git log -1 --pretty=format:"%s")
18 | curl -X POST --data-urlencode "payload={\"attachments\":[{\"fallback\":\"$GIT_AUTHOR_NAME released new $ORGANIZATION_NAME $REPO_NAME version of $GITHUB_REF_NAME\",\"color\":\"good\",\"title\":\"Version $GITHUB_REF_NAME of $ORGANIZATION_NAME $REPO_NAME released\",\"title_link\":\"$GITHUB_SERVER_URL/$GITHUB_REPOSITORY/releases/tag/$GITHUB_REF_NAME\",\"fields\":[{\"title\":\"Tag\",\"value\":\"<$GITHUB_SERVER_URL/$GITHUB_REPOSITORY/commits/$GITHUB_REF_NAME|$GITHUB_REF_NAME>\",\"short\":true},{\"title\":\"Commit\",\"value\":\"<$GITHUB_SERVER_URL/$GITHUB_REPOSITORY/tree/$GITHUB_REF_NAME|$GIT_COMMIT_TILE>\",\"short\":true}],\"footer\":\"$ORGANIZATION_NAME $REPO_NAME \",\"ts\":\"$( date +%s )\"}]}" $SLACK_WEBHOOK
19 | env:
20 | REPO_NAME: ${{ github.event.repository.name }}
21 | GIT_AUTHOR_NAME: "AutoCloud Deploy Bot"
22 | SLACK_WEBHOOK: ${{secrets.slack_api_endpoint}}
23 | ORGANIZATION_NAME: ${{secrets.organization_name}}
24 |
--------------------------------------------------------------------------------
/.github/workflows/pr-validator.yml:
--------------------------------------------------------------------------------
1 | ---
2 | name: pr-validator
3 |
4 | on:
5 | pull_request:
6 | types: [synchronize, opened, reopened, edited]
7 | branches:
8 | - main
9 | - beta
10 |
11 | jobs:
12 | pr-validation:
13 | runs-on: ubuntu-latest
14 | steps:
15 | - uses: actions/checkout@v3
16 | - run: |
17 | if [ "$TARGET_BRANCH" == "main" ] && [ "$SOURCE_BRANCH" == "beta" ]; then
18 | echo "Merge from $SOURCE_BRANCH to $TARGET_BRANCH is valid"
19 | exit 0
20 | elif [ "$TARGET_BRANCH" == "beta" ] && [ "$SOURCE_BRANCH" == "alpha" ]; then
21 | echo "Merge from $SOURCE_BRANCH to $TARGET_BRANCH is valid"
22 | exit 0
23 | else
24 | echo "You cannot merge from $SOURCE_BRANCH to $TARGET_BRANCH"
25 | exit 1
26 | fi
27 | env:
28 | SOURCE_BRANCH: ${{ github.head_ref }}
29 | TARGET_BRANCH: ${{ github.base_ref }}
30 |
--------------------------------------------------------------------------------
/.github/workflows/publish.yml:
--------------------------------------------------------------------------------
1 | ---
2 | name: publish
3 |
4 | on:
5 | push:
6 | branches:
7 | - alpha
8 | - beta
9 | - main
10 |
11 | jobs:
12 | build:
13 | runs-on: ubuntu-latest
14 | steps:
15 | - uses: actions/checkout@v3
16 | with:
17 | fetch-depth: 0
18 | persist-credentials: false
19 | token: ${{secrets.gh_token}}
20 | - uses: actions/setup-node@v3
21 | with:
22 | node-version: 16
23 | registry-url: "https://registry.npmjs.org"
24 |
25 | - name: Get cache directory
26 | id: yarn-cache-dir
27 | run: |
28 | echo "::set-output name=dir::$(yarn cache dir)"
29 |
30 | - name: Restoring cache
31 | uses: actions/cache@v3
32 | id: yarn-cache # use this to check for `cache-hit` ==> if: steps.yarn-cache.outputs.cache-hit != 'true'
33 | with:
34 | path: ${{ steps.yarn-cache-dir.outputs.dir }}
35 | key: ${{ runner.os }}-node-${{ hashFiles('**/yarn.lock') }}
36 | restore-keys: |
37 | ${{ runner.os }}-yarn-
38 |
39 | - name: Install Packages
40 | # NOTE: The --ignore-scripts flag is required to prevent leakage of NPM_TOKEN value
41 | # See https://github.com/actions/setup-node/blob/main/docs/advanced-usage.md#use-private-packages
42 | run: yarn install --frozen-lockfile --prefer-offline --ignore-scripts
43 |
44 | - name: Build
45 | run: yarn prepack
46 |
47 | # - name: Test
48 | # run: yarn lint
49 |
50 | - name: Publish
51 | run: npx semantic-release
52 | env:
53 | NODE_ENV: "cicd"
54 | NODE_AUTH_TOKEN: ${{secrets.npm_token}}
55 | GITHUB_TOKEN: ${{secrets.gh_token}}
56 | GIT_AUTHOR_NAME: "autocloud-deploy-bot"
57 | GIT_AUTHOR_EMAIL: "no-reply@autocloud.dev"
58 | GIT_COMMITTER_NAME: "autocloud-deploy-bot"
59 | GIT_COMMITTER_EMAIL: "no-reply@autocloud.dev"
60 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | *-debug.log
2 | *-error.log
3 | /.nyc_output
4 | /dist
5 | /lib
6 | /package-lock.json
7 | /tmp
8 | node_modules
9 | .cloud-graphrc.json
10 | aws_*.json
11 | /.vscode
12 | /.yalc
13 | yalc.lock
14 | /src/plugins
15 | /cg
16 | /dgraph
17 | .DS_Store
18 | /coverage
19 | /test/.testConfigDir
20 | /test/.testData
21 |
--------------------------------------------------------------------------------
/.npmignore:
--------------------------------------------------------------------------------
1 | *
2 | !lib/**/*
3 | !package.json
4 | !LICENSE
5 | !AUTHORS
6 | !CHANGELOG.md
7 | !CODE_OF_CONDUCT.md
8 | !CODEOWNERS
9 | !CONTRIBUTING.md
10 | !README.md
11 |
--------------------------------------------------------------------------------
/.prettierrc.json:
--------------------------------------------------------------------------------
1 | {
2 | "semi": false,
3 | "singleQuote": true,
4 | "arrowParens": "avoid"
5 | }
6 |
--------------------------------------------------------------------------------
/.releaserc.yml:
--------------------------------------------------------------------------------
1 | ---
2 | branches:
3 | - name: alpha
4 | channel: alpha
5 | prerelease: true
6 | - name: beta
7 | channel: beta
8 | prerelease: true
9 | - name: main
10 |
11 | plugins:
12 | - "@semantic-release/commit-analyzer"
13 | - "@semantic-release/release-notes-generator"
14 | - - "@semantic-release/changelog"
15 | - changelogFile: CHANGELOG.md
16 | - - "@semantic-release/git"
17 | - assets:
18 | - CHANGELOG.md
19 | - package.json
20 | - - "@semantic-release/npm"
21 | - npmPublish: true
22 | - "@semantic-release/github"
23 | verifyConditions:
24 | - "@semantic-release/changelog"
25 | - "@semantic-release/github"
26 | - "@semantic-release/npm"
27 | prepare:
28 | - "@semantic-release/changelog"
29 | - "@semantic-release/npm"
30 | - - "@semantic-release/git"
31 | - message: "chore(release): ${nextRelease.version} \n\n${nextRelease.notes}"
32 | publish:
33 | - "@semantic-release/github"
34 | - "@semantic-release/npm"
35 | success: false
36 | fail: false
37 | tagFormat: "${version}"
38 |
--------------------------------------------------------------------------------
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | # Contribution Guidelines
2 |
3 |
4 |
5 |
6 |
7 | - [Paid contributions](#paid-contributions)
8 |
9 | - [Creating a new provider](#creating-a-new-provider)
10 |
11 | - [Adding a new entity to an existing provider](#adding-a-new-entity-to-an-existing-provider)
12 |
13 | - [Adding new data to an existing entity](#adding-new-data-to-an-existing-entity)
14 |
15 |
16 |
17 | ## Getting Started
18 |
19 | To set up `CloudGraph` in development mode, first clone the CLI repo.
20 | **TODO:** update to correct url
21 |
22 | ```bash
23 | git clone https://github.com/cloudgraphdev/cli.git
24 | ```
25 |
26 | Next, if you are making updates to an **existing** provider module, clone that as well. For example, `cg-provider-aws`:
27 |
28 | ```bash
29 | git clone https://github.com/cloudgraphdev/cloudgraph-provider-aws.git
30 | ```
31 |
32 | `cd` into the provider repo and run the repo's build command. For `cg-provider-aws` this would be:
33 |
34 | ```
35 | yarn build
36 | ```
37 |
38 | In order to have the `CLI` pick up changes you have made locally, you must link the two repos. In the provider repo, run:
39 |
40 | ```bash
41 | yarn link
42 | ```
43 |
44 | The output of `yarn link` will tell you what command to run within the CLI repo. For example:
45 |
46 | ```bash
47 | yarn link cg-provider-aws
48 | ```
49 |
50 | Next, make your changes within the provider repo and run `yarn build` again. And that's it! Now the `CLI` will pick up your changes when it pulls in the provider client.
51 |
52 | ## Creating A New Provider
53 |
54 | To create a new provider, you must create a new NPM module that is publicly available in the NPM registry and conforms to the naming convention `@${yourOrgName}/cg-provider-${providerName}`, for example `@myOrg/cg-provider-pivotal`. The module must export a client for your provider that extends the `Client` class found in `@cloudgraph/sdk` (shown below) and defines the functions `configure`, `getSchema`, and `getData`. We will describe what each function should do below.
55 |
56 | ```
57 | export default abstract class Provider {
58 | constructor(config: any) {
59 | this.logger = config.logger
60 | this.config = config.provider
61 | }
62 |
63 | interface = inquirer
64 |
65 | logger: Logger
66 |
67 | config: any
68 |
69 |
70 |   async configure(flags: any): Promise<any> {
71 | throw new Error('Function configure has not been defined')
72 | }
73 |
74 | getSchema(): string {
75 | throw new Error('Function getSchema has not been defined')
76 | }
77 |
78 |   async getData({ opts }: { opts: Opts }): Promise<any> {
79 | throw new Error('Function getData has not been defined')
80 | }
81 | }
82 | ```
83 |
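For orientation, here is a rough sketch of what a provider package's exported client could look like once these functions are filled in. This assumes the base class above is exported by `@cloudgraph/sdk` as `Client`; the import shape, the schema string, and the stubbed return values are illustrative only:

```
import { Client } from '@cloudgraph/sdk' // assumed export name, per the description above

export default class MyProviderClient extends Client {
  constructor(config: any) {
    // super(config) lets the SDK base class set this.config and this.logger
    super(config)
  }

  async configure(flags: any): Promise<any> {
    // Prompt the user for settings (see Configure below) and return them
    return { regions: 'us-east-1', resources: 'alb,lambda,ebs' }
  }

  getSchema(): string {
    // Return the stringified GraphQL type definitions for this provider
    return 'type MyEntity { id: String! name: String }'
  }

  async getData({ opts }: { opts: any }): Promise<any> {
    // Gather entities and connections (see getData below)
    return { entities: [], connections: {} }
  }
}
```
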
84 | ### Configure
85 |
86 | The `configure` function is called by `@cloudgraph/cli` in the `INIT` command to allow each provider to control its own configuration. This configuration will then be passed to the provider client's `constructor` as `config.provider`. The provider client must call `super(config)` within its `constructor` to allow the `@cloudgraph/sdk` client to set `this.config`, which can then be consumed within the provider. The `configure` function should return an `Object` containing all the properties and values the provider wants to allow the end user to set. Here is an example configuration for `aws`:
87 |
88 | ```
89 | {
90 | "regions": "us-east-1,us-east-2,us-west-1",
91 | "resources": "alb,lambda,ebs"
92 | }
93 | ```
94 |
95 | You may prompt the user to enter values using `this.interface`, which is an instance of [Inquirer.js](https://github.com/SBoudrias/Inquirer.js).
96 |
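For example, a `configure` implementation could use that prompt interface to ask for regions and return the answers as the provider's config. This is a minimal sketch; the question shape and the hard-coded region choices are illustrative only:

```
async configure(): Promise<any> {
  // Ask which regions to scan, using the Inquirer instance exposed as this.interface
  const { regions } = await this.interface.prompt([
    {
      type: 'checkbox',
      name: 'regions',
      message: 'Select regions to scan',
      choices: ['us-east-1', 'us-east-2', 'us-west-1'],
    },
  ])
  // Whatever is returned here is stored under the provider's key in the CloudGraph config file
  return { regions: regions.join(',') }
}
```
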
97 | ### getSchema
98 |
99 | The `getSchema` function should return the stringified GraphQL schema that will be used by your provider. You can add any valid [Dgraph directive](https://dgraph.io/docs/graphql/directives/) to your schema in order to control the results of the schema generated by Dgraph. Below is an example implementation of `getSchema` used in `@cloudgraph/cg-provider-aws`.
100 |
101 | **NOTE**: You will only need to define the GraphQL **types** that describe your schema and Dgraph will automatically generate the queries and mutations to access those types.
102 |
103 | ```
104 | /**
105 | * getSchema is used to get the schema for provider
106 | * @returns A string of graphql sub schemas
107 | */
108 |
109 | getSchema(): string {
110 | const typesArray = loadFilesSync(path.join(__dirname), {
111 | recursive: true,
112 | extensions: ['graphql'],
113 | })
114 |
115 | return print(mergeTypeDefs(typesArray))
116 | }
117 | ```
118 |
119 | ### getData
120 |
121 | The `getData` function is responsible for collecting and returning all the provider data that you would like to be query-able by the end user. `@cloudgraph/cli` creates **nodes** in the graph through the concept of `entities` and **edges** in the graph through the concept of `connections`. `entities` are the provider data objects themselves as described by the defined GraphQL schema for the provider. `connections` are objects that describe how the tool should make connections **between** entities in the provider data. The data structure returned by the `getData` function should match the `ProviderData` interface below:
122 |
123 | **Note**: Please see the [`@cloudgraph/cg-template-provider`](https://github.com/cloudgraphdev/cloudgraph-provider-aws.git) (**TODO**: update with link to actual template) for an example of how to create entities and connections for a provider.
124 |
125 | ```
126 | export interface ServiceConnection {
127 | id: string // The id of the entity to make a connection to
128 |
129 | resourceType?: string // [Optional] The name of the connection
130 |
131 |   relation?: string // [Optional] The relation between the entity and its connection
132 |
133 | field: string // The property on the parent schema this connected entity should be added to
134 | }
135 |
136 |
137 |
138 | export interface Entity {
139 | name: string, // The name of the entity
140 |
141 | mutation: string, // The GraphQL mutation that should be called to push this entity to Dgraph
142 |
143 | /**
144 | * An array of the entity data supplied by the provider
145 | * that matches the GraphQL schema of that entity
146 | * (except for connections)
147 | */
148 | data: any[]
149 | }
150 |
151 |
152 |
153 | export interface ProviderData {
154 | entities: Entity[], // An array of objects matching the Entity interface
155 |
156 | /**
157 | * An object where the keys are the ids of parent entities
158 | * to make connections to and where the values are an array of ServiceConnection
159 | * objects denoting which child entities the parent is connected to.
160 | */
161 | connections: {[key: string]: ServiceConnection[]}
162 | }
163 | ```
164 |
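To make that shape concrete, here is a sketch of a `getData` return value for a provider with one parent entity connected to one child. The ids, entity names, and mutation strings are hypothetical placeholders:

```
// Placeholder mutation strings; a real provider generates or imports these
const myEntityMutation = 'ADD_MY_ENTITY_MUTATION'
const myChildEntityMutation = 'ADD_MY_CHILD_ENTITY_MUTATION'

async function getData(): Promise<ProviderData> {
  const entities: Entity[] = [
    {
      name: 'myEntity',
      mutation: myEntityMutation,
      data: [{ id: 'entity-1', name: 'example-parent' }],
    },
    {
      name: 'myChildEntity',
      mutation: myChildEntityMutation,
      data: [{ id: 'child-1', name: 'example-child' }],
    },
  ]
  // Attach child-1 to entity-1 via the `children` field on myEntity's schema
  const connections: { [key: string]: ServiceConnection[] } = {
    'entity-1': [
      { id: 'child-1', resourceType: 'myChildEntity', relation: 'child', field: 'children' },
    ],
  }
  return { entities, connections }
}
```
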
165 | ## Adding a new entity to an existing provider
166 |
167 | To add a new entity (e.g. adding RDS to AWS) to an existing provider (e.g. `@cloudgraph/cg-provider-aws`), you must create a new GraphQL sub-schema for that entity. This GraphQL schema should define the **type(s)** for the new entity and add any desired directives. You must then define the functions the provider requires to query, format, and form connections for the new entity. In the case of **officially** supported providers under the `@cloudgraph` org, this is done by creating the functions defined in the `Service` interface below.
168 |
169 | **NOTE**: community-supported providers may handle entities differently; consult with the creators of those providers if the way to add new entities is unclear.
170 |
171 | ```
172 | export interface Service {
173 | /**
174 | * function that formats an entity to match the GraphQL schema for that entity
175 | */
176 | format: ({
177 | service,
178 | region,
179 | account,
180 | }: {
181 | service: any
182 | region: string
183 | account: string
184 | }) => any
185 |
186 | /**
187 | * [Optional] function that returns the connections for an entity
188 | */
189 | getConnections?: ({
190 | service,
191 | region,
192 | account,
193 | data,
194 | }: {
195 | service: any
196 | region: string
197 | account: string
198 | data: any
199 |   }) => {[key: string]: ServiceConnection[]}
200 |
201 | mutation: string, // GraphQL mutation used to insert this entity into the DB
202 |
203 | /**
204 | * Function to get the RAW entity data from the provider (such as the aws-sdk)
205 | */
206 | getData: ({
207 | regions,
208 | credentials,
209 | opts,
210 | }: {
211 | regions: string
212 | credentials: any
213 | opts: Opts
214 | }) => any
215 | }
216 | ```
217 |
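As an illustration, a new entity's service implementation might fit together roughly like the sketch below. `fetchMyEntities`, `myEntityMutation`, and the `vpc` connection are hypothetical placeholders for your own SDK call, mutation string, and related entity:

```
const myEntityService: Service = {
  // Get the RAW entity data from the provider; fetchMyEntities is a hypothetical SDK helper
  getData: async ({ regions, credentials }) =>
    fetchMyEntities({ regions: regions.split(','), credentials }),

  // Shape one raw item to match the MyEntity GraphQL type
  format: ({ service, region, account }) => ({
    id: service.id,
    name: service.name,
    region,
    accountId: account,
  }),

  // Optional: connect each MyEntity to related entities found in the scanned data
  getConnections: ({ service }) => ({
    [service.id]: [
      { id: service.vpcId, resourceType: 'vpc', relation: 'child', field: 'vpc' },
    ],
  }),

  // GraphQL mutation used to insert this entity into the DB (placeholder)
  mutation: myEntityMutation,
}
```
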
218 | You must then ensure that the `getData` function for the provider client knows about the new entity. In the case of `@cloudgraph/cg-provider-aws`, this is done by updating the `ServiceMap` object and the `services.js` file to include the new entity. For example, if you created a new entity `MyEntity`, you would first update the `services.js` file to include it.
219 |
220 | ```
221 | export default {
222 | alb: 'alb',
223 | cloudwatch: 'cloudwatch',
224 | ebs: 'ebs',
225 |
226 | ...
227 |
228 | myEntity: 'myEntity', // The new entity you are adding
229 |
230 | ...
231 |
232 | subnet: 'subnet',
233 | vpc: 'vpc',
234 | }
235 | ```
236 |
237 | You would then update the `ServiceMap` to point to the new entity's class as seen below:
238 |
239 | ```
240 | export const ServiceMap = {
241 | [services.alb]: ALB,
242 | [services.cloudwatch]: CloudWatch,
243 |
244 | ...
245 |
246 | [services.myEntity]: MyEntity, // The new entity class you have created
247 | [services.vpc]: VPC,
248 |
249 | ...
250 |
251 | [services.ebs]: EBS,
252 | }
253 | ```
254 |
255 | ## Adding new data to an existing entity
256 |
257 | In order to add new data to an existing entity for **officially** supported providers, you must update the entity's `schema`, `format` function, and `getData` function. Let's say you have an entity called `MyEntity` with the following schema, `getData`, and `format`.
258 |
259 | ```
260 | type MyEntity {
261 | id: String!
262 | name: String!
263 | someDataPoint: String
264 | }
265 |
266 | const getData = () => {
267 | return {
268 | id: 'fakeId',
269 | name: 'fakeName',
270 | someDataFieldToChange: 'isADataPoint'
271 | }
272 | }
273 |
274 | const format = rawData => {
275 | return {
276 | id: rawData.id,
277 | name: rawData.name,
278 | someDataPoint: rawData.someDataFieldToChange
279 | }
280 | }
281 | ```
282 |
283 | and you wanted to add a new attribute called `myNewData`. You would update the entity like so:
284 |
285 | ```
286 | type MyEntity {
287 | id: String!
288 | name: String!
289 | someDataPoint: String
290 |   myNewData: String # or whatever type the new data is
291 | }
292 |
293 | const getData = () => {
294 | return {
295 | id: 'fakeId',
296 | name: 'fakeName',
297 | someDataFieldToChange: 'isADataPoint',
298 | myNewData: 'myNewDataToAdd'
299 | }
300 | }
301 |
302 | const format = rawData => {
303 | return {
304 | id: rawData.id,
305 | name: rawData.name,
306 | someDataPoint: rawData.someDataFieldToChange,
307 | myNewData: rawData.myNewData
308 | }
309 | }
310 | ```
311 |
312 | And that's it! The CLI will now pick up the new data point and push it to the DB.
313 |
314 | If you have any ideas for how to make this contribution guide more effective or easier to work with, please let us know; we would love to hear your feedback.
315 |
316 |
--------------------------------------------------------------------------------
/bin/dev:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env node
2 |
3 | // eslint-disable-next-line @typescript-eslint/no-var-requires
4 | const oclif = require('@oclif/core')
5 |
6 | // eslint-disable-next-line @typescript-eslint/no-var-requires
7 | const path = require('path')
8 |
9 | const project = path.join(__dirname, '..', 'tsconfig.json')
10 |
11 | // In dev mode -> use ts-node and dev plugins
12 | process.env.NODE_ENV = 'development'
13 |
14 | // eslint-disable-next-line @typescript-eslint/no-var-requires
15 | require('ts-node').register({project})
16 |
17 | // In dev mode, always show stack traces
18 | oclif.settings.debug = true;
19 |
20 | // Start the CLI
21 | oclif.run().then(oclif.flush).catch(oclif.Errors.handle)
22 |
--------------------------------------------------------------------------------
/bin/dev.cmd:
--------------------------------------------------------------------------------
1 | @echo off
2 |
3 | node "%~dp0\dev" %*
4 |
--------------------------------------------------------------------------------
/bin/run:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env node
2 |
3 | // eslint-disable-next-line @typescript-eslint/no-var-requires
4 | require('@oclif/core').run().then(require('@oclif/core/flush')).catch(require('@oclif/core/handle'))
5 |
6 |
--------------------------------------------------------------------------------
/bin/run.cmd:
--------------------------------------------------------------------------------
1 | @echo off
2 |
3 | node "%~dp0\run" %*
4 |
--------------------------------------------------------------------------------
/docs/images/autoCloud.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cloudgraphdev/cli/55f50f944d3b9b74e18a82329bcc93ed2265914f/docs/images/autoCloud.png
--------------------------------------------------------------------------------
/docs/images/exampleQueries.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cloudgraphdev/cli/55f50f944d3b9b74e18a82329bcc93ed2265914f/docs/images/exampleQueries.gif
--------------------------------------------------------------------------------
/docs/images/gqlAltair.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cloudgraphdev/cli/55f50f944d3b9b74e18a82329bcc93ed2265914f/docs/images/gqlAltair.png
--------------------------------------------------------------------------------
/docs/images/gqlPlayground.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cloudgraphdev/cli/55f50f944d3b9b74e18a82329bcc93ed2265914f/docs/images/gqlPlayground.png
--------------------------------------------------------------------------------
/docs/images/init.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cloudgraphdev/cli/55f50f944d3b9b74e18a82329bcc93ed2265914f/docs/images/init.gif
--------------------------------------------------------------------------------
/docs/images/install.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cloudgraphdev/cli/55f50f944d3b9b74e18a82329bcc93ed2265914f/docs/images/install.gif
--------------------------------------------------------------------------------
/docs/images/launch.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cloudgraphdev/cli/55f50f944d3b9b74e18a82329bcc93ed2265914f/docs/images/launch.gif
--------------------------------------------------------------------------------
/docs/images/load.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cloudgraphdev/cli/55f50f944d3b9b74e18a82329bcc93ed2265914f/docs/images/load.png
--------------------------------------------------------------------------------
/docs/images/logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cloudgraphdev/cli/55f50f944d3b9b74e18a82329bcc93ed2265914f/docs/images/logo.png
--------------------------------------------------------------------------------
/docs/images/scan.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cloudgraphdev/cli/55f50f944d3b9b74e18a82329bcc93ed2265914f/docs/images/scan.gif
--------------------------------------------------------------------------------
/docs/images/voyager.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cloudgraphdev/cli/55f50f944d3b9b74e18a82329bcc93ed2265914f/docs/images/voyager.png
--------------------------------------------------------------------------------
/examples/examples.txt:
--------------------------------------------------------------------------------
1 | Visit https://docs.cloudgraph.dev/ for examples and documentation
--------------------------------------------------------------------------------
/jest.config.js:
--------------------------------------------------------------------------------
1 | module.exports = {
2 | collectCoverage: true,
3 | collectCoverageFrom: ['src/**/*.ts'],
4 | coveragePathIgnorePatterns: [
5 | '/templates/',
6 | '/plugins/',
7 | '/examples/',
8 | '/docs/',
9 | '/coverage/',
10 | '/bin/',
11 | ],
12 | coverageReporters: ['lcov', 'text-summary'],
13 | coverageThreshold: {
14 | global: {
15 | branches: 80,
16 | functions: 80,
17 | lines: 80,
18 | statements: 80,
19 | },
20 | },
21 | moduleFileExtensions: ['ts', 'js', 'json'],
22 | testEnvironment: 'node',
23 |   testMatch: ['<rootDir>/test/**/*.test.ts'],
24 | transform: { '\\.ts$': 'ts-jest' },
25 | }
26 |
--------------------------------------------------------------------------------
/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "@cloudgraph/cli",
3 | "description": "Scan your cloud infrastructure data and query it with GraphQL",
4 | "version": "0.25.1",
5 | "author": "AutoCloud",
6 | "license": "MPL-2.0",
7 | "publishConfig": {
8 | "registry": "https://registry.npmjs.org/",
9 | "access": "public"
10 | },
11 | "main": "lib/index.js",
12 | "types": "lib/index.d.ts",
13 | "bin": {
14 | "cg": "./bin/run"
15 | },
16 | "bugs": "https://github.com/cloudgraphdev/cli/issues",
17 | "dependencies": {
18 | "@cloudgraph/sdk": "^0.22.0",
19 | "@graphql-tools/load-files": "^6.3.2",
20 | "@graphql-tools/merge": "^8.2.0",
21 | "@oclif/core": "1.6.1",
22 | "@oclif/plugin-help": "^5.1.12",
23 | "@types/lodash": "^4.14.175",
24 | "altair-express-middleware": "^4.0.8",
25 | "axios": "^0.21.1",
26 | "boxen": "^5.0.1",
27 | "cfonts": "^2.9.3",
28 | "chalk": "^4.1.1",
29 | "cli-table": "^0.3.6",
30 | "cosmiconfig": "^7.0.0",
31 | "detect-port": "^1.3.0",
32 | "express": "^4.17.1",
33 | "glob": "^7.1.7",
34 | "graphql": "^15.6.1",
35 | "graphql-playground-middleware-express": "^1.7.22",
36 | "graphql-tools": "^8.2.0",
37 | "inquirer": "^8.1.1",
38 | "jsonpath": "^1.1.1",
39 | "lodash": "^4.17.21",
40 | "npm": "^8.1.2",
41 | "oclif": "2.6.0",
42 | "open": "^8.2.1",
43 | "semver": "^7.3.5",
44 | "tslib": "^1"
45 | },
46 | "devDependencies": {
47 | "@autocloud/eslint-config": "^0.1.0",
48 | "@oclif/test": "^2.1.0",
49 | "@semantic-release/changelog": "^6.0.1",
50 | "@semantic-release/git": "^10.0.1",
51 | "@semantic-release/github": "^8.0.1",
52 | "@semantic-release/npm": "^9.0.1",
53 | "@types/chai": "^4",
54 | "@types/cli-table": "^0.3.0",
55 | "@types/detect-port": "^1.3.1",
56 | "@types/express": "^4.17.13",
57 | "@types/inquirer": "^7.3.2",
58 | "@types/jest": "^27.0.1",
59 | "@types/jsonpath": "^0.2.0",
60 | "@types/node": "^14",
61 | "@types/npm": "^7.19.0",
62 | "@types/pino": "^6.3.8",
63 | "@types/semver": "^7.3.9",
64 | "@typescript-eslint/eslint-plugin": "^4.28.5",
65 | "@typescript-eslint/parser": "^4.28.5",
66 | "aws-sdk": "^2.1060.0",
67 | "chai": "^4.3.4",
68 | "cpx": "^1.5.0",
69 | "cross-env": "^7.0.3",
70 | "eslint-config-airbnb-base": "14.2.1",
71 | "eslint-config-prettier": "^6.11.0",
72 | "eslint-plugin-import": "^2.22.1",
73 | "eslint-plugin-prettier": "^3.4.0",
74 | "globby": "^10",
75 | "husky": "^4.3.0",
76 | "jest": "^27.1.0",
77 | "jest-diff": "^27.1.0",
78 | "lint-staged": "^11.1.1",
79 | "mkdirp": "^1.0.4",
80 | "rimraf": "^3.0.2",
81 | "semantic-release": "^19.0.2",
82 | "ts-jest": "^27.0.5",
83 | "ts-node": "^10.2.1",
84 | "typescript": "4.3.5"
85 | },
86 | "engines": {
87 | "node": ">=16.0.0"
88 | },
89 | "files": [
90 | "/bin",
91 | "/lib",
92 | "/npm-shrinkwrap.json",
93 | "/oclif.manifest.json"
94 | ],
95 | "homepage": "https://www.cloudgraph.dev/",
96 | "keywords": [
97 | "cloudgraph"
98 | ],
99 | "oclif": {
100 | "commands": "./lib/commands",
101 | "bin": "cg",
102 | "dirname": "cloudgraph",
103 | "plugins": [
104 | "@oclif/plugin-help"
105 | ],
106 | "topicSeparator": " ",
107 | "additionalHelpFlags": [
108 | "-h"
109 | ],
110 | "additionalVersionFlags": [
111 | "-v"
112 | ],
113 | "update": {
114 | "s3": {
115 | "bucket": "cloudgraph-production-cli-assets"
116 | },
117 | "node": {
118 | "version": "16.0.0"
119 | }
120 | }
121 | },
122 | "repository": "github:cloudgraphdev/cli",
123 | "scripts": {
124 | "build": "yarn prepack && oclif pack tarballs -t linux-x64,linux-arm,darwin-x64,darwin-arm64 && yarn postpack",
125 | "launch": "./bin/run launch",
126 | "postpack": "rm -f oclif.manifest.json",
127 | "posttest": "eslint . --ext .ts --config .eslintrc.json",
128 | "prepack": "rm -rf lib && npx tsc -b --force && oclif manifest && oclif readme && yarn cpx 'src/scripts/*' lib/scripts",
129 | "run:init": "cross-env NODE_ENV=development ./bin/dev init",
130 | "run:init:aws": "cross-env NODE_ENV=development ./bin/dev init aws",
131 | "run:load": "cross-env NODE_ENV=development ./bin/dev load",
132 | "run:load:aws": "cross-env NODE_ENV=development ./bin/dev load aws",
133 | "run:scan": "cross-env NODE_ENV=development ./bin/dev scan",
134 | "run:scan:aws": "cross-env NODE_ENV=development ./bin/dev scan aws",
135 | "run:launch": "cross-env NODE_ENV=development ./bin/dev launch",
136 | "run:teardown": "cross-env NODE_ENV=development ./bin/dev teardown",
137 | "run:update": "cross-env NODE_ENV=test ./bin/dev update",
138 | "run:provider": "cross-env NODE_ENV=test ./bin/dev provider",
139 | "run:policy": "cross-env NODE_ENV=test ./bin/dev policy",
140 | "test": "cross-env CG_DEBUG=-1 NODE_ENV=test jest --coverage=true --detectOpenHandles --runInBand --forceExit",
141 | "test:debug": "cross-env CG_DEBUG=5 NODE_ENV=test jest --coverage=false --detectOpenHandles --runInBand --forceExit",
142 | "version": "oclif readme && git add README.md",
143 | "lint": "eslint --config .eslintrc.json --ext .js,.ts ./",
144 | "lint:fix": "eslint --fix --config .eslintrc.json --ext .js,.ts ./",
145 | "homebrew": "node release/scripts/homebrew.js"
146 | },
147 | "husky": {
148 | "hooks": {
149 | "pre-commit": "yarn lint-staged"
150 | }
151 | },
152 | "lint-staged": {
153 | "*.{ts,graphql,json}": [
154 | "yarn lint:fix",
155 | "git add --force"
156 | ]
157 | }
158 | }
159 |
--------------------------------------------------------------------------------
/release/scripts/homebrew.js:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env node
2 |
3 | const fs = require('fs')
4 | const execa = require('execa')
5 | const https = require('https')
6 | const path = require('path')
7 | const rm = require('rimraf')
8 | const mkdirp = require('mkdirp')
9 | const { promisify } = require('util')
10 | const { pipeline } = require('stream')
11 | const crypto = require('crypto')
12 | const AWS = require('aws-sdk')
13 |
14 | const NODE_JS_BASE = 'https://nodejs.org/download/release'
15 | const CLI_DIR = path.join(__dirname, '..', '..')
16 | const DIST_DIR = path.join(CLI_DIR, 'dist')
17 | const PJSON = require(path.join(CLI_DIR, 'package.json'))
18 | const NODE_VERSION = PJSON.oclif.update.node.version
19 | const SHORT_VERSION = PJSON.version
20 | async function getText(url) {
21 | return new Promise((resolve, reject) => {
22 | https
23 | .get(url, res => {
24 | let buffer = []
25 |
26 | res.on('data', buf => {
27 | buffer.push(buf)
28 | })
29 |
30 | res.on('close', () => {
31 | resolve(Buffer.concat(buffer).toString('utf-8'))
32 | })
33 | })
34 | .on('error', reject)
35 | })
36 | }
37 |
38 | async function getDownloadInfoForNodeVersion(version) {
39 | // https://nodejs.org/download/release/v12.21.0/SHASUMS256.txt
40 | const url = `${NODE_JS_BASE}/v${version}/SHASUMS256.txt`
41 | const shasums = await getText(url)
42 | const shasumLine = shasums.split('\n').find(line => {
43 | return line.includes(`node-v${version}-darwin-x64.tar.xz`)
44 | })
45 |
46 | if (!shasumLine) {
47 | throw new Error(`could not find matching shasum for ${version}`)
48 | }
49 |
50 | const [shasum, filename] = shasumLine.trim().split(/\s+/)
51 | return {
52 | url: `${NODE_JS_BASE}/v${version}/${filename}`,
53 | sha256: shasum,
54 | }
55 | }
56 |
57 | async function calculateSHA256(fileName) {
58 | const hash = crypto.createHash('sha256')
59 | hash.setEncoding('hex')
60 | await promisify(pipeline)(fs.createReadStream(fileName), hash)
61 | return hash.read()
62 | }
63 |
64 | async function uploadToS3(file) {
65 | console.log(`Uploading ${file} to S3`)
66 | await new Promise((resolve, reject) => {
67 | const pathToFile = path.join(DIST_DIR, file)
68 | const fileStream = fs.createReadStream(pathToFile)
69 | fileStream.on('error', err => {
70 | if (err) {
71 | reject(err)
72 | }
73 | })
74 | fileStream.on('open', () => {
75 | const credentials = new AWS.SharedIniFileCredentials({
76 | profile: 'cloudgraph-iac',
77 | callback: err => {
78 | if (err) {
79 | console.log('No credentials found for profile cloudgraph-iac')
80 | console.log(err)
81 | }
82 | },
83 | })
84 |       const sts = new AWS.STS()
85 | const { roleArn } = credentials
86 | const options = {
87 | RoleSessionName: 'CloudGraph-IAC',
88 | RoleArn: roleArn,
89 | }
90 | console.log(options)
91 | sts.assumeRole(options, (err, data) => {
92 | if (err) {
93 | console.log(`No valid credentials found for roleARN: ${roleArn}`)
94 | console.log(err)
95 | reject(err)
96 | } else {
97 | // successful response
98 | console.log('successfully got access keys from role')
99 | const {
100 | AccessKeyId: accessKeyId,
101 | SecretAccessKey: secretAccessKey,
102 | SessionToken: sessionToken,
103 | } = data.Credentials
104 | const creds = {
105 | accessKeyId,
106 | secretAccessKey,
107 | sessionToken,
108 | }
109 | const S3 = new AWS.S3({ credentials: creds })
110 | S3.putObject(
111 | {
112 | Bucket: PJSON.oclif.update.s3.bucket,
113 | Key: `cg-v${SHORT_VERSION}/${file}`,
114 | Body: fileStream,
115 | ServerSideEncryption: 'AES256',
116 | ACL: 'bucket-owner-full-control',
117 | },
118 |             err => {
119 |               // resolve only once the upload callback completes
120 |               if (err) reject(err)
121 |               else resolve()
122 |             }
123 |           )
124 |
125 | }
126 | })
127 | })
128 | })
129 | }
130 |
131 | function getFilesByOS(os) {
132 | const files = fs.readdirSync(DIST_DIR)
133 | return files.filter(file => file.includes(os) && !file.includes('.xz'))
134 | }
135 |
136 | const ROOT = path.join(__dirname, '..')
137 | const TEMPLATES = path.join(ROOT, 'templates')
138 |
139 | const CLI_ASSETS_URL =
140 | process.env.CLI_ASSETS_URL || 'https://cli-assets.cloudgraph.dev'
141 |
142 | async function updateCgFormula(brewDir) {
143 | const templatePath = path.join(TEMPLATES, 'cg.rb')
144 | const template = fs.readFileSync(templatePath).toString('utf-8')
145 | const files = getFilesByOS('darwin-x64')
146 | const zipFile = files.find(file => file.includes('tar.gz'))
147 | const pathToFile = path.join(DIST_DIR, zipFile)
148 | const sha256 = await calculateSHA256(pathToFile)
149 | const url = `${CLI_ASSETS_URL}/cg-v${SHORT_VERSION}/${zipFile}`
150 |
151 | const templateReplaced = template
152 | .replace('__VERSION__', SHORT_VERSION)
153 | .replace('__CLI_DOWNLOAD_URL__', url)
154 | .replace('__TARBALL_HASH__', sha256)
155 | .replace('__NODE_VERSION__', NODE_VERSION)
156 |
157 | fs.writeFileSync(path.join(brewDir, 'cg.rb'), templateReplaced)
158 | if (process.env.WRITE_TO_S3 === undefined) {
159 | files.forEach(async file => {
160 | await uploadToS3(file)
161 | })
162 | }
163 | }
164 |
165 | async function updateCgNodeFormula(brewDir) {
166 | const formulaPath = path.join(brewDir, 'cg-node.rb')
167 |
168 | console.log(`updating CloudGraph-node Formula in ${formulaPath}`)
169 | console.log(`getting SHA and URL for Node.js version ${NODE_VERSION}`)
170 |
171 | const { url, sha256 } = await getDownloadInfoForNodeVersion(NODE_VERSION)
172 |
173 | console.log(`done getting SHA for Node.js version ${NODE_VERSION}: ${sha256}`)
174 | console.log(`done getting URL for Node.js version ${NODE_VERSION}: ${url}`)
175 |
176 | const templatePath = path.join(TEMPLATES, 'cg-node.rb')
177 | const template = fs.readFileSync(templatePath).toString('utf-8')
178 |
179 | const templateReplaced = template
180 | .replace('__NODE_BIN_URL__', url)
181 | .replace('__NODE_SHA256__', sha256)
182 | .replace('__NODE_VERSION__', NODE_VERSION)
183 |
184 | fs.writeFileSync(formulaPath, templateReplaced)
185 | console.log(`done updating cg-node Formula in ${formulaPath}`)
186 | }
187 |
188 | async function updateHomebrew() {
189 | const tmp = path.join(__dirname, 'tmp')
190 | const homebrewDir = path.join(tmp, 'homebrew-tap')
191 | mkdirp.sync(tmp)
192 | rm.sync(homebrewDir)
193 |
194 | console.log(
195 | `cloning https://github.com/cloudgraphdev/homebrew-tap to ${homebrewDir}`
196 | )
197 | await execa('git', [
198 | 'clone',
199 | 'git@github.com:cloudgraphdev/homebrew-tap.git',
200 | homebrewDir,
201 | ])
202 | console.log(`done cloning cloudgraphdev/homebrew-tap to ${homebrewDir}`)
203 |
204 | console.log('updating local git...')
205 | await updateCgNodeFormula(homebrewDir)
206 | await updateCgFormula(homebrewDir).catch((err) => { throw new Error(err) })
207 |
208 | // run in git in cloned cloudgraph/homebrew-tap git directory
209 | const git = async (args, opts = {}) => {
210 | await execa('git', ['-C', homebrewDir, ...args], opts)
211 | }
212 | try {
213 | await git(['add', '.'])
214 | await git(['config', '--local', 'core.pager', 'cat'])
215 | await git(['diff', '--cached'], { stdio: 'inherit' })
216 | await git(['commit', '-m', `CloudGraph v${SHORT_VERSION}`])
217 | if (process.env.SKIP_GIT_PUSH === undefined) {
218 | await git(['push', 'origin', 'main'])
219 | }
220 | } catch (e) {
221 | console.log('Error attempting to update git repo')
222 | console.log(e)
223 | }
224 | }
225 |
226 | updateHomebrew().catch(err => {
227 | console.error(`error running scripts/release/homebrew.js`, err)
228 | process.exit(1)
229 | })
230 |
--------------------------------------------------------------------------------
/release/templates/cg-node.rb:
--------------------------------------------------------------------------------
1 | class CgNode < Formula
2 | desc "node.js dependency for CloudGraph"
3 | homepage "https://cloudgraph.dev"
4 | url "__NODE_BIN_URL__"
5 | version "__NODE_VERSION__"
6 | sha256 "__NODE_SHA256__"
7 | keg_only "cg-node is only used by CloudGraph CLI (cloudgraphdev/tap/cli), which explicitly requires from Cellar"
8 |
9 | def install
10 | bin.install buildpath/"bin/node"
11 | end
12 |
13 | def test
14 | output = system bin/"node", "version"
15 | assert output.strip == "v#{version}"
16 | end
17 | end
--------------------------------------------------------------------------------
/release/templates/cg.rb:
--------------------------------------------------------------------------------
1 | # This file is automatically generated by https://github.com/heroku/cli/blob/master/scripts/release/homebrew.js
2 | # Do not update this file directly;
3 | # Please update the template instead:
4 | # https://github.com/heroku/cli/blob/master/scripts/release/homebrew/templates/heroku.rb
5 | class Cg < Formula
6 | desc "Query your cloud and SaaS data with GraphQL"
7 | homepage "https://cloudgraph.dev"
8 | version "__VERSION__"
9 | url "__CLI_DOWNLOAD_URL__"
10 | sha256 "__TARBALL_HASH__"
11 |
12 | def install
13 | inreplace "bin/cg", /^CLIENT_HOME=/, "export CG_OCLIF_CLIENT_HOME=#{lib/"client"}\nCLIENT_HOME="
14 | libexec.install Dir["**"]
15 | bin.install_symlink libexec/"bin/cg"
16 |
17 | # bash_completion.install libexec/"node_modules/@heroku-cli/plugin-autocomplete/autocomplete/brew/bash" => "heroku"
18 | # zsh_completion.install libexec/"node_modules/@heroku-cli/plugin-autocomplete/autocomplete/brew/zsh/_heroku"
19 | end
20 |
21 | # def caveats; <<~EOS
22 | # To use the Heroku CLI's autocomplete --
23 | # Via homebrew's shell completion:
24 | # 1) Follow homebrew's install instructions https://docs.brew.sh/Shell-Completion
25 | # NOTE: For zsh, as the instructions mention, be sure compinit is autoloaded
26 | # and called, either explicitly or via a framework like oh-my-zsh.
27 | # 2) Then run
28 | # $ heroku autocomplete --refresh-cache
29 | # OR
30 | # Use our standalone setup:
31 | # 1) Run and follow the install steps:
32 | # $ heroku autocomplete
33 | # EOS
34 | # end
35 | # end 3
36 |
37 | test do
38 | system bin/"cg", "version"
39 | end
40 | end
41 |
--------------------------------------------------------------------------------
/src/commands/init.ts:
--------------------------------------------------------------------------------
1 | import {
2 | ConfiguredPlugin,
3 | StorageEngineConnectionConfig,
4 | } from '@cloudgraph/sdk'
5 | import { Flags as CommandFlags } from '@oclif/core'
6 | import fs from 'fs'
7 | import path from 'path'
8 | import chalk from 'chalk'
9 | import { fileUtils, getStorageEngineConnectionConfig } from '../utils'
10 |
11 | import Command from './base'
12 | import { CloudGraphConfig } from '../types'
13 | import {
14 | dGraphConfigQuestions,
15 | getProviderQuestion,
16 | overwriteQuestionPrompt,
17 | queryEngineConfigQuestions,
18 | } from '../utils/questions'
19 |
20 | export default class Init extends Command {
21 | static description = 'Set initial configuration for providers'
22 |
23 | static examples = [
24 | '$ cg init',
25 | '$ cg init aws [Initialize AWS provider]',
26 | '$ cg init aws -r [Specify resources to crawl]',
27 | ]
28 |
29 | static flags = {
30 | ...Command.flags,
31 | // select resources flag
32 | resources: CommandFlags.boolean({ char: 'r' }),
33 | }
34 |
35 | static hidden = false
36 |
37 | static strict = false
38 |
39 | static args = Command.args
40 |
41 |   async getProvider(): Promise<string> {
42 | const { provider } = await this.interface.prompt(getProviderQuestion)
43 | this.logger.debug(provider)
44 | return provider
45 | }
46 |
47 | async promptForConfigOverwrite(
48 | category: string
49 |   ): Promise<Record<string, any>> {
50 | return this.interface.prompt(overwriteQuestionPrompt(category))
51 | }
52 |
53 | /**
54 |    * Searches in the config object for the provider to see if it has already been configured;
55 |    * asks for config details if not found or if the user chooses to overwrite the existing config
56 | */
57 |   async checkProviderConfig(provider: string, client: any): Promise<any> {
58 | const config = this.getCGConfig(provider)
59 | if (config) {
60 | this.logger.info(`Config for ${provider} already exists`)
61 | const { overwrite } = await this.promptForConfigOverwrite(`${provider}'s`)
62 | if (overwrite) {
63 | return client.configure()
64 | }
65 | this.logger.warn(`Init command for ${provider} aborted`)
66 | return config
67 | }
68 | return client.configure()
69 | }
70 |
71 | getPluginConfig(): {
72 | plugins: { [pluginType: string]: ConfiguredPlugin[] }
73 | } {
74 | const plugins = this.getCGConfigKey('plugins')
75 | return {
76 | plugins: plugins ?? {},
77 | }
78 | }
79 |
80 | async askForDGraphConfig(overwrite = false): Promise<{
81 | versionLimit: string
82 | storageConfig: StorageEngineConnectionConfig
83 | }> {
84 | let {
85 | flags: { dgraph, 'version-limit': versionLimit = '10' },
86 | } = await this.parse(Init)
87 | if (!dgraph && overwrite) {
88 | const { receivedUrl, vLimit } = await this.interface.prompt(
89 | dGraphConfigQuestions
90 | )
91 | dgraph = receivedUrl
92 | versionLimit = vLimit
93 | }
94 | this.logger.info(
95 | "Note that none of your cloud's information is ever sent to or stored by CloudGraph or third parties"
96 | )
97 | if (!overwrite) {
98 | return {
99 | storageConfig: this.getCGConfigKey('storageConfig'),
100 | versionLimit: this.getCGConfigKey('versionLimit'),
101 | }
102 | }
103 | return {
104 | storageConfig: getStorageEngineConnectionConfig(dgraph),
105 | versionLimit: versionLimit ?? '10',
106 | }
107 | }
108 |
109 | async askForQueryEngineConfig(overwrite = false): Promise<{
110 | queryEngine: string
111 | port: number | string
112 | }> {
113 | const {
114 | flags: { port = '5555' },
115 | } = await this.parse(Init)
116 | let {
117 | flags: { 'query-engine': queryEngine },
118 | } = await this.parse(Init)
119 | if (!queryEngine && overwrite) {
120 | const { inputQueryEngine } = await this.interface.prompt(
121 | queryEngineConfigQuestions
122 | )
123 | queryEngine = inputQueryEngine
124 | }
125 | if (!overwrite) {
126 | return {
127 | queryEngine: this.getCGConfigKey('queryEngine'),
128 | port: this.getCGConfigKey('port'),
129 | }
130 | }
131 | return { queryEngine: queryEngine ?? 'playground', port }
132 | }
133 |
134 |   async getCloudGraphConfig(overwrite = false): Promise<CloudGraphConfig> {
135 | return {
136 | ...this.getPluginConfig(),
137 | ...(await this.askForDGraphConfig(overwrite)),
138 | ...(await this.askForQueryEngineConfig(overwrite)),
139 | }
140 | }
141 |
142 | /**
143 | * Searches the config object for the CloudGraph config to see if it is already configured;
144 | * asks for config details if not found or if the user chooses to overwrite the existing config
145 | */
146 | async fetchCloudGraphConfig(): Promise<CloudGraphConfig> {
147 | const cloudGraphConfig = this.getCGConfig('cloudGraph')
148 | if (cloudGraphConfig) {
149 | this.logger.info('CloudGraph config found...')
150 | const { overwrite } = await this.promptForConfigOverwrite('CloudGraph')
151 | return this.getCloudGraphConfig(overwrite)
152 | }
153 | return this.getCloudGraphConfig(true)
154 | }
155 |
156 | /**
157 | * Ensures that the configuration path exists and saves the CloudGraph json config file in it
158 | */
159 | saveCloudGraphConfigFile(configResult: CloudGraphConfig): void {
160 | const { configDir } = this.config
161 | const previousConfig = this.getCGConfig()
162 | const newConfig = configResult
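   | // Preserve existing top-level keys (e.g. other providers' configs) that the new result does not include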
163 | if (previousConfig) {
164 | for (const key of Object.keys(previousConfig)) {
165 | if (!configResult[key]) {
166 | newConfig[key] = previousConfig[key]
167 | }
168 | }
169 | } else {
170 | fileUtils.makeDirIfNotExists(configDir)
171 | }
172 | fs.writeFileSync(
173 | path.join(configDir, '.cloud-graphrc.json'),
174 | JSON.stringify(newConfig, null, 2)
175 | )
176 | }
177 |
178 | async run(): Promise<void> {
179 | const { argv } = await this.parse(Init)
180 | const { configDir, dataDir } = this.config
181 | // const opts: Opts = {logger: this.logger, debug, devMode}
182 | // First determine the provider if one has not been passed in args
183 | // if no provider is passed, they can select from a list of officially supported providers
184 | let allProviders: string[] = argv
185 | if (allProviders.length === 0) {
186 | allProviders = [await this.getProvider()]
187 | }
188 | const configResult: { [key: string]: Record<string, any> } = {}
189 | for (const provider of allProviders) {
190 | /**
191 | * setup base config for provider
192 | */
193 | configResult[provider] = {}
194 | /**
195 | * First install and require the provider plugin
196 | */
197 | const { client } = await this.getProviderClient(provider)
198 | if (!client) {
199 | this.logger.warn(
200 | `There was an issue initializing ${provider} plugin, skipping...`
201 | )
202 | continue // eslint-disable-line no-continue
203 | }
204 | configResult[provider] = await this.checkProviderConfig(provider, client)
205 | this.logger.debug(
206 | `${provider} provider config: ${JSON.stringify(configResult[provider])}`
207 | )
208 | }
209 | const cloudGraphConfig = await this.fetchCloudGraphConfig()
210 | if (cloudGraphConfig) {
211 | configResult.cloudGraph = cloudGraphConfig
212 | }
213 | this.saveCloudGraphConfigFile(configResult)
214 | this.logger.success(
215 | `Your config has been successfully stored at ${chalk.italic.green(
216 | path.join(configDir, '.cloud-graphrc.json')
217 | )}`
218 | )
219 | this.logger.success(
220 | `Your data will be stored at ${chalk.italic.green(
221 | path.join(dataDir, this.versionDirectory)
222 | )}`
223 | )
224 | }
225 | }
226 |
--------------------------------------------------------------------------------
/src/commands/launch.ts:
--------------------------------------------------------------------------------
1 | import chalk from 'chalk'
2 | import path from 'path'
3 |
4 | import Command from './base'
5 | import {
6 | sleep,
7 | fileUtils,
8 | getDefaultStorageEngineConnectionConfig,
9 | execCommand,
10 | findExistingDGraphContainerId,
11 | } from '../utils'
12 | import DgraphEngine from '../storage/dgraph'
13 | import { DGRAPH_CONTAINER_LABEL, DGRAPH_DOCKER_IMAGE_NAME } from '../utils/constants'
14 |
15 | export default class Launch extends Command {
16 | static description = 'Launch an instance of Dgraph to store data'
17 |
18 | static examples = ['$ cg launch']
19 |
20 | static strict = false
21 |
22 | static flags = {
23 | ...Command.flags,
24 | }
25 |
26 | static hidden = false
27 |
28 | static args = Command.args
29 |
30 | createDgraphFolder(): void {
31 | const { dataDir } = this.config
32 | fileUtils.makeDirIfNotExists(path.join(dataDir, '/dgraph'))
33 | }
34 |
35 | async checkForDockerInstallation(): Promise<void> {
36 | await execCommand('docker -v')
37 | }
38 |
39 | async pullDGraphDockerImage(): Promise<void> {
40 | await execCommand(`docker pull ${DGRAPH_DOCKER_IMAGE_NAME}`)
41 | }
42 |
43 | async startDGraphContainer(
44 | containerId?: string
45 | ): Promise<undefined | unknown> {
46 | const { dataDir } = this.config
47 | let output: undefined | unknown
48 | if (containerId) {
49 | output = await execCommand(`docker container start ${containerId}`)
50 | } else {
51 | const {
52 | connectionConfig: {
53 | port = getDefaultStorageEngineConnectionConfig().port,
54 | },
55 | } = await this.getStorageEngine() as DgraphEngine
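   | // Map host ports to Dgraph's default container ports: 5080/6080 (zero), 8080 (alpha HTTP/GraphQL), 9080 (alpha gRPC), 8000 (Ratel UI);
   | // the configured storage port is published against alpha's HTTP endpoint, which the CLI queries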
56 | output = await execCommand(
57 | `docker run -d -p 8995:5080 -p 8996:6080 -p ${port}:8080 -p 8998:9080 -p 8999:8000 --label ${
58 | DGRAPH_CONTAINER_LABEL
59 | } -v ${dataDir}/dgraph:/dgraph --name dgraph ${DGRAPH_DOCKER_IMAGE_NAME}`
60 | )
61 | }
62 | return output
63 | }
64 |
65 | // eslint-disable-next-line no-warning-comments
66 | // TODO: convert this func to handle any storage provider
67 | async run(): Promise<void> {
68 | // const {flags: {debug, dev: devMode}} = this.parse(Launch)
69 | // eslint-disable-next-line no-warning-comments
70 | // TODO: not a huge fan of this pattern, rework how to do debug and devmode tasks (specifically how to use in providers)
71 | // const opts: Opts = {logger: this.logger, debug, devMode}
72 | this.logger.startSpinner('Checking for Docker')
73 | try {
74 | await this.checkForDockerInstallation()
75 | this.logger.successSpinner('Docker found')
76 | } catch (error: any) {
77 | this.logger.failSpinner(
78 | 'It appears Docker is not installed, please install it at: https://docs.docker.com/get-docker/'
79 | // { level: 'error' }
80 | )
81 | this.logger.error(error)
82 | this.exit()
83 | }
84 |
85 | this.logger.startSpinner(
86 | 'Checking for an existing Dgraph docker instance'
87 | )
88 | let runningContainerId
89 | try {
90 | const containerId = await findExistingDGraphContainerId('running')
91 | if (containerId) {
92 | runningContainerId = containerId
93 | }
94 | } catch (error: any) {
95 | this.logger.error(error)
96 | }
97 |
98 | let exitedContainerId
99 | if (!runningContainerId) {
100 | try {
101 | const containerId = await findExistingDGraphContainerId('exited')
102 | if (containerId) {
103 | exitedContainerId = containerId
104 | this.logger.successSpinner('Reusable container found!')
105 | }
106 | } catch (error: any) {
107 | this.logger.error(error)
108 | }
109 | }
110 |
111 | if (!exitedContainerId && !runningContainerId) {
112 | this.logger.successSpinner('No reusable instances found')
113 | this.logger.startSpinner(
114 | 'Pulling Dgraph Docker image'
115 | )
116 | try {
117 | this.createDgraphFolder()
118 | await this.pullDGraphDockerImage()
119 | this.logger.successSpinner('Pulled Dgraph Docker image')
120 | } catch (error: any) {
121 | this.logger.failSpinner(
122 | 'Failed pulling Dgraph Docker image, please check your Docker installation'
123 | // { level: 'error' }
124 | )
125 | this.logger.error(error)
126 | }
127 | }
128 |
129 | if (runningContainerId) {
130 | this.logger.successSpinner('Reusable container found')
131 | } else {
132 | this.logger.startSpinner(
133 | `Spinning up ${exitedContainerId ? 'existing' : 'new'} Dgraph instance`
134 | )
135 | try {
136 | if (exitedContainerId) {
137 | await this.startDGraphContainer(exitedContainerId)
138 | } else {
139 | await this.startDGraphContainer()
140 | }
141 | this.logger.successSpinner('Dgraph instance running')
142 | } catch (error: any) {
143 | this.logger.failSpinner('Failed starting Dgraph instance')
144 | this.logger.error(error)
145 | throw new Error(
146 | 'Dgraph was unable to start: Failed starting stopped Dgraph instance'
147 | )
148 | }
149 | }
150 |
151 | await this.checkIfInstanceIsRunningReportStatus()
152 | }
153 |
154 | async checkIfInstanceIsRunningReportStatus(): Promise<void> {
155 | this.logger.startSpinner(
156 | 'Running health check on Dgraph'
157 | )
158 | // eslint-disable-next-line no-warning-comments
159 | // TODO: smaller sleep time and exponential backoff for ~5 tries
160 | await sleep(10000)
161 | try {
162 | const storageEngine = await this.getStorageEngine()
163 | const running = await storageEngine.healthCheck(false)
164 | if (running) {
165 | this.logger.successSpinner('Dgraph health check passed')
166 | } else {
167 | throw new Error('Dgraph was unable to start: Dgraph not running')
168 | }
169 | } catch (error: any) {
170 | this.logger.debug(error)
171 | throw new Error('Dgraph was unable to start: Failed running health check')
172 | }
173 | this.logger.success(
174 | `Access your dgraph instance at ${chalk.underline.green(
175 | this.getHost((await this.getStorageEngine() as DgraphEngine).connectionConfig)
176 | )}`
177 | )
178 | this.logger.info(
179 | `For more information on dgraph, see the dgraph docs at: ${chalk.underline.green(
180 | 'https://dgraph.io/docs/graphql/'
181 | )}`
182 | )
183 | }
184 | }
185 |
--------------------------------------------------------------------------------
/src/commands/load.ts:
--------------------------------------------------------------------------------
1 | import { getSchemaFromFolder } from '@cloudgraph/sdk'
2 | import chalk from 'chalk'
3 | import fs from 'fs'
4 | import path from 'path'
5 | import { isEmpty } from 'lodash'
6 |
7 | import Command from './base'
8 | import { fileUtils } from '../utils'
9 | import { loadAllData } from '../utils/data'
10 |
11 | export default class Load extends Command {
12 | static description = 'Load a specific version of your CloudGraph data'
13 |
14 | static examples = [
15 | '$ cg load [Load data for all providers configured]',
16 | '$ cg load aws [Load data for AWS]',
17 | ]
18 |
19 | static strict = false
20 |
21 | static flags = {
22 | ...Command.flags,
23 | }
24 |
25 | static hidden = false
26 |
27 | static args = Command.args
28 |
29 | async run(): Promise<void> {
30 | const {
31 | argv,
32 | // flags: { debug, dev: devMode },
33 | } = await this.parse(Load)
34 | const { dataDir } = this.config
35 | const storageEngine = await this.getStorageEngine()
36 | const storageRunning = await storageEngine.healthCheck()
37 | if (!storageRunning) {
38 | this.logger.error(
39 | `Storage engine check at ${storageEngine.host} FAILED, canceling LOAD`
40 | )
41 | this.exit()
42 | }
43 | // const opts: Opts = { logger: this.logger, debug, devMode }
44 | let allProviders = argv
45 | // if (!provider) {
46 | // provider = await this.getProvider()
47 | // }
48 |
49 | /**
50 | * Handle 2 methods of scanning, either for explicitly passed providers OR
51 | * try to scan for all providers found within the config file
52 | * if we still have 0 providers, fail and exit.
53 | */
54 | if (allProviders.length >= 1) {
55 | this.logger.info(
56 | `Loading data to Dgraph for providers: ${allProviders.join(' | ')}`
57 | )
58 | } else {
59 | this.logger.info('Searching config for initialized providers')
60 | const config = this.getCGConfig()
61 | allProviders = Object.keys(config).filter(
62 | (val: string) => val !== 'cloudGraph'
63 | )
64 | // TODO: keep this log?
65 | this.logger.info(
66 | `Found providers ${allProviders.join(' | ')} in cloud-graph config`
67 | )
68 | if (allProviders.length === 0) {
69 | this.logger.error(
70 | 'Error, there are no providers configured and none were passed to load, try "cg init" to set some up!'
71 | )
72 | this.exit()
73 | }
74 | }
75 |
76 | /**
77 | * loop through providers and attempt to scan each of them
78 | */
79 | const schema: any[] = []
80 | for (const provider of allProviders) {
81 | this.logger.info(
82 | `Beginning ${chalk.italic.green('LOAD')} for ${provider}`
83 | )
84 | const { client: providerClient, schemasMap: schemaMap } =
85 | await this.getProviderClient(provider)
86 | if (!providerClient) {
87 | continue // eslint-disable-line no-continue
88 | }
89 |
90 | // const allTagData: any[] = []
91 | // TODO: not in order?
92 | const folders = fileUtils.getVersionFolders(
93 | path.join(dataDir, this.versionDirectory),
94 | provider
95 | )
96 | if (isEmpty(folders)) {
97 | this.logger.error(
98 | `Unable to find saved data for ${provider}, run "cg scan ${provider}" to fetch new data for ${provider}`
99 | )
100 | }
101 | // Get array of files for provider sorted by creation time
102 | const files: { name: string; version: number; folder: string }[] = []
103 | try {
104 | folders.forEach(({ name }: { name: string }) => {
105 | const file = fileUtils.getProviderDataFile(name, provider)
106 | const folderSplits = name.split('/')
107 | const versionString = folderSplits.find((val: string) =>
108 | val.includes('version')
109 | )
110 | if (!versionString || !file) {
111 | return
112 | }
113 | const version = versionString.split('-')[1]
114 | // TODO: better to extract version from folder name here?
115 | files.push({
116 | name: file,
117 | version: Number(version),
118 | folder: name,
119 | })
120 | })
121 | } catch (error: any) {
122 | this.logger.error(
123 | `Unable to find saved data for ${provider}, run "cg scan ${provider}" to fetch new data for ${provider}`
124 | )
125 | this.exit()
126 | }
127 | // If there is one file, just load it, otherwise prompt user to pick a version
128 | let file = ''
129 | let version = ''
130 | if (files.length > 1) {
131 | // TODO: rework this using choices[].value to not need to do string manipulation to extract answer
132 | const answer: { file: string } = await this.interface.prompt([
133 | {
134 | type: 'list',
135 | message: `Select ${provider} version to load into dgraph`,
136 | loop: false,
137 | name: 'file',
138 | choices: files.map(({ name: file, version }) => {
139 | const fileName = fileUtils.mapFileNameToHumanReadable(file)
140 | return `version ${version} ... ${fileName}`
141 | }),
142 | },
143 | ])
144 | try {
145 | const [versionString, fileName]: string[] = answer.file.split('...')
146 | version = versionString.split('-')[1] // eslint-disable-line prefer-destructuring
147 | file = fileUtils.findProviderFileLocation(
148 | path.join(dataDir, this.versionDirectory),
149 | fileName
150 | )
151 | const foundFile = files.find(val => val.name === file)
152 | if (!foundFile) {
153 | this.logger.error(
154 | `Unable to find file for ${provider} for ${versionString}`
155 | )
156 | this.exit()
157 | }
158 | version = foundFile?.folder || ''
159 | this.logger.debug(file)
160 | this.logger.debug(version)
161 | } catch (error: any) {
162 | this.logger.error('Please choose a file to load')
163 | this.exit()
164 | }
165 | } else {
166 | file = files[0].name
167 | version = files[0].folder
168 | }
169 | this.logger.startSpinner(
170 | `updating ${chalk.italic.green('Schema')} for ${chalk.italic.green(
171 | provider
172 | )}`
173 | )
174 | const providerData = JSON.parse(fs.readFileSync(file, 'utf8'))
175 | const providerSchema = getSchemaFromFolder(version, provider)
176 | if (!providerSchema) {
177 | this.logger.warn(`No schema found for ${provider}, moving on`)
178 | continue // eslint-disable-line no-continue
179 | }
180 | schema.push(providerSchema)
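   | // Only push the combined schema to Dgraph once, after the last provider's schema has been collected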
181 | if (allProviders.indexOf(provider) === allProviders.length - 1) {
182 | await storageEngine.setSchema(schema)
183 | }
184 | this.logger.successSpinner(
185 | `${chalk.italic.green(
186 | 'Schema'
187 | )} loaded successfully for ${chalk.italic.green(provider)}`
188 | )
189 |
190 | loadAllData(
191 | providerClient,
192 | {
193 | provider,
194 | providerData,
195 | storageEngine,
196 | storageRunning,
197 | schemaMap,
198 | },
199 | this.logger
200 | )
201 | }
202 |
203 | // Execute services mutations promises
204 | this.logger.startSpinner('Inserting loaded data into Dgraph')
205 | // Execute services mutations promises
206 | await storageEngine.run(true)
207 | this.logger.successSpinner('Data insertion into Dgraph complete')
208 | this.logger.success(
209 | `Your data for ${allProviders.join(
210 | ' | '
211 | )} has been loaded to Dgraph. Query at ${chalk.underline.green(
212 | `${storageEngine.host}/graphql`
213 | )}`
214 | )
215 | await this.startQueryEngine()
216 | }
217 | }
218 |
--------------------------------------------------------------------------------
/src/commands/operation.ts:
--------------------------------------------------------------------------------
1 | import { PluginType } from '@cloudgraph/sdk'
2 | import { Flags as flags } from '@oclif/core'
3 | import { isEmpty, pickBy } from 'lodash'
4 | import chalk from 'chalk'
5 |
6 | import Command from './base'
7 | import { messages } from '../utils/constants'
8 | import Manager from '../manager'
9 |
10 | const configurationLogs = [PluginType.Provider]
11 | export default abstract class OperationBaseCommand extends Command {
12 | static strict = false
13 |
14 | static hidden = true
15 |
16 | static flags = {
17 | 'no-save': flags.boolean({
18 | default: false,
19 | description: 'Set to not alter lock file, just delete plugin',
20 | }),
21 | ...Command.flags,
22 | }
23 |
24 | static args = Command.args
25 |
26 | private getPlugin(val: string): string {
27 | return val.includes('@') ? val.split('@')[0] : val
28 | }
29 |
30 | async add(type: PluginType): Promise<
31 | {
32 | key: string
33 | version: string
34 | plugin: any
35 | }[]
36 | > {
37 | const { argv } = await this.parse(OperationBaseCommand)
38 | const allPlugins = argv
39 | const manager = await this.getPluginManager(type)
40 | const addedPlugins = []
41 |
42 | if (isEmpty(allPlugins)) {
43 | this.logger.info(
44 | `No ${messages[type]?.plural} were passed as a parameter.`
45 | )
46 | }
47 |
48 | for (let key of allPlugins) {
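   | // Plugins may be passed as "name" or "name@version"; default to the latest version when none is given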
49 | let version = 'latest'
50 | if (key.includes('@')) {
51 | [key, version] = key.split('@')
52 | }
53 | const plugin = await manager.getPlugin(key, version)
54 |
55 | // Only shows for certain plugins
56 | configurationLogs.includes(type) &&
57 | this.logger.info(
58 | `Run ${chalk.italic.green(
59 | `$ cg init ${key}`
60 | )} to setup configuration for this ${messages[type]?.singular}`
61 | )
62 |
63 | addedPlugins.push({
64 | key,
65 | version,
66 | plugin,
67 | })
68 | }
69 | return addedPlugins
70 | }
71 |
72 | async installPlugin(type: PluginType): Promise<void> {
73 | const manager = await this.getPluginManager(type)
74 | const lockFile = this.getLockFile()
75 | if (isEmpty(lockFile?.[type])) {
76 | this.logger.info(
77 | `No ${messages[type]?.plural} found in lock file, have you added any?`
78 | )
79 | this.exit()
80 | }
81 | for (const [key, value] of Object.entries(lockFile[type])) {
82 | await manager.getPlugin(key, value as string)
83 | }
84 | }
85 |
86 | async remove(type: PluginType): Promise<{
87 | manager: Manager
88 | noSave: boolean
89 | plugins: string[]
90 | }> {
91 | const {
92 | argv,
93 | flags: { 'no-save': noSave },
94 | } = await this.parse(OperationBaseCommand)
95 | const allPlugins = argv
96 | const manager = await this.getPluginManager(type)
97 | const lockFile = this.getLockFile()
98 | const plugins = []
99 |
100 | if (isEmpty(allPlugins)) {
101 | this.logger.info(
102 | `No ${messages[type]?.plural} were passed as a parameter.`
103 | )
104 | this.exit()
105 | }
106 |
107 | if (isEmpty(lockFile?.[type])) {
108 | this.logger.info(
109 | `No ${messages[type]?.plural} found, have you installed any?`
110 | )
111 | this.exit()
112 | }
113 |
114 | for (const key of allPlugins) {
115 | this.logger.startSpinner(
116 | `Removing ${chalk.italic.green(key)} ${messages[
117 | type
118 | ]?.singular?.toLowerCase()}`
119 | )
120 |
121 | await manager.removePlugin(key)
122 |
123 | this.logger.successSpinner(
124 | `${chalk.italic.green(key)} ${messages[
125 | type
126 | ]?.singular?.toLowerCase()} removed successfully`
127 | )
128 |
129 | plugins.push(key)
130 | }
131 | return {
132 | manager,
133 | noSave,
134 | plugins,
135 | }
136 | }
137 |
138 | async update(type: PluginType): Promise<void> {
139 | const { argv } = await this.parse(OperationBaseCommand)
140 | const allPlugins = argv
141 | const manager = await this.getPluginManager(type)
142 | const lockFile = this.getLockFile()
143 |
144 | if (isEmpty(lockFile?.[type])) {
145 | this.logger.info(
146 | `No ${messages[type]?.plural} found in lock file, have you added any?`
147 | )
148 | this.exit()
149 | }
150 | // Get the plugins from the lock file that user wants to update
151 | // If user passes something like aws@1.1.0, filter the lock file to only grab 'aws' entry
152 | const pluginsToList =
153 | allPlugins.length >= 1
154 | ? pickBy(lockFile?.[type], (_, key) => {
155 | const plugins = allPlugins.map(val => {
156 | return this.getPlugin(val)
157 | })
158 | return plugins.indexOf(key) > -1
159 | })
160 | : lockFile?.[type] || {}
161 |
162 | // Warn the user if they are trying to update plugins they have not installed.
163 | const nonInstalledPlugins = allPlugins.filter(rawPlugin => {
164 | const plugin = this.getPlugin(rawPlugin)
165 | return !Object.keys(lockFile?.[type] || {}).includes(plugin)
166 | })
167 |
168 | for (const plugin of nonInstalledPlugins) {
169 | this.logger.warn(
170 | `${chalk.green(
171 | this.getPlugin(plugin)
172 | )} not found in lock file, have you installed it?`
173 | )
174 | this.exit()
175 | }
176 | // Loop through plugins and try to update them
177 | for (const [key] of Object.entries(pluginsToList)) {
178 | let version = 'latest'
179 | const rawPlugin = allPlugins.find(val => val.includes(key))
180 | if (rawPlugin && rawPlugin.includes('@')) {
181 | [, version] = rawPlugin.split('@')
182 | }
183 |
184 | this.logger.startSpinner(
185 | `Updating ${chalk.italic.green(key)} ${messages[
186 | type
187 | ]?.singular?.toLowerCase()} to ${version} version`
188 | )
189 |
190 | await manager.getPlugin(key, version)
191 |
192 | this.logger.successSpinner(
193 | `${chalk.italic.green(key)} ${messages[
194 | type
195 | ]?.singular?.toLowerCase()} updated successfully`
196 | )
197 |
198 | // Only shows for certain plugins
199 | configurationLogs.includes(type) &&
200 | this.logger.info(
201 | `Run ${chalk.italic.green(
202 | `$ cg init ${key}`
203 | )} to ensure you have the latest configuration for this version (including new services).`
204 | )
205 | }
206 | }
207 |
208 | async list(type: PluginType): Promise<void> {
209 | const { argv } = await this.parse(OperationBaseCommand)
210 | const allPlugins = argv
211 | const lockFile = this.getLockFile()
212 | if (isEmpty(lockFile?.[type])) {
213 | this.logger.info(
214 | `No ${messages[type]?.plural} found, have you installed any?`
215 | )
216 | this.exit()
217 | }
218 | const pluginsToList =
219 | allPlugins.length >= 1
220 | ? pickBy(lockFile?.[type], (_, key) => {
221 | return allPlugins.some(p => key.includes(p))
222 | })
223 | : lockFile?.[type] || {}
224 | for (const [key, value] of Object.entries(pluginsToList)) {
225 | this.logger.success(
226 | `${messages[type]?.singular} ${chalk.green(
227 | `${key}@${value}`
228 | )} is installed`
229 | )
230 | }
231 | }
232 | }
233 |
--------------------------------------------------------------------------------
/src/commands/policy/add.ts:
--------------------------------------------------------------------------------
1 | import { PluginType } from '@cloudgraph/sdk'
2 | import { isEmpty, uniqBy } from 'lodash'
3 | import { DEFAULT_CG_CONFIG } from '../../utils/constants'
4 |
5 | import OperationBaseCommand from '../operation'
6 |
7 | export default class AddPolicy extends OperationBaseCommand {
8 | static description = 'Add new policy packs'
9 |
10 | static aliases = ['add:policy']
11 |
12 | static examples = [
13 | '$ cg policy add aws-cis-1.2.0',
14 | '$ cg policy add aws-cis-1.2.0@0.12.0',
15 | ]
16 |
17 | static strict = false
18 |
19 | static hidden = false
20 |
21 | async run(): Promise<void> {
22 | try {
23 | const installedPolicies = await this.add(PluginType.PolicyPack)
24 |
25 | for (const installedPolicy of installedPolicies) {
26 | const {
27 | key,
28 | plugin: { default: { provider } } = { default: { provider: '' } },
29 | } = installedPolicy
30 |
31 | // Save policy to CG config file
32 | const config = this.getCGConfig() || DEFAULT_CG_CONFIG
33 | let configuredPolicies =
34 | config.cloudGraph.plugins?.[PluginType.PolicyPack] || []
35 | if (isEmpty(configuredPolicies)) {
36 | // Set new Policy Pack Plugin array
37 | configuredPolicies = [
38 | {
39 | name: key,
40 | providers: [provider],
41 | },
42 | ]
43 | } else {
44 | // Add policy to Policy Pack Plugin array
45 | configuredPolicies = [
46 | ...configuredPolicies,
47 | {
48 | name: key,
49 | providers: [provider],
50 | },
51 | ]
52 | }
53 | if (!config.cloudGraph.plugins) {
54 | config.cloudGraph.plugins = {}
55 | }
56 | config.cloudGraph.plugins[PluginType.PolicyPack] = uniqBy(
57 | configuredPolicies,
58 | 'name'
59 | )
60 | this.saveCloudGraphConfigFile(config)
61 | }
62 | } catch (error) {
63 | this.logger.debug(error)
64 | }
65 | }
66 | }
67 |
--------------------------------------------------------------------------------
/src/commands/policy/index.ts:
--------------------------------------------------------------------------------
1 | import Command from '../base'
2 |
3 | export default class PolicyIndex extends Command {
4 | static description =
5 | 'Commands to manage policy pack modules, run $ cg policy for more info.'
6 |
7 | static strict = false
8 |
9 | static hidden = false
10 |
11 | static flags = {
12 | ...Command.flags,
13 | }
14 |
15 | static args = Command.args
16 |
17 | async run(): Promise<void> {
18 | this.logger.info('cg policy is a topic not a command')
19 | }
20 | }
21 |
--------------------------------------------------------------------------------
/src/commands/policy/install.ts:
--------------------------------------------------------------------------------
1 | import { PluginType } from '@cloudgraph/sdk'
2 |
3 | import OperationBaseCommand from '../operation'
4 |
5 | export default class InstallPolicy extends OperationBaseCommand {
6 | static description = 'Install policy packs based on the lock file'
7 |
8 | static aliases = ['install:policy']
9 |
10 | static examples = ['$ cg policy install']
11 |
12 | static strict = false
13 |
14 | static hidden = false
15 |
16 | async run(): Promise<void> {
17 | try {
18 | await this.installPlugin(PluginType.PolicyPack)
19 | } catch (error) {
20 | this.logger.debug(error)
21 | }
22 | }
23 | }
24 |
--------------------------------------------------------------------------------
/src/commands/policy/list.ts:
--------------------------------------------------------------------------------
1 | import { PluginType } from '@cloudgraph/sdk'
2 |
3 | import OperationBaseCommand from '../operation'
4 |
5 | export default class ListPolicy extends OperationBaseCommand {
6 | static description = 'List currently installed policy packs and versions'
7 |
8 | static aliases = ['ls:policy', 'list:policy']
9 |
10 | static examples = ['$ cg policy list', '$ cg policy list aws']
11 |
12 | static strict = false
13 |
14 | static hidden = false
15 |
16 | async run(): Promise<void> {
17 | try {
18 | await this.list(PluginType.PolicyPack)
19 | } catch (error) {
20 | this.logger.debug(error)
21 | }
22 | }
23 | }
24 |
--------------------------------------------------------------------------------
/src/commands/policy/remove.ts:
--------------------------------------------------------------------------------
1 | import { ConfiguredPlugin, PluginType } from '@cloudgraph/sdk'
2 | import isEmpty from 'lodash/isEmpty'
3 |
4 | import OperationBaseCommand from '../operation'
5 |
6 | export default class RemovePolicy extends OperationBaseCommand {
7 | static description = 'Remove currently installed policy pack'
8 |
9 | static aliases = [
10 | 'remove:policy',
11 | 'policy:remove',
12 | 'policy:rm',
13 | 'del:policy',
14 | 'rm:policy',
15 | ]
16 |
17 | static examples = [
18 | '$ cg policy remove',
19 | '$ cg policy remove aws-cis-1.2.0',
20 | '$ cg policy remove aws-cis-1.2.0 --no-save',
21 | ]
22 |
23 | static strict = false
24 |
25 | static hidden = false
26 |
27 | async run(): Promise<void> {
28 | try {
29 | const {
30 | manager,
31 | noSave = false,
32 | plugins: pluginsRemoved = [],
33 | } = await this.remove(PluginType.PolicyPack)
34 |
35 | for (const key of pluginsRemoved) {
36 | if (manager && !noSave) {
37 | manager.removeFromLockFile(key)
38 |
39 | const config = this.getCGConfig()
40 | if (config) {
41 | const configuredPolicies =
42 | config.cloudGraph.plugins?.[PluginType.PolicyPack] || []
43 |
44 | if (!isEmpty(configuredPolicies)) {
45 | // Remove policy from Policy Pack Plugin array
46 | config.cloudGraph.plugins[PluginType.PolicyPack] =
47 | configuredPolicies.filter(
48 | (p: ConfiguredPlugin) => p.name !== key
49 | )
50 |
51 | this.saveCloudGraphConfigFile(config)
52 | }
53 | }
54 | }
55 | }
56 | } catch (error) {
57 | this.logger.stopSpinner()
58 | }
59 | }
60 | }
61 |
--------------------------------------------------------------------------------
/src/commands/policy/update.ts:
--------------------------------------------------------------------------------
1 | import { PluginType } from '@cloudgraph/sdk'
2 |
3 | import OperationBaseCommand from '../operation'
4 |
5 | export default class UpdatePolicy extends OperationBaseCommand {
6 | static description = 'Update currently installed policy packs'
7 |
8 | static aliases = []
9 |
10 | static examples = [
11 | '$ cg policy update',
12 | '$ cg policy update aws-cis-1.2.0',
13 | '$ cg policy update aws-cis-1.2.0@0.12.0',
14 | ]
15 |
16 | static strict = false
17 |
18 | static hidden = false
19 |
20 | async run(): Promise<void> {
21 | try {
22 | await this.update(PluginType.PolicyPack)
23 | } catch (error) {
24 | this.logger.stopSpinner()
25 | this.logger.debug(error)
26 | }
27 | }
28 | }
29 |
--------------------------------------------------------------------------------
/src/commands/provider/add.ts:
--------------------------------------------------------------------------------
1 | import { PluginType } from '@cloudgraph/sdk'
2 |
3 | import OperationBaseCommand from '../operation'
4 |
5 | export default class AddProvider extends OperationBaseCommand {
6 | static description = 'Add new providers'
7 |
8 | static aliases = ['add:provider']
9 |
10 | static examples = ['$ cg provider add aws', '$ cg provider add aws@0.12.0']
11 |
12 | static strict = false
13 |
14 | static hidden = false
15 |
16 | async run(): Promise<void> {
17 | try {
18 | await this.add(PluginType.Provider)
19 | } catch (error) {
20 | this.logger.debug(error)
21 | }
22 | }
23 | }
24 |
--------------------------------------------------------------------------------
/src/commands/provider/index.ts:
--------------------------------------------------------------------------------
1 | import Command from '../base'
2 |
3 | export default class ProviderIndex extends Command {
4 | static description =
5 | 'Commands to manage provider modules, run $ cg provider for more info.'
6 |
7 | static strict = false
8 |
9 | static hidden = false
10 |
11 | static flags = {
12 | ...Command.flags,
13 | }
14 |
15 | static args = Command.args
16 |
17 | async run(): Promise<void> {
18 | this.logger.info('cg provider is a topic not a command')
19 | }
20 | }
21 |
--------------------------------------------------------------------------------
/src/commands/provider/install.ts:
--------------------------------------------------------------------------------
1 | import { PluginType } from '@cloudgraph/sdk'
2 |
3 | import OperationBaseCommand from '../operation'
4 |
5 | export default class InstallProvider extends OperationBaseCommand {
6 | static description = 'Install providers based on the lock file'
7 |
8 | static aliases = ['install:provider']
9 |
10 | static examples = ['$ cg provider install']
11 |
12 | static strict = false
13 |
14 | static hidden = false
15 |
16 | async run(): Promise<void> {
17 | try {
18 | await this.installPlugin(PluginType.Provider)
19 | } catch (error) {
20 | this.logger.debug(error)
21 | }
22 | }
23 | }
24 |
--------------------------------------------------------------------------------
/src/commands/provider/list.ts:
--------------------------------------------------------------------------------
1 | import { PluginType } from '@cloudgraph/sdk'
2 |
3 | import OperationBaseCommand from '../operation'
4 |
5 | export default class ListProvider extends OperationBaseCommand {
6 | static description = 'List currently installed providers and versions'
7 |
8 | static aliases = ['ls:provider', 'list:provider']
9 |
10 | static examples = ['$ cg provider list', '$ cg provider list aws']
11 |
12 | static strict = false
13 |
14 | static hidden = false
15 |
16 | async run(): Promise<void> {
17 | try {
18 | await this.list(PluginType.Provider)
19 | } catch (error) {
20 | this.logger.debug(error)
21 | }
22 | }
23 | }
24 |
--------------------------------------------------------------------------------
/src/commands/provider/remove.ts:
--------------------------------------------------------------------------------
1 | import { PluginType } from '@cloudgraph/sdk'
2 |
3 | import OperationBaseCommand from '../operation'
4 |
5 | export default class RemoveProvider extends OperationBaseCommand {
6 | static description = 'Remove currently installed provider'
7 |
8 | static aliases = [
9 | 'remove:provider',
10 | 'provider:remove',
11 | 'provider:rm',
12 | 'del:provider',
13 | 'rm:provider',
14 | ]
15 |
16 | static examples = [
17 | '$ cg provider remove',
18 | '$ cg provider remove aws',
19 | '$ cg provider remove aws --no-save',
20 | ]
21 |
22 | static strict = false
23 |
24 | static hidden = false
25 |
26 | async run(): Promise<void> {
27 | try {
28 | const {
29 | manager,
30 | noSave = false,
31 | plugins: pluginsRemoved = [],
32 | } = await this.remove(PluginType.Provider)
33 |
34 | for (const key of pluginsRemoved) {
35 | if (manager && !noSave) {
36 | manager.removeFromLockFile(key)
37 | }
38 | }
39 | } catch (error) {
40 | this.logger.stopSpinner()
41 | }
42 | }
43 | }
44 |
--------------------------------------------------------------------------------
/src/commands/provider/update.ts:
--------------------------------------------------------------------------------
1 | import { PluginType } from '@cloudgraph/sdk'
2 |
3 | import OperationBaseCommand from '../operation'
4 |
5 | export default class UpdateProvider extends OperationBaseCommand {
6 | static description = 'Update currently installed providers'
7 |
8 | static aliases = []
9 |
10 | static examples = [
11 | '$ cg provider update',
12 | '$ cg provider update aws',
13 | '$ cg provider update aws@0.12.0',
14 | ]
15 |
16 | static strict = false
17 |
18 | static hidden = false
19 |
20 | async run(): Promise<void> {
21 | try {
22 | await this.update(PluginType.Provider)
23 | } catch (error) {
24 | this.logger.stopSpinner()
25 | this.logger.debug(error)
26 | }
27 | }
28 | }
29 |
--------------------------------------------------------------------------------
/src/commands/scan.ts:
--------------------------------------------------------------------------------
1 | import chalk from 'chalk'
2 | import fs from 'fs'
3 | import path from 'path'
4 | import {
5 | Opts,
6 | pluginMap,
7 | PluginType,
8 | ProviderData,
9 | StorageEngine,
10 | } from '@cloudgraph/sdk'
11 | import { range } from 'lodash'
12 | import { print } from 'graphql'
13 |
14 | import Command from './base'
15 | import { fileUtils } from '../utils'
16 | import DgraphEngine from '../storage/dgraph'
17 | import { scanReport } from '../reports'
18 | import { loadAllData, processConnectionsBetweenEntities } from '../utils/data'
19 |
20 | export default class Scan extends Command {
21 | static description =
22 | 'Scan data for one or multiple providers to be queried through Dgraph'
23 |
24 | static examples = [
25 | '$ cg scan',
26 | '$ cg scan aws',
27 | '$ cg scan aws --dgraph http://localhost:1000 [Save data in dgraph running on port 1000]',
28 | '$ cg scan aws --no-serve [Do not start the query engine]',
29 | ]
30 |
31 | static strict = false
32 |
33 | static flags = {
34 | ...Command.flags,
35 | }
36 |
37 | static hidden = false
38 |
39 | static args = Command.args
40 |
41 | private async plugins({
42 | storage: { isRunning, engine },
43 | flags,
44 | providerData,
45 | }: {
46 | storage: {
47 | isRunning: boolean
48 | engine: StorageEngine
49 | }
50 | flags: {
51 | [flag: string]: any
52 | }
53 | providerData: ProviderData
54 | }): Promise<void> {
55 | const config = this.getCGConfig('cloudGraph')
56 | const { plugins = {} } = config
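   | // "plugins" maps each configured plugin type (e.g. policyPack) to its entries from the CloudGraph config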
57 | for (const pluginType in plugins) {
58 | if (pluginType) {
59 | try {
60 | // Get Plugin Interface
61 | const Plugin = pluginMap[pluginType]
62 |
63 | // Execute Plugins by Provider
64 | for (const provider in this.providers) {
65 | if (provider) {
66 | const { schemasMap, serviceKey } = this.providers[provider]
67 |
68 | // Initialize
69 | const PluginInstance = new Plugin({
70 | config,
71 | provider: {
72 | name: provider,
73 | schemasMap,
74 | serviceKey,
75 | },
76 | flags: flags as { [flag: string]: any },
77 | logger: this.logger,
78 | })
79 |
80 | // Get the Plugin Manager
81 | const pluginManager = await this.getPluginManager(
82 | pluginType as PluginType
83 | )
84 |
85 | // Configure
86 | await PluginInstance.configure(pluginManager, plugins[pluginType])
87 |
88 | // Execute plugins
89 | await PluginInstance.execute({
90 | storageRunning: isRunning,
91 | storageEngine: engine,
92 | providerData,
93 | processConnectionsBetweenEntities,
94 | })
95 | }
96 | }
97 | } catch (error) {
98 | this.logger.warn('Plugin not supported by CG')
99 | }
100 | }
101 | }
102 | }
103 |
104 | async run(): Promise<void> {
105 | const { argv, flags } = await this.parse(Scan)
106 | const { dev: devMode } = flags as {
107 | [flag: string]: any
108 | }
109 |
110 | const { dataDir } = this.config
111 | const opts: Opts = { logger: this.logger, debug: true, devMode }
112 | let allProviders = argv
113 |
114 | // Run dgraph health check
115 | const storageEngine = (await this.getStorageEngine()) as DgraphEngine
116 | const storageRunning = await storageEngine.healthCheck()
117 | /**
118 | * Handle 2 methods of scanning, either for explicitly passed providers OR
119 | * try to scan for all providers found within the config file
120 | * if we still have 0 providers, fail and exit.
121 | */
122 | if (allProviders.length >= 1) {
123 | this.logger.debug(`Scanning for providers: ${allProviders}`)
124 | } else {
125 | this.logger.debug('Scanning for providers found in config')
126 | const config = this.getCGConfig()
127 |
128 | allProviders = Object.keys(config).filter(
129 | (val: string) => val !== 'cloudGraph'
130 | )
131 | if (allProviders.length === 0) {
132 | this.logger.error(
133 | 'There are no providers configured and none were passed to scan'
134 | )
135 | this.exit()
136 | }
137 | }
138 |
139 | // Build folder structure for saving CloudGraph data by version
140 | const schema: any[] = []
141 | let folders = fileUtils.getVersionFolders(
142 | path.join(dataDir, this.versionDirectory)
143 | )
144 | let dataFolder = 'version-1'
145 |
146 | if (folders.length >= this.versionLimit) {
147 | this.logger.warn(
148 | `Maximum number of data versions has been reached, deleting version-1 and creating a new version-${this.versionLimit}`
149 | )
150 | // version 1 gets deleted, version 2 becomes version 1 … new version gets created
151 | const pathPrefix = path.join(dataDir, this.versionDirectory)
152 | const versionPrefix = path.join(pathPrefix, 'version-')
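   | // Remove version-1 (and any folders beyond the limit), then shift the remaining folders down by one;
   | // e.g. with a limit of 10, version-2..10 become version-1..9 and the new scan is saved as version-10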
153 | for (const version of [
154 | 1,
155 | ...range(this.versionLimit + 1, folders.length + 1),
156 | ]) {
157 | fs.rmSync(versionPrefix + version, { recursive: true })
158 | }
159 | for (const version of range(1, this.versionLimit)) {
160 | fs.renameSync(versionPrefix + (version + 1), versionPrefix + version)
161 | }
162 | folders = fileUtils.getVersionFolders(
163 | path.join(dataDir, this.versionDirectory)
164 | )
165 | }
166 |
167 | if (folders) {
168 | dataFolder = `version-${folders.length + 1}`
169 | }
170 | const dataStorageLocation = path.join(
171 | dataDir,
172 | `${this.versionDirectory}/${dataFolder}`
173 | )
174 | fileUtils.makeDirIfNotExists(dataStorageLocation)
175 |
176 | /**
177 | * loop through providers and attempt to scan each of them
178 | */
179 | const failedProviderList: string[] = []
180 | const allProviderData: ProviderData = { entities: [], connections: {} }
181 | for (const provider of allProviders) {
182 | this.logger.info(
183 | `Beginning ${chalk.italic.green('SCAN')} for ${provider}`
184 | )
185 |
186 | const providerPlugin = await this.getProviderClient(provider)
187 | const { client: providerClient, schemasMap } = providerPlugin
188 |
189 | if (!providerClient) {
190 | failedProviderList.push(provider)
191 | this.logger.warn(`No valid client found for ${provider}, skipping...`)
192 | continue // eslint-disable-line no-continue
193 | }
194 | const config = this.getCGConfig(provider)
195 | this.providers[provider] = providerPlugin
196 |
197 | this.logger.debug(config)
198 | if (!config) {
199 | failedProviderList.push(provider)
200 | this.logger.warn(
201 | `No configuration found for ${provider}, run "cg init ${provider}" to create one`
202 | )
203 | continue // eslint-disable-line no-continue
204 | }
205 | this.logger.startSpinner(
206 | `${chalk.italic.green('SCANNING')} data for ${chalk.italic.green(
207 | provider
208 | )}`
209 | )
210 | const providerData = await providerClient.getData({
211 | opts,
212 | })
213 | this.logger.successSpinner(
214 | `${chalk.italic.green(provider)} data scanned successfully`
215 | )
216 |
217 | // Merge all providers data
218 | allProviderData.entities.push(...providerData.entities)
219 | Object.assign(allProviderData.connections, providerData.connections)
220 |
221 | // Handle schema, write provider and combined schema to file and store in Dgraph if running
222 | this.logger.startSpinner(
223 | `updating ${chalk.italic.green('Schema')} for ${chalk.italic.green(
224 | provider
225 | )}`
226 | )
227 |
228 | const rawSchema = providerClient.getSchema()
229 | const providerSchema: string =
230 | typeof rawSchema === 'object' ? print(rawSchema) : rawSchema
231 |
232 | if (!providerSchema) {
233 | this.logger.warn(`No schema found for ${provider}, moving on`)
234 | continue // eslint-disable-line no-continue
235 | }
236 | schema.push(providerSchema)
237 | fileUtils.writeGraphqlSchemaToFile(
238 | dataStorageLocation,
239 | providerSchema,
240 | provider
241 | )
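   | // On the last provider, write the combined schema to file and, if Dgraph is running, validate and (re)set it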
242 | if (allProviders.indexOf(provider) === allProviders.length - 1) {
243 | fileUtils.writeGraphqlSchemaToFile(dataStorageLocation, schema.join())
244 | if (storageRunning) {
245 | try {
246 | if (storageEngine instanceof DgraphEngine) {
247 | await storageEngine.validateSchema(schema, dataFolder)
248 | }
249 | await storageEngine.dropAll() // Delete schema before change it
250 | await storageEngine.setSchema(schema)
251 | } catch (error: any) {
252 | this.logger.error(
253 | `There was an issue pushing schema for providers: ${allProviders.join(
254 | ' | '
255 | )} to dgraph at ${storageEngine.host}`
256 | )
257 | this.logger.debug(error)
258 | fileUtils.deleteFolder(dataStorageLocation)
259 | this.exit()
260 | }
261 | }
262 | }
263 | this.logger.successSpinner(
264 | `${chalk.italic.green(
265 | 'Schema'
266 | )} loaded successfully for ${chalk.italic.green(provider)}`
267 | )
268 |
269 | try {
270 | const dataPath = path.join(
271 | dataStorageLocation,
272 | `/${provider}_${Date.now()}.json`
273 | )
274 | fs.writeFileSync(dataPath, JSON.stringify(providerData, null, 2))
275 | } catch (error: any) {
276 | this.logger.error(`There was a problem saving data for ${provider}`)
277 | this.logger.debug(error)
278 | fileUtils.deleteFolder(dataStorageLocation)
279 | this.exit()
280 | }
281 |
282 | loadAllData(
283 | providerClient,
284 | {
285 | provider,
286 | providerData,
287 | storageEngine,
288 | storageRunning,
289 | schemaMap: schemasMap,
290 | },
291 | this.logger
292 | )
293 | }
294 |
295 | // If every provider that has been passed is a failure, just exit
296 | if (failedProviderList.length === allProviders.length) {
297 | this.logger.warn(
298 | `No providers in list: [${allProviders.join(
299 | ' | '
300 | )}] have a valid module and config, exiting`
301 | )
302 | this.exit()
303 | }
304 | if (storageRunning) {
305 | this.logger.startSpinner(
306 | 'Inserting data into Dgraph and generating scan report'
307 | )
308 | // Execute services mutations promises
309 | await storageEngine.run()
310 |
311 | this.logger.successSpinner('Data insertion into Dgraph complete')
312 |
313 | await this.plugins({
314 | flags: flags as { [flag: string]: any },
315 | storage: {
316 | isRunning: storageRunning,
317 | engine: storageEngine,
318 | },
319 | providerData: allProviderData,
320 | })
321 | }
322 |
323 | scanReport.print()
324 |
325 | this.logger.success(
326 | `Your data for ${allProviders.join(
327 | ' | '
328 | )} has been saved to ${chalk.italic.green(dataStorageLocation)}`
329 | )
330 |
331 | if (storageRunning) {
332 | this.logger.success(
333 | `Your data for ${allProviders.join(
334 | ' | '
335 | )} has been saved to Dgraph. Query at ${chalk.underline.green(
336 | `${storageEngine.host}/graphql`
337 | )}`
338 | )
339 | }
340 | storageRunning && (await this.startQueryEngine())
341 | }
342 | }
343 |
--------------------------------------------------------------------------------
/src/commands/serve.ts:
--------------------------------------------------------------------------------
1 | /* eslint-disable no-console */
2 | import Command from './base'
3 |
4 | export default class Serve extends Command {
5 | static description =
6 | 'Serve a GraphQL query tool to query your CloudGraph data.'
7 |
8 | static examples = ['$ cg serve']
9 |
10 | static strict = false
11 |
12 | static hidden = false
13 |
14 | static flags = {
15 | ...Command.flags,
16 | }
17 |
18 | static args = Command.args
19 |
20 | async run(): Promise<void> {
21 | const storageEngine = await this.getStorageEngine()
22 | const storageRunning = await storageEngine.healthCheck()
23 | if (!storageRunning) {
24 | const msg = `Storage engine check at ${storageEngine.host} FAILED, canceling SERVE`
25 | this.logger.error(msg)
26 | throw new Error(msg)
27 | }
28 | await this.startQueryEngine()
29 | }
30 | }
31 |
--------------------------------------------------------------------------------
/src/commands/teardown.ts:
--------------------------------------------------------------------------------
1 | import { Flags as flags } from '@oclif/core'
2 | import { execCommand, findExistingDGraphContainerId } from '../utils'
3 | import Command from './base'
4 |
5 | export default class Teardown extends Command {
6 | static description = 'Stops the Dgraph Docker container.'
7 |
8 | static examples = ['$ cg teardown', '$ cg teardown --delete-image']
9 |
10 | static strict = false
11 |
12 | static hidden = false
13 |
14 | static flags = {
15 | // delete dgraph docker image after stopping it
16 | 'delete-image': flags.boolean({
17 | default: false,
18 | description: 'Remove dgraph docker image after stopping it',
19 | }),
20 | }
21 |
22 | static args = Command.args
23 |
24 | async run(): Promise<void> {
25 | try {
26 | const {
27 | flags: { 'delete-image': rmContainer },
28 | } = await this.parse(Teardown)
29 | let containerToRemove: undefined | string
30 | const runningContainerId = await findExistingDGraphContainerId('running')
31 | this.logger.startSpinner('Stopping Dgraph container...')
32 | if (runningContainerId) {
33 | await execCommand(`docker stop ${runningContainerId}`)
34 | this.logger.successSpinner(
35 | `Dgraph container(${runningContainerId}) stopped successfully!`
36 | )
37 | containerToRemove = runningContainerId
38 | } else {
39 | const exitedContainerId = await findExistingDGraphContainerId('exited')
40 | if (exitedContainerId && exitedContainerId !== '') {
41 | this.logger.successSpinner(
42 | `Existing Dgraph container(${exitedContainerId}) is already stopped!`
43 | )
44 | containerToRemove = exitedContainerId
45 | } else {
46 | this.logger.successSpinner('No Dgraph containers found!')
47 | }
48 | }
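   | // Note: despite its name, --delete-image removes the stopped Dgraph container (docker rm), not the image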
49 | if (rmContainer && containerToRemove) {
50 | this.logger.startSpinner(
51 | `Removing Dgraph container(${containerToRemove})...`
52 | )
53 | await execCommand(`docker rm ${containerToRemove}`)
54 | this.logger.successSpinner(
55 | `Dgraph container(${containerToRemove}) removed successfully`
56 | )
57 | }
58 | } catch (error: any) {
59 | this.logger.failSpinner('Error while stopping dgraph container')
60 | this.logger.debug(error)
61 | }
62 | }
63 | }
64 |
--------------------------------------------------------------------------------
/src/commands/update.ts:
--------------------------------------------------------------------------------
1 | import { PluginType } from '@cloudgraph/sdk'
2 |
3 | import OperationCommand from './operation'
4 |
5 | export default class Upgrade extends OperationCommand {
6 | static description = 'Upgrade currently installed plugins.'
7 |
8 | static examples = ['$ cg update']
9 |
10 | static aliases = ['update']
11 |
12 | static strict = false
13 |
14 | static hidden = false
15 |
16 | async run(): Promise<void> {
17 | try {
18 | const lockFile = this.getLockFile()
19 |
20 | for (const pluginType in lockFile) {
21 | if (pluginType) {
22 | await this.update(pluginType as PluginType)
23 | }
24 | }
25 | } catch (error) {
26 | this.logger.stopSpinner()
27 | this.logger.debug(error)
28 | }
29 | }
30 | }
31 |
--------------------------------------------------------------------------------
/src/index.ts:
--------------------------------------------------------------------------------
1 | import { run } from '@oclif/core'
2 |
3 | export default run
4 |
--------------------------------------------------------------------------------
/src/manager/index.ts:
--------------------------------------------------------------------------------
1 | import { Logger, PluginModule, PluginType } from '@cloudgraph/sdk'
2 | import { cosmiconfigSync } from 'cosmiconfig'
3 | import { Config } from '@oclif/core'
4 | import path from 'path'
5 | import chalk from 'chalk'
6 | import fs from 'fs'
7 | import satisfies from 'semver/functions/satisfies'
8 | import gt from 'semver/functions/gt'
9 | import { printBoxMessage, fileUtils } from '../utils'
10 | import NpmManager from './npm'
11 |
12 | export class Manager {
13 | constructor(config: {
14 | logger: Logger
15 | devMode: boolean
16 | cliConfig: Config
17 | pluginType: PluginType
18 | }) {
19 | this.pluginManager = new NpmManager()
20 | this.plugins = {}
21 | this.logger = config.logger
22 | this.devMode = config.devMode
23 | this.cliConfig = config.cliConfig
24 | this.pluginType = config.pluginType
25 | }
26 |
27 | plugins: Record<string, any>
28 |
29 | cliConfig: Config
30 |
31 | logger: Logger
32 |
33 | pluginManager: NpmManager
34 |
35 | devMode: boolean
36 |
37 | pluginType: PluginType
38 |
39 | private getImportPath(plugin: string): {
40 | importPath: string
41 | name: string
42 | } {
43 | let pluginNamespace = '@cloudgraph'
44 | let pluginName = plugin
45 |
46 | if (plugin.includes('/')) {
47 | [pluginNamespace, pluginName] = plugin.split('/')
48 | }
49 | return {
50 | importPath: `${pluginNamespace}/${
51 | PluginModule[this.pluginType]
52 | }-${pluginName}`,
53 | name: pluginName,
54 | }
55 | }
56 |
57 | async getPlugin(plugin: string, version?: string): Promise<any> {
58 | /**
59 | * Determine if the user has passed a plugin and prompt them if not
60 | */
61 | let pluginInstance
62 | let pluginName = plugin
63 |
64 | this.logger.startSpinner(
65 | `Checking for ${this.pluginType} ${chalk.green(plugin)} module...`
66 | )
67 | try {
68 | const { importPath, name } = this.getImportPath(plugin)
69 | pluginName = name
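   | // In devMode the plugin is imported from local node_modules (e.g. via yarn link); otherwise it is installed with npm and pinned in the lock file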
70 | if (process.env.NODE_ENV === 'development' || this.devMode) {
71 | const isValidVersion = await this.checkRequiredVersion(importPath)
72 | if (!isValidVersion) {
73 | throw new Error('Version check failed')
74 | }
75 | this.logger.warn(
76 | // eslint-disable-next-line max-len
77 | `You are running CloudGraph in devMode. In devMode, CG will assume plugin modules are already installed. Use ${chalk.italic.green(
78 | '$yarn link {pluginModule}'
79 | )} to work with a local copy of a plugin module`
80 | )
81 | pluginInstance = await import(importPath)
82 | } else {
83 | this.logger.startSpinner(`Installing ${chalk.green(pluginName)} plugin`)
84 | const pluginLockVersion = this.getVersionFromLock(plugin)
85 | this.logger.info(
86 | `Installing ${chalk.green(plugin)} module version: ${chalk.green(
87 | version ?? pluginLockVersion
88 | )}`
89 | )
90 | await this.pluginManager.install(
91 | importPath,
92 | version ?? pluginLockVersion
93 | )
94 | this.logger.successSpinner(
95 | `${chalk.green(pluginName)} plugin installed successfully!`
96 | )
97 | const isValidVersion = await this.checkRequiredVersion(importPath)
98 | if (!isValidVersion) {
99 | throw new Error(`Version check ${chalk.red('failed')}`)
100 | }
101 | // If there is no lock file, we download latest and then update the lock file with latest version
102 | if (version || pluginLockVersion === 'latest') {
103 | const newLockVersion = await this.getVersion(importPath)
104 | this.logger.info(
105 | `${chalk.green(plugin)} version locked at: ${chalk.green(
106 | version && version !== 'latest' ? version : newLockVersion
107 | )}`
108 | )
109 | this.writeVersionToLockFile({
110 | plugin,
111 | version: version && version !== 'latest' ? version : newLockVersion,
112 | })
113 | }
114 | pluginInstance = import(importPath)
115 | }
116 | } catch (error: any) {
117 | this.logger.debug(error)
118 | this.logger.failSpinner(
119 | `Manager failed to install ${this.pluginType} plugin for ${chalk.green(
120 | pluginName
121 | )}`
122 | )
123 | throw new Error(
124 | `${this.pluginType} ${plugin} module check ${chalk.red(
125 | 'FAILED'
126 | )}, unable to find plugin`
127 | )
128 | }
129 | this.logger.successSpinner(
130 | `${this.pluginType} ${chalk.green(pluginName)} module check complete`
131 | )
132 | this.plugins[pluginName] = pluginInstance
133 | return pluginInstance
134 | }
135 |
136 | async queryRemoteVersion(importPath: string): Promise<string> {
137 | try {
138 | const info = await this.pluginManager.queryPackage(importPath)
139 | return info.version
140 | } catch (error) {
141 | this.logger.error('There was an error checking the latest version')
142 | return '0.0.0'
143 | }
144 | }
145 |
146 | async getVersion(importPath: string): Promise<string> {
147 | const pluginInfo = await import(`${importPath}/package.json`)
148 | return pluginInfo.version
149 | }
150 |
151 | async checkRequiredVersion(importPath: string): Promise<boolean> {
152 | const pluginInfo = await import(`${importPath}/package.json`)
153 | const pluginVersion = pluginInfo?.version
154 |
155 | if (process.env.NODE_ENV !== 'development' && !this.devMode) {
156 | const latestRemoteVersion = await this.queryRemoteVersion(importPath)
157 |
158 | if (gt(latestRemoteVersion, pluginVersion)) {
159 | const stoppedMsg = this.logger.stopSpinner()
160 | printBoxMessage(
161 | `Update for ${chalk.italic.green(
162 | importPath
163 | )} is available: ${pluginVersion} -> ${latestRemoteVersion}. \n
164 | Run ${chalk.italic.green('cg update')} to install`
165 | )
166 | this.logger.startSpinner(stoppedMsg)
167 | }
168 | }
169 | const requiredVersion = pluginInfo?.cloudGraph?.version
170 | if (!requiredVersion) {
171 | this.logger.warn(
172 | `No required cli version found in ${this.pluginType} module, assuming compatibility`
173 | )
174 | return true
175 | }
176 | const [cliVersion] = this.cliConfig.version.split('-')
177 | const test = satisfies(cliVersion, requiredVersion)
178 | if (!test) {
179 | // eslint-disable-next-line max-len
180 | const errText = `${this.pluginType} ${importPath}@${pluginVersion} requires cli version ${requiredVersion} but cli version is ${this.cliConfig.version}`
181 | this.logger.error(errText)
182 | return false
183 | }
184 | return true
185 | }
186 |
187 | getVersionFromLock(plugin: string): string {
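   | // The lock file is keyed by plugin type, then plugin name: { [pluginType]: { [pluginName]: version } }; missing entries default to 'latest'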
188 | const lockPath = path.join(
189 | this.cliConfig.configDir,
190 | '.cloud-graph.lock.json'
191 | )
192 | let config
193 | try {
194 | config = cosmiconfigSync('cloud-graph').load(lockPath)
195 | } catch (error: any) {
196 | this.logger.info('No lock file found for Cloud Graph, creating one...')
197 | }
198 | const lockVersion = config?.config[this.pluginType]?.[plugin]
199 | if (!lockVersion) {
200 | return 'latest'
201 | }
202 | return lockVersion
203 | }
204 |
205 | removeFromLockFile(plugin: string): void {
206 | const lockPath = path.join(
207 | this.cliConfig.configDir,
208 | '.cloud-graph.lock.json'
209 | )
210 | try {
211 | const oldLock = cosmiconfigSync('cloud-graph').load(lockPath)
212 | const lockFile = oldLock?.config
213 | if (!lockFile || !lockFile[this.pluginType]?.[plugin]) {
214 | this.logger.warn(
215 | `No lock file found containing ${plugin}, could not remove`
216 | )
217 | return
218 | }
219 | delete lockFile[this.pluginType][plugin]
220 | this.logger.success(
221 | `${this.pluginType} ${chalk.green(plugin)} has been removed`
222 | )
223 | fs.writeFileSync(lockPath, JSON.stringify(lockFile, null, 2))
224 | } catch (error: any) {
225 | this.logger.error(`There was an error removing ${plugin} from lock file`)
226 | this.logger.debug(error)
227 | }
228 | }
229 |
230 | writeVersionToLockFile({
231 | plugin,
232 | version,
233 | }: {
234 | plugin: string
235 | version: string
236 | }): void {
237 | const lockPath = path.join(
238 | this.cliConfig.configDir,
239 | '.cloud-graph.lock.json'
240 | )
241 | let oldLock
242 | try {
243 | oldLock = cosmiconfigSync('cloud-graph').load(lockPath)
244 | } catch (e: any) {
245 | this.logger.debug(e)
246 | }
247 | try {
248 | let newLockFile
249 | if (oldLock?.config) {
250 | newLockFile = {
251 | ...oldLock.config,
252 | [this.pluginType]: {
253 | ...oldLock.config[this.pluginType],
254 | [plugin]: version,
255 | },
256 | }
257 | } else {
258 | newLockFile = {
259 | [this.pluginType]: {
260 | [plugin]: version,
261 | },
262 | }
263 | }
264 | fileUtils.makeDirIfNotExists(this.cliConfig.configDir)
265 | fs.writeFileSync(lockPath, JSON.stringify(newLockFile, null, 2))
266 | } catch (error: any) {
267 | this.logger.error(
268 | 'There was an error writing latest version to the lock file'
269 | )
270 | this.logger.debug(error)
271 | }
272 | }
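    | // Illustrative shape of the resulting lock file (not part of the original source;
    | // the exact top-level keys come from the SDK's PluginType values):
    | //   { "provider": { "aws": "1.0.0" }, "policyPack": { "aws-cis-1.2": "1.0.0" } }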
273 |
274 | async removePlugin(plugin: string): Promise<void> {
275 | const { importPath } = this.getImportPath(plugin)
276 | try {
277 | await this.pluginManager.uninstall(importPath)
278 | } catch (error: any) {
279 | this.logger.error(
280 | `There was an error uninstalling ${this.pluginType} ${plugin}`
281 | )
282 | this.logger.debug(error)
283 | }
284 | }
285 | }
286 |
287 | export default Manager
288 |
--------------------------------------------------------------------------------
/src/manager/npm/index.ts:
--------------------------------------------------------------------------------
1 | import { exec } from 'child_process'
2 | import path from 'path'
3 |
4 | export default class NpmManager {
5 | constructor() {
6 | this.npmBinary = path.normalize('./node_modules/.bin/npm')
7 | }
8 |
9 | npmBinary: string
10 |
11 | getProviderImportPath(provider: string): {
12 | importPath: string
13 | name: string
14 | } {
15 | let providerNamespace = '@cloudgraph'
16 | let providerName = provider
17 | if (provider.includes('/')) {
18 | [providerNamespace, providerName] = provider.split('/')
19 | }
20 | return {
21 | importPath: `${providerNamespace}/cg-provider-${providerName}`,
22 | name: providerName,
23 | }
24 | }
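   | // For example: 'aws' resolves to
   | //   { importPath: '@cloudgraph/cg-provider-aws', name: 'aws' }
   | // and a namespaced plugin such as '@myorg/custom' (hypothetical) resolves to
   | //   { importPath: '@myorg/cg-provider-custom', name: 'custom' }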
25 |
26 | async install(_path: string, version?: string): Promise<number> {
27 | return new Promise((resolve, reject) => {
28 | const module = `${_path}${version ? `@${version}` : ''}`
29 |
30 | const flags = ['--no-audit', '--no-fund', '--ignore-scripts', '--silent']
31 | exec(
32 | `${this.npmBinary} install ${module} ${flags.join(' ')}`,
33 | { cwd: path.resolve(__dirname, '../../../') },
34 |
35 | err => {
36 | if (err) return reject(err)
37 | resolve(0)
38 | }
39 | )
40 | })
41 | }
42 |
43 | async uninstall(_path: string, version?: string): Promise<number> {
44 | return new Promise((resolve, reject) => {
45 | const module = `${_path}${version ? `@${version}` : ''}`
46 |
47 | const flags = [
48 | '--no-audit',
49 | '--no-fund',
50 | '--no-save',
51 | '--ignore-scripts',
52 | '--silent',
53 | ]
54 | exec(
55 | `${this.npmBinary} uninstall ${module} ${flags.join(' ')}`,
56 | { cwd: path.resolve(__dirname, '../../../') },
57 | err => {
58 | if (err) return reject(err)
59 |
60 | resolve(0)
61 | }
62 | )
63 | })
64 | }
65 |
66 | async queryPackage(module: string): Promise<any> {
67 | return new Promise((resolve, reject) => {
68 | exec(
69 | `${this.npmBinary} view ${module} --json`,
70 | { cwd: path.resolve(__dirname, '../../../') },
71 | (err, stdout) => {
72 | if (err) return reject(err)
73 |
74 | const res = JSON.parse(stdout)
75 | resolve(res)
76 | }
77 | )
78 | })
79 | }
80 | }
81 |
--------------------------------------------------------------------------------
/src/reports/index.ts:
--------------------------------------------------------------------------------
1 | import scanReport from './scan-report'
2 | import rulesReport from './rules-report'
3 |
4 | const enum scanResult {
5 | pass = 'pass',
6 | fail = 'fail',
7 | warn = 'warn',
8 | }
9 |
10 | const enum scanDataType {
11 | status = 'status',
12 | count = 'count',
13 | }
14 |
15 | export { rulesReport, scanReport, scanResult, scanDataType }
16 |
--------------------------------------------------------------------------------
/src/reports/rules-report.ts:
--------------------------------------------------------------------------------
1 | import Table from 'cli-table'
2 | import chalk from 'chalk'
3 | import CloudGraph, { RuleFinding } from '@cloudgraph/sdk'
4 | import { isEmpty } from 'lodash'
5 |
6 | const { logger } = CloudGraph
7 |
8 | export class RulesReport {
9 | tableHeaders = [chalk.green('ResourceId'), chalk.green('Result')]
10 |
11 | tables: { [policyPack: string]: Table } = {}
12 |
13 | pushData({
14 | policyPack,
15 | ruleDescription,
16 | results,
17 | }: {
18 | policyPack: string
19 | ruleDescription: string
20 | results: RuleFinding[]
21 | }): void {
22 | for (const { resourceId, id, result } of results) {
23 | const tableName = `${policyPack}-${id}`
24 | if (!this.tables[tableName]) {
25 | this.tables[tableName] = new Table({ style: { head: [], border: [] } })
26 | this.tables[tableName].push(
27 | [chalk.italic.green(ruleDescription)],
28 | this.tableHeaders
29 | )
30 | }
31 |
32 | const status = this.getStatus(result)
33 | this.tables[tableName].push([resourceId, status])
34 | }
35 | }
36 |
37 | private getStatus(result: string): string {
38 | let status
39 | switch (result) {
40 | case 'MISSING': {
41 | status = chalk.yellow(
42 | String.fromCodePoint(0x26a0) // warning symbol
43 | )
44 | break
45 | }
46 | case 'FAIL': {
47 | status = chalk.red(
48 | String.fromCodePoint(0x1f6ab) // failure symbol
49 | )
50 | break
51 | }
52 | default: {
53 | status = chalk.green(String.fromCodePoint(0x2714)) // checkmark symbol
54 | }
55 | }
56 | return status
57 | }
58 |
59 | print(): void {
60 | if (!isEmpty(this.tables)) {
61 | logger.info('Printing rules result...')
62 |
63 | for (const tableName in this.tables) {
64 | if (tableName) {
65 | console.log(this.tables[tableName].toString())
66 | }
67 | }
68 | }
69 | }
70 | }
71 |
72 | export default new RulesReport()
73 |
--------------------------------------------------------------------------------
/src/reports/scan-report.ts:
--------------------------------------------------------------------------------
1 | import Table from 'cli-table'
2 | import chalk from 'chalk'
3 | import CloudGraph from '@cloudgraph/sdk'
4 |
5 | import { scanDataType, scanResult } from '.'
6 |
7 | const { logger } = CloudGraph
8 |
9 | interface pushDataParams {
10 | service: string
11 | type: scanDataType
12 | result: scanResult
13 | msg?: string
14 | }
15 |
16 | enum statusLevel {
17 | warn = 'warn',
18 | fail = 'fail',
19 | pass = 'pass',
20 | }
21 |
22 | // used for updating status of a service
23 | enum statusKeyWords {
24 | data = 'data',
25 | connections = 'connections',
26 | }
27 |
28 | const servicesToIgnore = [
29 | /^account$/,
30 | /^tag$/,
31 | /^label$/,
32 | /^billing$/,
33 | /Findings$/,
34 | /^subscription$/,
35 | ]
36 |
37 | // TODO: come back and add tests once testing strategy is determined
38 | export class ScanReport {
39 | constructor() {
40 | this.table = new Table({ head: this.tableHeaders })
41 | }
42 |
43 | tableHeaders = [
44 | chalk.green('Service'),
45 | chalk.green('Resources Found'),
46 | chalk.green('Status'),
47 | ]
48 |
49 | internalTable: { status: statusLevel; data: { [key: string]: string[] }[] } =
50 | { status: statusLevel.pass, data: [{ total: ['0', 'N/A'] }] }
51 |
52 | table: Table
53 |
54 | private containsService(
55 | ignoreExpression: RegExp,
56 | serviceName: string
57 | ): boolean {
58 | try {
59 | const regex = new RegExp(ignoreExpression)
60 | return regex.test(serviceName)
61 | } catch (error) {
62 | return false
63 | }
64 | }
65 |
66 | pushData({ service, type, result }: pushDataParams): void {
67 | if (
68 | servicesToIgnore.some(ignore => this.containsService(ignore, service))
69 | ) {
70 | return
71 | }
72 | const status = this.getStatus(result)
73 | if (this.isInTable(service)) {
74 | this.internalTable.data = this.internalTable.data.map(val => {
75 | if (Object.keys(val).includes(service)) {
76 | const [count, oldStatus] = val[service]
77 |
78 | // Handle count type of data push
79 | if (type === scanDataType.count) {
80 | const newCount: number = 1 + Number(count)
81 | return { [service]: [String(newCount), status] }
82 | }
83 |
84 | // Handle status; we do not want to "upgrade" from a failed or warning status to pass
85 | let newStatus = status
86 | if (oldStatus.includes(statusKeyWords.data)) {
87 | newStatus = oldStatus
88 | }
89 | if (
90 | oldStatus.includes(statusKeyWords.connections) &&
91 | !status.includes(statusKeyWords.data)
92 | ) {
93 | newStatus = oldStatus
94 | }
95 | return { [service]: [`${count}`, newStatus] }
96 | }
97 | // Handle parts of the table that don't need to be updated
98 | return val
99 | })
100 | } else {
101 | this.internalTable.data.push({
102 | [service]: [type === scanDataType.count ? '1' : '0', status],
103 | })
104 | }
105 | if (type === scanDataType.count) {
106 | this.incrementTotalTable()
107 | }
108 | }
109 |
110 | private incrementTotalTable(): void {
111 | const totalIndex = this.internalTable.data.findIndex(val => {
112 | return Object.keys(val).includes('total')
113 | })
114 | if (this.internalTable?.data?.[totalIndex]?.total) {
115 | const [currentCount, status] = this.internalTable.data[totalIndex].total
116 | const newCount = 1 + Number(currentCount)
117 | this.internalTable.data[totalIndex] = {
118 | total: [String(newCount), status],
119 | }
120 | }
121 | }
122 |
123 | private getStatus(result: string): string {
124 | let status
125 | switch (result) {
126 | case statusLevel.warn: {
127 | status = `${chalk.yellow(
128 | String.fromCodePoint(0x26a0) // warning symbol
129 | )} unable to make some connections`
130 | if (this.internalTable.status !== statusLevel.fail) {
131 | this.internalTable.status = statusLevel.warn
132 | }
133 | break
134 | }
135 | case statusLevel.fail: {
136 | status = `${chalk.red(
137 | String.fromCodePoint(0x1f6ab) // failure symbol
138 | )} unable to store data in Dgraph`
139 | this.internalTable.status = statusLevel.fail
140 | break
141 | }
142 | default: {
143 | status = chalk.green(String.fromCodePoint(0x2714)) // checkmark symbol
144 | }
145 | }
146 | return status
147 | }
148 |
149 | print(): void {
150 | logger.info('Printing scan report...')
151 | // flip the table to put total at the bottom
152 | const tableToPrint = [
153 | ...this.internalTable.data.slice(1),
154 | this.internalTable.data[0],
155 | ]
156 | this.table.push(
157 | ...tableToPrint.map(val => {
158 | const key = Object.keys(val)[0]
159 | const [, status] = val[key]
160 | /**
161 | * Color the service key based upon the status in the table.
162 | * We must do this at the end because coloring the text alters the text and we use the text
163 | * to find the correct object when adding to the table
164 | */
165 | let coloredKey = chalk.green(key)
166 | if (status?.includes('connections')) {
167 | coloredKey = chalk.yellow(key)
168 | }
169 | if (status?.includes('data')) {
170 | coloredKey = chalk.red(key)
171 | }
172 | return { [coloredKey]: val[key] }
173 | })
174 | )
175 | console.log(this.table.toString())
176 | if (this.internalTable.status !== statusLevel.pass) {
177 | logger[this.internalTable.status === statusLevel.fail ? 'error' : 'warn'](
178 | `While CG ran successfully, there were some ${
179 | this.internalTable.status === statusLevel.fail ? 'major' : 'minor'
180 | } issues formatting and inserting your data into Dgraph.`
181 | )
182 | logger.info(
183 | 'For a complete list of these errors and what they mean for you, please see https://github.com/cloudgraphdev/cli#common-errors'
184 | )
185 | }
186 | }
187 |
188 | private isInTable(service: string): boolean {
189 | return !!this.internalTable.data.find(val => {
190 | return Object.keys(val).includes(service)
191 | })
192 | }
193 | }
194 |
195 | export default new ScanReport()
196 |
--------------------------------------------------------------------------------
/src/scripts/openChrome.applescript:
--------------------------------------------------------------------------------
1 | (*
2 | Copyright (c) 2015-present, Facebook, Inc.
3 |
4 | This source code is licensed under the MIT license found in the
5 | LICENSE file in the root directory of this source tree.
6 | *)
7 |
8 | property targetTab: null
9 | property targetTabIndex: -1
10 | property targetWindow: null
11 | property theProgram: "Google Chrome"
12 |
13 | on run argv
14 | set theURL to item 1 of argv
15 |
16 | -- Allow requested program to be optional,
17 | -- default to Google Chrome
18 | if (count of argv) > 1 then
19 | set theProgram to item 2 of argv
20 | end if
21 |
22 | using terms from application "Google Chrome"
23 | tell application theProgram
24 |
25 | if (count every window) = 0 then
26 | make new window
27 | end if
28 |
29 | -- 1: Looking for tab running debugger
30 | -- then, Reload debugging tab if found
31 | -- then return
32 | set found to my lookupTabWithUrl(theURL)
33 | if found then
34 | set targetWindow's active tab index to targetTabIndex
35 | tell targetTab to reload
36 | tell targetWindow to activate
37 | set index of targetWindow to 1
38 | return
39 | end if
40 |
41 | -- 2: Looking for Empty tab
42 | -- In case debugging tab was not found
43 | -- We try to find an empty tab instead
44 | set found to my lookupTabWithUrl("chrome://newtab/")
45 | if found then
46 | set targetWindow's active tab index to targetTabIndex
47 | set URL of targetTab to theURL
48 | tell targetWindow to activate
49 | return
50 | end if
51 |
52 | -- 3: Create new tab
53 | -- both debugging and empty tab were not found
54 | -- make a new tab with url
55 | tell window 1
56 | activate
57 | make new tab with properties {URL:theURL}
58 | end tell
59 | end tell
60 | end using terms from
61 | end run
62 |
63 | -- Function:
64 | -- Lookup tab with given url
65 | -- if found, store tab, index, and window in properties
66 | -- (properties were declared on top of file)
67 | on lookupTabWithUrl(lookupUrl)
68 | using terms from application "Google Chrome"
69 | tell application theProgram
70 | -- Find a tab with the given url
71 | set found to false
72 | set theTabIndex to -1
73 | repeat with theWindow in every window
74 | set theTabIndex to 0
75 | repeat with theTab in every tab of theWindow
76 | set theTabIndex to theTabIndex + 1
77 | if (theTab's URL as string) contains lookupUrl then
78 | -- assign tab, tab index, and window to properties
79 | set targetTab to theTab
80 | set targetTabIndex to theTabIndex
81 | set targetWindow to theWindow
82 | set found to true
83 | exit repeat
84 | end if
85 | end repeat
86 |
87 | if found then
88 | exit repeat
89 | end if
90 | end repeat
91 | end tell
92 | end using terms from
93 | return found
94 | end lookupTabWithUrl
95 |
--------------------------------------------------------------------------------
/src/server/index.ts:
--------------------------------------------------------------------------------
1 | import express from 'express'
2 | import { altairExpress } from 'altair-express-middleware'
3 | import expressPlayground from 'graphql-playground-middleware-express'
4 | import { Server } from 'http'
5 |
6 | function renderVoyagerPage(options: { endpointUrl: string }): string {
7 | const { endpointUrl } = options
8 | const version = '1.0.0-rc.31'
9 | return `
10 |
11 |
12 |
13 |
14 |
15 | GraphQL Voyager
16 |
28 |
31 |
32 |
33 |
34 |
35 |
36 |
37 |
38 | Loading...
39 |
40 |
67 |
68 |
69 | `
70 | }
71 |
72 | const voyagerMiddleware = (options: { endpointUrl: string }) => {
73 | return (_req: any, res: any): void => {
74 | res.setHeader('Content-Type', 'text/html')
75 | res.write(renderVoyagerPage(options))
76 | res.end()
77 | }
78 | }
79 | export default class QueryEngine {
80 | constructor(port: string) {
81 | this.port = port
82 | }
83 |
84 | port
85 |
86 | startServer(host: string): Promise<Server> {
87 | return new Promise(resolve => {
88 | const app = express()
89 |
90 | app.use(
91 | '/altair',
92 | altairExpress({
93 | endpointURL: `${host}/graphql`,
94 | initialQuery: '{ queryawsAlb { arn }}',
95 | initialSettings: {
96 | addQueryDepthLimit: 3,
97 | },
98 | })
99 | )
100 |
101 | app.use('/voyager', voyagerMiddleware({ endpointUrl: `${host}/graphql` }))
102 |
103 | // TODO: rework QueryEngine to do this better and only serve one
104 | app.get(
105 | '/playground',
106 | expressPlayground({
107 | endpoint: `${host}/graphql`,
108 | settings: {
109 | 'request.globalHeaders': {},
110 | 'editor.cursorShape': 'line',
111 | 'editor.fontFamily': '\'Source Code Pro\', \'Consolas\', \'Inconsolata\', \'Droid Sans Mono\', \'Monaco\', monospace',
112 | 'editor.fontSize': 14,
113 | 'editor.reuseHeaders': true,
114 | 'editor.theme': 'dark',
115 | 'general.betaUpdates': false,
116 | 'request.credentials': 'omit',
117 | 'schema.polling.enable': false,
118 | 'schema.polling.endpointFilter': '',
119 | 'schema.polling.interval': 100000000,
120 | 'tracing.hideTracingResponse': true,
121 | 'tracing.tracingSupported': false,
122 | },
123 | })
124 | )
125 |
126 | const server = app.listen(Number(this.port), () => {
127 | resolve(server)
128 | })
129 | })
130 | }
131 | }
132 |
--------------------------------------------------------------------------------
/src/storage/dgraph/base.ts:
--------------------------------------------------------------------------------
1 | import {
2 | Logger,
3 | StorageEngineConfig,
4 | StorageEngineConnectionConfig,
5 | } from '@cloudgraph/sdk'
6 | import axios, { AxiosPromise } from 'axios'
7 | import chalk from 'chalk'
8 | import { ExecutionResult } from 'graphql'
9 | import { RequestConfig } from '../types'
10 |
11 | export default class DGraphClientWrapper {
12 | constructor(config: StorageEngineConfig) {
13 | const { logger, ...rest } = config
14 | this.connectionConfig = rest
15 | this.logger = logger
16 | }
17 |
18 | connectionConfig: StorageEngineConnectionConfig
19 |
20 | logger: Logger
21 |
22 | get host(): string {
23 | return `${this.connectionConfig.host}:${this.connectionConfig.port}`
24 | }
25 |
26 | get baseUrl(): string {
27 | return `${this.connectionConfig.scheme}://${this.connectionConfig.host}:${this.connectionConfig.port}`
28 | }
29 |
30 | generateAxiosRequest({
31 | baseUrl,
32 | path: url,
33 | data,
34 | verb,
35 | headers,
36 | }: RequestConfig): AxiosPromise {
37 | return axios({
38 | method: verb || 'post',
39 | baseURL: baseUrl || this.baseUrl,
40 | maxBodyLength: Number(process.env.MAX_BODY_LENGTH) || Infinity,
41 | maxContentLength: Number(process.env.MAX_CONTENT_LENGTH) || Infinity,
42 | url,
43 | headers: {
44 | ...headers,
45 | },
46 | data,
47 | })
48 | }
49 |
50 | async dropAll(): Promise<ExecutionResult> {
51 | return new Promise(async (resolve, reject) => {
52 | this.logger.debug('Dropping schemas and data')
53 | try {
54 | const result = await this.generateAxiosRequest({
55 | path: '/alter',
56 | data: '{"drop_all": true}',
57 | })
58 | this.logger.debug(result.data)
59 | this.logger.debug(`${chalk.green('dropAll')}: Operation successful`)
60 | resolve(result)
61 | } catch (error) {
62 | this.logger.error(`${chalk.red('dropAll')}: Operation failed`)
63 | this.logger.debug(JSON.stringify(error))
64 | reject(error)
65 | }
66 | })
67 | }
68 |
69 | // Drop All Data, but keep the schema.
70 | async dropData(): Promise<ExecutionResult> {
71 | return new Promise(async (resolve, reject) => {
72 | this.logger.debug('Dropping all data')
73 | try {
74 | const result = await this.generateAxiosRequest({
75 | path: '/alter',
76 | data: '{"drop_op": "DATA"}',
77 | })
78 | this.logger.debug(result.data)
79 | this.logger.debug(`${chalk.green('dropData')}: Operation successful.`)
80 | resolve(result)
81 | } catch (error) {
82 | this.logger.error(`${chalk.red('dropData')}: Operation failed.`)
83 | this.logger.debug(JSON.stringify(error))
84 | reject(error)
85 | }
86 | })
87 | }
88 | }
89 |
--------------------------------------------------------------------------------
/src/storage/dgraph/index.ts:
--------------------------------------------------------------------------------
1 | import { ExecutionResult } from 'graphql'
2 | import {
3 | StorageEngineConfig,
4 | StorageEngine,
5 | GraphQLInputData,
6 | GraphQLQueryData
7 | } from '@cloudgraph/sdk'
8 |
9 | import DGraphClientWrapper from './base'
10 | import {
11 | GET_SCHEMA_QUERY,
12 | processGQLExecutionResult,
13 | UPDATE_SCHEMA_QUERY,
14 | } from './utils'
15 |
16 | export default class DgraphEngine
17 | extends DGraphClientWrapper
18 | implements StorageEngine
19 | {
20 | constructor(config: StorageEngineConfig) {
21 | super(config)
22 | this.axiosPromises = []
23 | }
24 |
25 | axiosPromises: (() => Promise<void>)[]
26 |
27 | async healthCheck(showInitialStatus = true): Promise<boolean> {
28 | showInitialStatus &&
29 | this.logger.debug(`running dgraph health check at ${this.host}`)
30 | try {
31 | const healthCheck = await this.generateAxiosRequest({
32 | path: '/health?all',
33 | headers: {
34 | 'Content-Type': 'application/json',
35 | },
36 | })
37 | this.logger.debug(healthCheck.data)
38 | return true
39 | } catch (error: any) {
40 | this.logger.warn(
41 | `dgraph at ${this.host} failed health check. Is dgraph running?`
42 | )
43 | this.logger.debug(error)
44 | return false
45 | }
46 | }
47 |
48 | async validateSchema(schema: string[], versionString: string): Promise<void> {
49 | const versionCaption = versionString.split('-').join(' ')
50 | this.logger.debug(`Validating Schema for ${versionCaption}`)
51 | return new Promise(async (resolve, reject) => {
52 | try {
53 | await this.generateAxiosRequest({
54 | path: '/admin/schema/validate',
55 | data: schema.join(),
56 | headers: {
57 | 'Content-Type': 'text/plain',
58 | },
59 | })
60 | resolve()
61 | } catch (error: any) {
62 | const {
63 | response: {
64 | data: { errors },
65 | },
66 | } = error
67 | this.logger.error('Schema validation failed')
68 | const errMsgs = errors.map((e: Error) =>
69 | e.message.replace('input:', 'line ')
70 | )
71 | this.logger.error(
72 | `${
73 | errMsgs.length
74 | } errors found in ${versionCaption} schema. Check the following lines in the schema.graphql file:\n${errMsgs.join(
75 | '\n'
76 | )}`
77 | )
78 | reject()
79 | }
80 | })
81 | }
82 |
83 | async setSchema(schemas: string[]): Promise<void> {
84 | const data = {
85 | query: UPDATE_SCHEMA_QUERY,
86 | variables: {
87 | schema: schemas.join(),
88 | },
89 | }
90 | try {
91 | await this.generateAxiosRequest({
92 | path: '/admin',
93 | data,
94 | })
95 | .then((res: ExecutionResult) => {
96 | const { data: resData, errors } = res
97 | processGQLExecutionResult({
98 | reqData: data,
99 | resData,
100 | errors,
101 | })
102 | })
103 | .catch(error => Promise.reject(error))
104 | } catch (error: any) {
105 | const {
106 | response: { data: resData, errors },
107 | message,
108 | } = error
109 | this.logger.error(
110 | 'There was an issue pushing the schema into the Dgraph db'
111 | )
112 | this.logger.debug(message)
113 | processGQLExecutionResult({
114 | reqData: data,
115 | resData,
116 | errors,
117 | })
118 | }
119 | }
120 |
121 | async getSchema(): Promise<string> {
122 | try {
123 | const { data } = await this.query(GET_SCHEMA_QUERY, '/admin')
124 | return data?.getGQLSchema?.schema || ''
125 | } catch (error: any) {
126 | const {
127 | response: { data: resData, errors } = { data: null, errors: null },
128 | message,
129 | } = error ?? {}
130 | this.logger.error('There was an issue getting the Dgraph schema')
131 | this.logger.debug(message)
132 | processGQLExecutionResult({ resData, errors })
133 | return ''
134 | }
135 | }
136 |
137 | query(query: string, path = '/graphql'): Promise<any> {
138 | return this.generateAxiosRequest({
139 | path,
140 | data: {
141 | query,
142 | },
143 | })
144 | .then((res: ExecutionResult) => {
145 | const { data: resData } = res
146 | return resData
147 | })
148 | .catch(error => Promise.reject(error))
149 | }
150 |
151 | /**
152 | * Add Service Mutation to axiosPromises Array
153 | */
154 | push(data: GraphQLInputData): void {
155 | const { query, input, patch } = data
156 | const queryData: GraphQLQueryData = {
157 | query,
158 | variables: {
159 | input,
160 | patch,
161 | },
162 | }
163 | this.axiosPromises.push(() =>
164 | this.generateAxiosRequest({
165 | path: '/graphql',
166 | data: queryData,
167 | })
168 | .then((res: ExecutionResult) => {
169 | const { data: resData, errors } = res
170 | processGQLExecutionResult({
171 | reqData: queryData,
172 | resData,
173 | errors,
174 | service: data.name,
175 | })
176 | })
177 | .catch(error => Promise.reject(error))
178 | )
179 | }
180 |
181 | /**
182 | * Executes mutations sequentially into Dgraph
183 | */
184 | async run(dropData = true): Promise<void> {
185 | dropData && (await this.dropData())
186 | for (const mutation of this.axiosPromises) {
187 | try {
188 | await mutation()
189 | } catch (error: any) {
190 | const {
191 | response: { data: resData, errors } = { data: null, errors: null },
192 | message,
193 | } = error ?? {}
194 | this.logger.error('There was an issue pushing data into the Dgraph db')
195 | this.logger.debug(message)
196 | processGQLExecutionResult({ resData, errors })
197 | }
198 | }
199 |
200 | // Ensure mutations array is clean after execution
201 | this.axiosPromises = []
202 | }
203 | }
204 |
--------------------------------------------------------------------------------
/src/storage/dgraph/utils.ts:
--------------------------------------------------------------------------------
1 | import CloudGraph from '@cloudgraph/sdk'
2 | import chalk from 'chalk'
3 | import isEmpty from 'lodash/isEmpty'
4 | import { GraphQLError } from 'graphql'
5 |
6 | import { GraphQLFormattedQuery } from '../types'
7 | import { scanReport, scanDataType, scanResult } from '../../reports'
8 |
9 | const { logger } = CloudGraph
10 |
11 | export const UPDATE_SCHEMA_QUERY = `
12 | mutation($schema: String!) {
13 | updateGQLSchema(input: { set: { schema: $schema } }) {
14 | gqlSchema {
15 | schema
16 | }
17 | }
18 | }`
19 | export const GET_SCHEMA_QUERY = `{
20 | getGQLSchema {
21 | schema
22 | }
23 | }`
24 |
25 | // Look for mutation name and color it red
26 | function printErrorMessage(message: string, additionalInfo: any): void {
27 | let messageToShow = message
28 | const found = additionalInfo?.executedMutationNames?.find((v: string) =>
29 | message.includes(v)
30 | )
31 | if (found) {
32 | messageToShow = message.replace(found, chalk.red(found))
33 | }
34 | messageToShow && logger.error(messageToShow)
35 | }
36 |
37 | function processErrorArrayIfExists({
38 | errors,
39 | variables,
40 | additionalInfo,
41 | service,
42 | }: {
43 | errors?: ReadonlyArray<GraphQLError>
44 | variables: any
45 | additionalInfo?: { executedMutationNames?: string[] }
46 | service?: string
47 | }): void {
48 | let result = scanResult.pass
49 | if (errors) {
50 | result = scanResult.fail
51 | errors.forEach((err: GraphQLError) => {
52 | const { path, locations, message, extensions = {} } = err
53 | printErrorMessage(message, additionalInfo)
54 | // Sometimes dgraph can provide extra information about an error
55 | extensions.code &&
56 | logger.debug(`Additional error info: ${extensions.code}`)
57 | // Happens when data to load into Dgraph fails to pass the schema validation
58 | path && logger.debug(`Additional path info: ${JSON.stringify(path)}`)
59 | if (path?.[0] && path?.[1] && path?.[2]) {
60 | if (path[0] === 'variable') {
61 | if (path[1] === 'input') {
62 | if (typeof path[2] === 'number') {
63 | logger.debug(variables[path[1]][path[2]][path[3]])
64 | }
65 | }
66 | }
67 | }
68 | // Errors that can be schema format/syntax errors
69 | locations &&
70 | logger.debug(
71 | `Additional location info: ${JSON.stringify(locations, null, 2)}`
72 | )
73 | })
74 | }
75 | if (service) {
76 | scanReport.pushData({ service, result, type: scanDataType.status })
77 | }
78 | }
79 |
80 | export function processGQLExecutionResult({
81 | errors: resErrors,
82 | reqData = { query: '', variables: {} },
83 | resData,
84 | service,
85 | }: {
86 | errors?: ReadonlyArray<GraphQLError>
87 | reqData?: GraphQLFormattedQuery
88 | resData?: { [key: string]: any } | null
89 | service?: string
90 | }): void {
91 | // Data interpolated to query. Works for both schema push and data load
92 | const { variables } = reqData
93 | if (resData && !resErrors) {
94 | const { data: mutationResultData, errors: dataErrors } = resData
95 | let executedMutationNames: string[] = []
96 | if (!isEmpty(mutationResultData)) {
97 | executedMutationNames = Object.keys(mutationResultData) || []
98 | if (
99 | !isEmpty(executedMutationNames) &&
100 | executedMutationNames[0].includes('add')
101 | ) {
102 | executedMutationNames.forEach(mutationName => {
103 | if (mutationResultData[mutationName]) {
104 | const { numUids } = mutationResultData[mutationName]
105 | const numUidsString = numUids ? `numUids affected: ${numUids}` : ''
106 | logger.debug(
107 | `mutation ${chalk.green(
108 | mutationName
109 | )} completed successfully. ${numUidsString}`
110 | )
111 | }
112 | })
113 | }
114 | // Leaving this block here in case we need/want
117 | // to print the result of the patch mutations
116 | //
117 | // if (executedMutationNames[0].includes('update')) {
118 | // executedMutationNames.forEach(mutation => {
119 | // const serviceName = mutation.split('update')[1]
120 | // const filter = mutationResultData[mutation][serviceName]
121 | // if (filter && filter[0]) {
122 | // const { id } = filter[0]
123 | // logger.debug(`Connections added for id ${chalk.green(id)}.`)
124 | // }
125 | // })
126 | // }
127 | }
128 | processErrorArrayIfExists({
129 | errors: dataErrors,
130 | variables,
131 | additionalInfo: { executedMutationNames },
132 | service,
133 | })
134 | }
135 | // Data related errors
136 | processErrorArrayIfExists({ errors: resErrors, variables, service })
137 | }
138 |
--------------------------------------------------------------------------------
/src/storage/enums.ts:
--------------------------------------------------------------------------------
1 | /* eslint-disable import/prefer-default-export */
2 | export enum DGraphRole {
3 | admin = 'admin',
4 | client = 'client',
5 | }
6 |
7 | export enum StorageEngineType {
8 | dgraph = 'dgraph',
9 | }
10 |
--------------------------------------------------------------------------------
/src/storage/index.ts:
--------------------------------------------------------------------------------
1 | import { StorageEngine, StorageEngineConfig } from '@cloudgraph/sdk'
2 | import DgraphEngine from './dgraph'
3 |
4 | const engineMap: {
5 | [key: string]: new (config: StorageEngineConfig) => StorageEngine
6 | } = {
7 | dgraph: DgraphEngine,
8 | }
9 |
10 | export default engineMap
11 |
--------------------------------------------------------------------------------
/src/storage/types.ts:
--------------------------------------------------------------------------------
1 | import { Method } from 'axios'
2 |
3 | export interface RequestConfig {
4 | baseUrl?: string
5 | path: string
6 | data?: any
7 | verb?: Method
8 | headers?: { [key: string]: string }
9 | }
10 |
11 | export interface GraphQLFormattedQuery {
12 | query: string
13 | variables: any
14 | }
15 |
--------------------------------------------------------------------------------
/src/types/cfonts/index.d.ts:
--------------------------------------------------------------------------------
1 | declare module 'cfonts'
--------------------------------------------------------------------------------
/src/types/index.ts:
--------------------------------------------------------------------------------
1 | import { ProviderData, StorageEngine } from '@cloudgraph/sdk'
2 |
3 | export interface CloudGraphConfig {
4 | [key: string]: unknown | Record<string, unknown>
5 | }
6 |
7 | export type SchemaMap = {
8 | [schemaName: string]: string
9 | }
10 |
11 | export interface DataToLoad {
12 | provider: string
13 | providerData: ProviderData
14 | storageEngine: StorageEngine
15 | storageRunning: boolean
16 | schemaMap: SchemaMap | undefined
17 | }
18 |
--------------------------------------------------------------------------------
/src/utils/constants.ts:
--------------------------------------------------------------------------------
1 | import { PluginType } from '@cloudgraph/sdk'
2 |
3 | export default {
4 | MAX_RETRY_ATTEMPS: 3,
5 | MAX_BACKOFF_DELAY: 10000,
6 | BASE_BACKOFF_CONSTANT: 2,
7 | }
8 |
9 | export const DEFAULT_CONFIG = {
10 | host: 'localhost',
11 | port: '8997',
12 | scheme: 'http',
13 | }
14 |
15 | export const DGRAPH_CONTAINER_LABEL = 'cloudgraph-cli-dgraph-standalone'
16 | export const DGRAPH_DOCKER_IMAGE_NAME = 'dgraph/standalone:v22.0.1'
17 |
18 | export const messages = {
19 | [PluginType.PolicyPack]: {
20 | singular: 'Policy Pack',
21 | plural: 'policy packs',
22 | },
23 | [PluginType.Provider]: {
24 | singular: 'Provider',
25 | plural: 'providers',
26 | },
27 | }
28 |
29 | export const DEFAULT_CG_CONFIG = {
30 | cloudGraph: {
31 | plugins: {},
32 | storageConfig: DEFAULT_CONFIG,
33 | versionLimit: 10,
34 | queryEngine: 'playground',
35 | port: '5555',
36 | },
37 | }
38 |
--------------------------------------------------------------------------------
/src/utils/data.ts:
--------------------------------------------------------------------------------
1 | import chalk from 'chalk'
2 | import isEmpty from 'lodash/isEmpty'
3 | import CloudGraph, {
4 | ProviderData,
5 | StorageEngine,
6 | SchemaMap,
7 | ServiceConnection,
8 | Client,
9 | Logger,
10 | } from '@cloudgraph/sdk'
11 |
12 | import { scanReport, scanDataType, scanResult } from '../reports'
13 | import { generateMutation, generateUpdateVarsObject } from './mutation'
14 | import { DataToLoad } from '../types'
15 |
16 | const { logger } = CloudGraph
17 |
18 | /**
19 | * Filters connections according to the afterNodeInsertion boolean
20 | * this is used to filter connections that need to:
21 | * 1. Be inserted in the add mutation, afterNodeInsertion = false
22 | * 2. Be inserted in the patch mutation, afterNodeInsertion = true
23 | */
24 | export const filterConnectionsByPriorityOfInsertion = (
25 | connections: { [key: string]: ServiceConnection[] },
26 | afterNodeInsertion: boolean
27 | ): { [key: string]: ServiceConnection[] } => {
28 | const filteredConnections: { [key: string]: ServiceConnection[] } = {}
29 | Object.entries(connections).map(([id, sConnections]) => {
30 | const fConnections = sConnections.filter(
31 | (i: ServiceConnection) =>
32 | !!i.insertAfterNodeInsertion === afterNodeInsertion
33 | )
34 | if (!isEmpty(fConnections)) {
35 | filteredConnections[id] = fConnections
36 | }
37 | })
38 | return filteredConnections
39 | }
40 | // the afterNodeInsertion flag determines whether to select connections
41 | // that need to be inserted in the main add mutation (a batch mutation that
42 | // pushes fresh nodes and connections) or in the patch mutation (a list of
43 | // mutations that patches each node and its connections to other nodes)
44 | export function getConnectedEntity(
45 | service: any,
46 | { entities, connections: allConnections }: ProviderData,
47 | initiatorServiceName: string,
48 | afterNodeInsertion = false
49 | ): Record<string, unknown> {
50 | logger.debug(
51 | `Getting connected entities for ${chalk.green(
52 | initiatorServiceName
53 | )} id = ${chalk.green(service.id)}`
54 | )
55 | const connections: ServiceConnection[] =
56 | filterConnectionsByPriorityOfInsertion(allConnections, afterNodeInsertion)[
57 | service.id
58 | ]
59 | const connectedEntity: any = { ...(afterNodeInsertion ? {} : service) }
60 | let connectionsStatus = scanResult.pass
61 | if (!isEmpty(connections)) {
62 | for (const connection of connections) {
63 | const entityData = entities.find(
64 | ({ name }: { name: string }) => name === connection.resourceType
65 | )
66 | if (entityData && entityData.data) {
67 | const entityForConnection = entityData.data.find(
68 | ({ id }: { id: string }) => connection.id === id
69 | )
70 | if (!isEmpty(entityForConnection)) {
71 | if (!connectedEntity[connection.field]) {
72 | connectedEntity[connection.field] = []
73 | }
74 | connectedEntity[connection.field].push(entityForConnection)
75 | logger.debug(
76 | `(${initiatorServiceName}) ${service.id} ${chalk.green(
77 | '<----->'
78 | )} ${connection.id} (${connection.resourceType})`
79 | )
80 | } else {
81 | connectionsStatus = scanResult.warn
82 | const error = `Malformed connection found between ${chalk.red(
83 | initiatorServiceName
84 | )} && ${chalk.red(connection.resourceType)} services.`
85 | logger.warn(error)
86 | logger.warn(
87 | `(${initiatorServiceName}) ${service.id} ${chalk.red('<-///->')} ${
88 | connection.id
89 | } (${connection.resourceType})`
90 | )
91 | }
92 | }
93 | }
94 | }
95 | scanReport.pushData({
96 | service: initiatorServiceName,
97 | type: scanDataType.status,
98 | result: connectionsStatus,
99 | })
100 | return connectedEntity
101 | }
102 |
103 | export const processConnectionsBetweenEntities = ({
104 | provider,
105 | providerData,
106 | storageEngine,
107 | storageRunning,
108 | schemaMap,
109 | }: {
110 | provider?: string
111 | providerData: ProviderData
112 | storageEngine: StorageEngine
113 | storageRunning: boolean
114 | schemaMap?: SchemaMap
115 | }): void => {
116 | for (const entity of providerData.entities) {
117 | const { data, name, mutation } = entity
118 |
119 | let connectedData
120 |
121 | if (data instanceof Array) {
122 | connectedData = data.map((service: any) => {
123 | scanReport.pushData({
124 | service: name,
125 | type: scanDataType.count,
126 | result: scanResult.pass,
127 | })
128 | return getConnectedEntity(service, providerData, name)
129 | })
130 | } else {
131 | connectedData = data
132 | }
133 |
134 | if (storageRunning) {
135 | // Add service mutation to promises array
136 | storageEngine.push({
137 | query:
138 | mutation?.toString() ||
139 | (provider &&
140 | generateMutation({ type: 'add', provider, entity, schemaMap })) ||
141 | '',
142 | input: connectedData,
143 | name,
144 | })
145 | }
146 | }
147 | }
148 |
149 | export function insertEntitiesAndConnections({
150 | provider,
151 | providerData,
152 | storageEngine,
153 | storageRunning,
154 | schemaMap,
155 | }: DataToLoad): void {
156 | for (const entity of providerData.entities) {
157 | try {
158 | const { data, mutation, name } = entity
159 | const connectedData = data.map((service: any) => {
160 | scanReport.pushData({
161 | service: name,
162 | type: scanDataType.count,
163 | result: scanResult.pass,
164 | })
165 | return getConnectedEntity(service, providerData, name)
166 | })
167 | if (storageRunning) {
168 | const query =
169 | mutation?.toString() ||
170 | generateMutation({ type: 'add', provider, entity, schemaMap })
171 | storageEngine.push({ query, input: connectedData, name })
172 | }
173 | } catch (error) {
174 | logger.debug(error)
175 | }
176 | }
177 | }
178 |
179 | export function processConnectionsAfterInitialInsertion({
180 | provider,
181 | providerData,
182 | storageEngine,
183 | storageRunning,
184 | schemaMap,
185 | }: DataToLoad): void {
186 | const additionalConnections: {
187 | [key: string]: ServiceConnection[]
188 | } = filterConnectionsByPriorityOfInsertion(providerData.connections, true)
189 | if (!isEmpty(additionalConnections)) {
190 | // Filter resourceTypes that have additional connections to process
191 | const resourcesWithAdditionalConnections = new Set(
192 | Object.values(additionalConnections)
193 | .flat()
194 | .map(({ resourceType }) => resourceType)
195 | )
196 | // Filter entities that match filtered resourceTypes
197 | const entities = providerData.entities.filter(({ name }) =>
198 | resourcesWithAdditionalConnections.has(name)
199 | )
200 | for (const entity of entities) {
201 | try {
202 | const { data, name } = entity
203 | data.map((service: any) => {
204 | const connections = getConnectedEntity(
205 | service,
206 | providerData,
207 | name,
208 | true
209 | )
210 | if (!isEmpty(connections)) {
211 | if (storageRunning) {
212 | const query = generateMutation({
213 | type: 'update',
214 | provider,
215 | entity,
216 | schemaMap,
217 | })
218 | const patch = generateUpdateVarsObject(service, connections)
219 | // Add service mutation to promises array
220 | storageEngine.push({ query, patch, name })
221 | }
222 | }
223 | })
224 | } catch (error) {
225 | logger.debug(error)
226 | }
227 | }
228 | }
229 | }
230 |
231 | export const loadAllData = (
232 | providerClient: Client,
233 | data: DataToLoad,
234 | loggerInstance: Logger
235 | ): void => {
236 | loggerInstance.startSpinner(
237 | `Inserting entities and connections for ${chalk.italic.green(
238 | data.provider
239 | )}`
240 | )
241 | insertEntitiesAndConnections(data)
242 | loggerInstance.successSpinner(
243 | `Entities and connections inserted successfully for ${chalk.italic.green(
244 | data.provider
245 | )}`
246 | )
247 | loggerInstance.startSpinner(
248 | `Processing additional service connections for ${chalk.italic.green(
249 | data.provider
250 | )}`
251 | )
252 | processConnectionsAfterInitialInsertion(data)
253 | loggerInstance.successSpinner(
254 | `Additional connections processed successfully for ${chalk.italic.green(
255 | data.provider
256 | )}`
257 | )
258 | }
259 |
--------------------------------------------------------------------------------
/src/utils/flags.ts:
--------------------------------------------------------------------------------
1 | import { Flags as flags } from '@oclif/core'
2 |
3 | export default {
4 | // devMode flag
5 | dev: flags.boolean({ description: 'Turn on developer mode' }),
6 | // dgraph host
7 | dgraph: flags.string({
8 | char: 'd',
9 | env: 'CG_HOST_PORT',
10 | description: 'Set where dgraph is running (default localhost:8997)',
11 | }),
12 | // storage engine to use
13 | storage: flags.string({
14 | char: 's',
15 | description:
16 | 'Select a storage engine to use. Currently only supports Dgraph',
17 | options: ['dgraph'],
18 | }),
19 | // dir to store cloud graph data versions in
20 | directory: flags.string({
21 | description:
22 | 'Set the folder where CloudGraph will store data. (default cg)',
23 | }),
24 | // serve query engine after scan/load
25 | 'no-serve': flags.boolean({
26 | default: false,
27 | description: 'Set to not serve a query engine',
28 | }),
29 | // port for query engine
30 | port: flags.integer({
31 | char: 'p',
32 | env: 'CG_QUERY_PORT',
33 | description: 'Set port to serve query engine',
34 | }),
35 | // Query Engine to use
36 | 'query-engine': flags.string({
37 | char: 'q',
38 | description: 'Query engine to launch',
39 | options: ['playground', 'altair'],
40 | }),
41 | // version limit
42 | 'version-limit': flags.string({
43 | char: 'l',
44 | description:
45 | 'Limit the number of version folders stored on the filesystem (default 10)',
46 | }),
47 | // use roles flag (AWS only)
48 | 'use-roles': flags.boolean({
49 | default: false,
50 | description:
51 | 'Set to true to use roleARNs instead of profiles for AWS credentials',
52 | }),
53 | // Policy packs to use
54 | policies: flags.string({
55 | char: 'P',
56 | description: 'Policy Packs to execute during scan',
57 | }),
58 | }
59 |
--------------------------------------------------------------------------------
/src/utils/index.ts:
--------------------------------------------------------------------------------
1 | import { StorageEngineConnectionConfig } from '@cloudgraph/sdk'
2 | import boxen from 'boxen'
3 | import CFonts from 'cfonts'
4 | import chalk from 'chalk'
5 | import { exec } from 'child_process'
6 | import fs from 'fs'
7 | import glob from 'glob'
8 | import path from 'path'
9 | import detect from 'detect-port'
10 |
11 | import C, { DEFAULT_CONFIG, DGRAPH_CONTAINER_LABEL } from '../utils/constants'
12 |
13 | export const getKeyByValue = (
14 | object: Record<string, any>,
15 | value: any
16 | ): string | undefined => {
17 | return Object.keys(object).find(key => object[key] === value)
18 | }
19 |
20 | export function moduleIsAvailable(modulePath: string): boolean {
21 | try {
22 | require.resolve(modulePath)
23 | return true
24 | } catch (error) {
25 | return false
26 | }
27 | }
28 |
29 | export function getProviderDataFile(
30 | dirPath: string,
31 | provider: string
32 | ): string | void {
33 | const fileGlob = `${dirPath}${provider}*.json`
34 | const fileArray = glob.sync(fileGlob)
35 | if (fileArray && fileArray.length > 0) {
36 | return fileArray[0]
37 | }
38 | }
39 |
40 | const mapFileNameToHumanReadable = (file: string): string => {
41 | const fileNameParts = file.split('/')
42 | const fileName = fileNameParts[fileNameParts.length - 1]
43 | const [providerName, timestamp] = fileName.replace('.json', '').split('_')
44 | return `${providerName} ${new Date(Number(timestamp)).toISOString()}`
45 | }
46 |
47 | // TODO: this could be refactored to go right to the correct version folder (avoid line 70)
48 | // if we extracted the version part of the url and passed to this func
49 | const findProviderFileLocation = (directory: string, file: string): string => {
50 | const [providerName, date] = file.trim().split(' ')
51 | const fileName = `${providerName}_${Date.parse(date)}`
52 | const fileGlob = path.join(directory, `/version-*/${fileName}.json`)
53 | const fileArray = glob.sync(fileGlob)
54 | if (fileArray && fileArray.length > 0) {
55 | return fileArray[0]
56 | }
57 | return ''
58 | }
59 |
60 | export function makeDirIfNotExists(dir: string): void {
61 | if (!fs.existsSync(dir)) {
62 | fs.mkdirSync(dir, { recursive: true })
63 | }
64 | }
65 |
66 | export function buildFixedPath(dir: string, provider?: string): string {
67 | const dirArray = provider?.replace(/\\/g, '/').includes('/')
68 | ? provider?.replace(/\\/g, '/').split('/')
69 | : []
70 | return path.normalize(
71 | `${dir}/${dirArray.slice(0, dirArray.length - 1).join('/')}`
72 | )
73 | }
74 |
75 | export function writeGraphqlSchemaToFile(
76 | dirPath: string,
77 | schema: string,
78 | provider?: string
79 | ): void {
80 | makeDirIfNotExists(buildFixedPath(dirPath, provider))
81 | fs.writeFileSync(
82 | path.normalize(
83 | path.join(
84 | dirPath,
85 | provider ? `/${provider}_schema.graphql` : '/schema.graphql'
86 | )
87 | ),
88 | schema
89 | )
90 | }
91 |
92 | export function printWelcomeMessage(): void {
93 | CFonts.say('Welcome to|CloudGraph!', {
94 | font: 'grid',
95 | colors: ['#666EE8', '#B8FFBD', '#B8FFBD'],
96 | lineHeight: 3,
97 | align: 'center',
98 | })
99 | console.log(
100 | boxen(chalk.italic.green('By AutoCloud'), {
101 | borderColor: 'green',
102 | align: 'center',
103 | borderStyle: 'singleDouble',
104 | float: 'center',
105 | padding: 1,
106 | })
107 | )
108 | }
109 |
110 | export function printBoxMessage(msg: string): void {
111 | console.log(
112 | boxen(msg, {
113 | borderColor: 'green',
114 | })
115 | )
116 | }
117 |
118 | export function getVersionFolders(
119 | directory: string,
120 | provider?: string
121 | ): { name: string; ctime: Date }[] {
122 | const folderGlob = path.join(directory, '/version-*/')
123 | const folders = glob.sync(folderGlob)
124 | if (folders && folders.length > 0) {
125 | return folders
126 | .map((name: string) => ({ name, ctime: fs.statSync(name).ctime }))
127 | .filter(({ name }: { name: string }) => {
128 | if (provider) {
129 | const filesInFolder = glob.sync(`${name}**/*`)
130 | if (
131 | filesInFolder.find((val: string) =>
132 | val.includes(`${provider}_schema.graphql`)
133 | )
134 | ) {
135 | return true
136 | }
137 | return false
138 | }
139 | return true
140 | })
141 | .sort(
142 | (a: { name: string; ctime: Date }, b: { name: string; ctime: Date }) =>
143 | a.ctime.getTime() - b.ctime.getTime()
144 | )
145 | }
146 | return []
147 | }
148 |
149 | export function deleteFolder(dirPath: string): void {
150 | fs.rmSync(dirPath, { recursive: true })
151 | }
152 |
153 | export const sleep = (ms: number): Promise<void> =>
154 | new Promise(resolve => setTimeout(resolve, ms))
155 |
156 | export const calculateBackoff = (n: number): number => {
157 | const temp = Math.min(
158 | C.BASE_BACKOFF_CONSTANT ** n + Math.random(),
159 | C.MAX_BACKOFF_DELAY
160 | )
161 | return (
162 | temp / C.BASE_BACKOFF_CONSTANT +
163 | Math.min(0, (Math.random() * temp) / C.BASE_BACKOFF_CONSTANT)
164 | )
165 | }
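    | // Worked example: with C.BASE_BACKOFF_CONSTANT = 2 and C.MAX_BACKOFF_DELAY = 10000
    | // (see constants.ts) and n = 3, temp = min(2 ** 3 + jitter, 10000) is roughly 8-9;
    | // the Math.min(0, ...) term is always 0 for the non-negative jitter, so the
    | // returned delay is temp / 2, i.e. roughly 4-4.5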
166 |
167 | export const getPort = (
168 | hostname: string,
169 | scheme: string,
170 | port?: string
171 | ): string => {
172 | if (hostname !== 'localhost' && !port) {
173 | switch (scheme) {
174 | case 'http':
175 | return '80'
176 | case 'https':
177 | return '443'
178 | default:
179 | return '80'
180 | }
181 | }
182 |
183 | if (port) {
184 | return port
185 | }
186 |
187 | return DEFAULT_CONFIG.port
188 | }
189 |
190 | export const getDefaultStorageEngineConnectionConfig =
191 | (): typeof DEFAULT_CONFIG => DEFAULT_CONFIG
192 |
193 | export const getDefaultEndpoint = (): string =>
194 | `${DEFAULT_CONFIG.scheme}://${DEFAULT_CONFIG.host}:${DEFAULT_CONFIG.port}`
195 |
196 | export const getStorageEngineConnectionConfig = (
197 | fullUrl: string = getDefaultEndpoint()
198 | ): StorageEngineConnectionConfig => {
199 | const { hostname: host, port, protocol } = new URL(fullUrl)
200 | const scheme = protocol.split(':')[0]
201 | return {
202 | host,
203 | port: getPort(host, protocol, port),
204 | scheme,
205 | }
206 | }
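    | // For example: getStorageEngineConnectionConfig('http://localhost:8997')
    | // returns { host: 'localhost', port: '8997', scheme: 'http' }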
207 |
208 | export const execCommand = (cmd: string): Promise<string> => {
209 | return new Promise((resolve, reject) => {
210 | exec(cmd, (error: any, stdout: any, stderr: any) => {
211 | if (error) {
212 | reject(error)
213 | }
214 | resolve(stdout || stderr)
215 | })
216 | })
217 | }
218 |
219 | export const findExistingDGraphContainerId = async (
220 | statusFilter: string
221 | ): Promise<string> => {
222 | let result: string
223 | let stdout: any
224 | stdout = await execCommand(
225 | `docker ps --filter label=${DGRAPH_CONTAINER_LABEL} --filter status=${statusFilter} --quiet`
226 | )
227 | result = stdout.trim()
228 | if (!result) {
229 | stdout = await execCommand(
230 | `docker ps --filter name=dgraph --filter status=${statusFilter} --quiet`
231 | )
232 | result = stdout.trim()
233 | }
234 | return result
235 | }
236 |
237 | export const fileUtils = {
238 | mapFileNameToHumanReadable,
239 | makeDirIfNotExists,
240 | writeGraphqlSchemaToFile,
241 | getVersionFolders,
242 | findProviderFileLocation,
243 | getProviderDataFile,
244 | deleteFolder,
245 | }
246 |
247 | export const getNextPort = async (port: number): Promise<string> => {
248 | const availablePort = await detect(port)
249 | return String(availablePort)
250 | }
251 |
--------------------------------------------------------------------------------
/src/utils/mutation.ts:
--------------------------------------------------------------------------------
1 | import { Entity } from '@cloudgraph/sdk'
2 | import { SchemaMap } from '../types'
3 |
4 | export function capitalizeString(input: string): string {
5 | return input.charAt(0).toUpperCase() + input.slice(1)
6 | }
7 |
8 | export const getResourceNameForMutationGenerator = (
9 | entity: Entity,
10 | schemaMap: SchemaMap | undefined
11 | ): string =>
12 | ((schemaMap && schemaMap[entity.name]) || entity.className) as string
13 |
14 | export function generateMutation({
15 | type,
16 | provider,
17 | entity,
18 | schemaMap,
19 | }: {
20 | type: 'add' | 'update'
21 | provider: string
22 | entity: Entity
23 | schemaMap: SchemaMap | undefined
24 | }): string {
25 | const service: string = getResourceNameForMutationGenerator(entity, schemaMap)
26 | const capitalizedType = capitalizeString(type)
27 | // cases: add(upsert), update(update)
28 | // input names are different for upsert and update
29 | const mutationVarName = type === 'add' ? '$input' : '$patch'
30 | const providerServiceString = service.includes(provider)
31 | ? service
32 | : `${provider}${capitalizeString(service)}`
33 | // We get the mutation type name for this mutation
34 | const mutationInputTypeName = `${capitalizedType}${providerServiceString}Input`
35 | // When upserting we insert several entities at a time
36 | // Whereas updating we update edges for one entity at a time
37 | const mutationInputType =
38 | type === 'add'
39 | ? `[${mutationInputTypeName}!]!`
40 | : `${mutationInputTypeName}!`
41 | // Add the upsert boolean if the mutation is an upsert
42 | const mutationAdditionalArgs = type === 'add' ? ',upsert:true' : ''
43 | // For upsert we tell the mutation to return the number of affected nodes
44 | // For update we pass the query that filter the node in order to update its edges
45 | const internalQuery =
46 | type === 'add' ? 'numUids' : `${providerServiceString}{id}`
47 | // And we put everything together
48 | // eslint-disable-next-line max-len
49 | const mutation = `mutation(${mutationVarName}:${mutationInputType}){${type}${providerServiceString}(input:${mutationVarName}${mutationAdditionalArgs}){${internalQuery}}}`
50 | return mutation
51 | }
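   | // Illustrative output (not in the original source): for provider 'aws' and an entity
   | // whose resolved schema name is 'awsAlb', type 'add' yields
   | //   mutation($input:[AddawsAlbInput!]!){addawsAlb(input:$input,upsert:true){numUids}}
   | // and type 'update' yields
   | //   mutation($patch:UpdateawsAlbInput!){updateawsAlb(input:$patch){awsAlb{id}}}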
52 |
53 | export const generateUpdateVarsObject = (
54 | service: any,
55 | connections: Record<string, unknown>
56 | ): { filter: { id: { eq: string } }; set: Record<string, unknown> } => ({
57 | filter: {
58 | id: { eq: service.id },
59 | },
60 | set: {
61 | ...connections,
62 | },
63 | })
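   | // Illustrative example: for a service with id 'sg-123' (hypothetical) and
   | // connections { awsEc2: [{ id: 'i-abc' }] }, this produces the $patch variable
   | //   { filter: { id: { eq: 'sg-123' } }, set: { awsEc2: [{ id: 'i-abc' }] } }
   | // used by the 'update' mutation generated above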
64 |
--------------------------------------------------------------------------------
/src/utils/open.ts:
--------------------------------------------------------------------------------
1 | import childProcess, { ChildProcess } from 'child_process'
2 | import open from 'open'
3 |
4 | const openBrowser = (url: string): Promise<ChildProcess | void> => {
5 | // Skip opening browser while testing
6 | if (process.env.NODE_ENV === 'test') {
7 | return Promise.resolve()
8 | }
9 | if (process.platform === 'darwin') {
10 | // Will use the first open browser found from list
11 | const supportedChromiumBrowsers = [
12 | 'Google Chrome Canary',
13 | 'Google Chrome',
14 | 'Microsoft Edge',
15 | 'Brave Browser',
16 | 'Vivaldi',
17 | 'Chromium',
18 | ]
19 |
20 | for (const chromiumBrowser of supportedChromiumBrowsers) {
21 | try {
22 | // Find root directory
23 | const directory = __dirname.split('/').slice(0, -1)
24 | // Try our best to reuse existing tab
25 | // on OSX Chromium-based browser with AppleScript
26 | childProcess.execSync(`ps cax | grep "${chromiumBrowser}"`)
27 | childProcess.execSync(
28 | `osascript openChrome.applescript "${encodeURI(
29 | url
30 | )}" "${chromiumBrowser}"`,
31 | {
32 | cwd: `${directory.join('/')}/scripts/`,
33 | stdio: 'ignore',
34 | }
35 | )
36 | return new Promise(() => {
37 | // Keep the process active
38 | })
39 | } catch (error) {
40 | // Ignore commands errors.
41 | }
42 | }
43 | }
44 |
45 | // Fallback to open
46 | // (It will always open new tab)
47 | return open(url, { wait: true })
48 | }
49 |
50 | export default openBrowser
51 |
--------------------------------------------------------------------------------
/src/utils/questions.ts:
--------------------------------------------------------------------------------
1 | import { ConfirmQuestion, ListQuestion, InputQuestion } from 'inquirer'
2 | import { getDefaultEndpoint } from '.'
3 |
4 | export const overwriteQuestionPrompt = (
5 | category: string
6 | ): ConfirmQuestion[] => [
7 | {
8 | type: 'confirm',
9 | message: `Would you like to change ${category} config`,
10 | name: 'overwrite',
11 | default: true,
12 | },
13 | ]
14 |
15 | export const getProviderQuestion: ListQuestion[] = [
16 | {
17 | type: 'list',
18 | name: 'provider',
19 | message: 'Which cloud provider would you like to use?',
20 | choices: ['aws', 'azure', 'gcp', 'k8s', 'tencent'],
21 | },
22 | ]
23 |
24 | export const dGraphConfigQuestions: InputQuestion[] = [
25 | {
26 | type: 'input',
27 | message:
28 | 'Input your dgraph host URL; if you are unsure, use the default by pressing ENTER',
29 | name: 'receivedUrl',
30 | default: getDefaultEndpoint(),
31 | },
32 | {
33 | type: 'input',
34 | message:
35 | 'Enter the maximum number of scanned versions of your cloud data that you would like to store',
36 | name: 'vLimit',
37 | default: 10,
38 | },
39 | ]
40 |
41 | export const queryEngineConfigQuestions: ListQuestion[] = [
42 | {
43 | type: 'list',
44 | message: 'What tool would you like to query your data with?',
45 | name: 'inputQueryEngine',
46 | choices: [
47 | {
48 | name: 'GraphQL Playground (https://github.com/graphql/graphql-playground)',
49 | value: 'playground',
50 | short: 'GraphQL Playground',
51 | },
52 | {
53 | name: 'Altair GraphQL Client (https://altair.sirmuel.design/)',
54 | value: 'altair',
55 | short: 'Altair GraphQL Client',
56 | },
57 | ],
58 | default: 'playground',
59 | },
60 | ]
61 |
--------------------------------------------------------------------------------
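The arrays exported by src/utils/questions.ts above are plain inquirer question definitions; a sketch of how they are consumed (answer keys come from each question's name field). The wrapper function here is hypothetical; the actual prompting is done by the init command:

import inquirer from 'inquirer'
import {
  dGraphConfigQuestions,
  queryEngineConfigQuestions,
} from './src/utils/questions'

const collectCloudGraphAnswers = async () => {
  // Answers are keyed by the `name` of each question definition.
  const { receivedUrl, vLimit } = await inquirer.prompt(dGraphConfigQuestions)
  const { inputQueryEngine } = await inquirer.prompt(queryEngineConfigQuestions)
  return { receivedUrl, vLimit, inputQueryEngine }
}
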
/test/commands/init.test.ts:
--------------------------------------------------------------------------------
1 | import inquirer from 'inquirer'
2 | import {
3 | getInitCommand,
4 | initTestSuite,
5 | flagTestHelper,
6 | initCommandArgvGetterMethodTester,
7 | initCommandPromptGetterMethodTester,
8 | initCommandNoOverwriteTester,
9 | runInitCommandTester,
10 | removeTestDirs,
11 | saveTestCloudGraphConfigFile,
12 | } from '../helpers'
13 | import {
14 | askForDGraphConfigFlagsMock,
15 | askForDGraphConfigPromptMock,
16 | askForQueryEngineConfigFlagsMock,
17 | askForQueryEngineConfigPromptMock,
18 | fetchCloudGraphConfigFlagsMock,
19 | fetchCloudGraphConfigPromptMock,
20 | getCloudGraphConfigFlagsMock,
21 | runInitCommandMock,
22 | testDGraphDirectory,
23 | testEndpoint,
24 | testQueryEngine,
25 | testQueryEnginePort,
26 | testStorageEngine,
27 | testVersionLimit,
28 | } from '../helpers/mocks'
29 | import InitCommandClass from '../../src/commands/init'
30 | import {
31 | dGraphConfigQuestions,
32 | overwriteQuestionPrompt,
33 | queryEngineConfigQuestions,
34 | } from '../../src/utils/questions'
35 | import { RunInitCommandTestType } from '../helpers/types'
36 |
37 | initTestSuite({ libsToMock: ['inquirer'] })
38 |
39 | describe('Init command', () => {
40 | let InitCommand: InitCommandClass
41 |
42 | describe('Configuration prompts initial check', () => {
43 | const inquirerPromptSpy = jest
44 | .spyOn(inquirer, 'prompt')
45 | .mockImplementation()
46 | beforeEach(async () => {
47 | InitCommand = await getInitCommand([''], 'aws')
48 | jest.clearAllMocks()
49 | })
50 | afterEach(() => {
51 | inquirerPromptSpy.mockReset()
52 | })
53 |
54 | it('should call promptForDGraphConfig', async () => {
55 | await InitCommand.interface.prompt(dGraphConfigQuestions)
56 | expect(inquirerPromptSpy).toHaveBeenCalledTimes(1)
57 | expect(inquirerPromptSpy).toHaveBeenCalledWith(dGraphConfigQuestions)
58 | })
59 | it('should call promptForQueryEngineConfig', async () => {
60 | await InitCommand.interface.prompt(queryEngineConfigQuestions)
61 | expect(inquirerPromptSpy).toHaveBeenCalledTimes(1)
62 | expect(inquirerPromptSpy).toHaveBeenCalledWith(queryEngineConfigQuestions)
63 | })
64 | it('should call promptForConfigOverwrite', async () => {
65 | await InitCommand.promptForConfigOverwrite('any')
66 | expect(inquirerPromptSpy).toHaveBeenCalledTimes(1)
67 | expect(inquirerPromptSpy).toHaveBeenCalledWith(
68 | overwriteQuestionPrompt('any')
69 | )
70 | })
71 | })
72 |
73 | describe('Configuration prompt answers parse check', () => {
74 | const inquirerPromptSpy = jest
75 | .spyOn(inquirer, 'prompt')
76 | .mockImplementation()
77 |
78 | describe('Overwrite = true', () => {
79 | beforeEach(() => {
80 | inquirerPromptSpy.mockReset()
81 | })
82 |
83 | it('should call askForDGraphConfig and return the desired configuration', async () => {
84 | await initCommandPromptGetterMethodTester(
85 | inquirerPromptSpy,
86 | askForDGraphConfigPromptMock(true)
87 | )
88 | })
89 |
90 | it('should call askForQueryEngineConfig and return the desired configuration', async () => {
91 | await initCommandPromptGetterMethodTester(
92 | inquirerPromptSpy,
93 | askForQueryEngineConfigPromptMock(true)
94 | )
95 | })
96 |
97 | it('should call fetchCloudGraphConfig and return the desired configuration(integration test)', async () => {
98 | InitCommand = await getInitCommand([''], 'aws')
99 | await saveTestCloudGraphConfigFile(InitCommand)
100 | await initCommandPromptGetterMethodTester(
101 | inquirerPromptSpy,
102 | fetchCloudGraphConfigPromptMock(true),
103 | true
104 | )
105 | })
106 | })
107 |
108 | describe('Overwrite = false', () => {
109 | beforeEach(() => {
110 | jest.clearAllMocks()
111 | inquirerPromptSpy.mockReset()
112 | })
113 |
114 | it('should call askForDGraphConfig and return the desired configuration', async () => {
115 | await initCommandNoOverwriteTester(askForDGraphConfigPromptMock(false))
116 | })
117 |
118 | it('should call askForQueryEngineConfig and return the desired configuration', async () => {
119 | await initCommandNoOverwriteTester(
120 | askForQueryEngineConfigPromptMock(false)
121 | )
122 | })
123 |
124 | it('should call fetchCloudGraphConfig and return the desired configuration(integration test)', async () => {
125 | inquirerPromptSpy.mockResolvedValueOnce({ overwrite: false })
126 | await initCommandNoOverwriteTester(
127 | fetchCloudGraphConfigPromptMock(false)
128 | )
129 | })
130 | })
131 | })
132 |
133 | describe('Configuration flags parse check (Base class)', () => {
134 | beforeEach(() => {
135 | jest.clearAllMocks()
136 | })
137 | it('should set the "--dgraph" flag correctly', async () => {
138 | await flagTestHelper('dgraph', testEndpoint)
139 | })
140 | it('should set the "--version-limit" flag correctly', async () => {
141 | await flagTestHelper('version-limit', testVersionLimit)
142 | })
143 | it('should set the "--port"(queryEngine port) flag correctly', async () => {
144 | await flagTestHelper('port', testQueryEnginePort)
145 | })
146 | it('should set the "--query-engine" flag correctly', async () => {
147 | await flagTestHelper('query-engine', testQueryEngine)
148 | })
149 | it('should set the "--storage" flag correctly', async () => {
150 | await flagTestHelper('storage', testStorageEngine)
151 | })
152 | it('should set the "--dev" flag correctly', async () => {
153 | await flagTestHelper('dev', undefined)
154 | })
155 | it('should set the "--directory" flag correctly', async () => {
156 | await flagTestHelper('directory', testDGraphDirectory)
157 | })
158 | it('should set the "--no-serve" flag correctly', async () => {
159 | await flagTestHelper('no-serve', undefined)
160 | })
161 | })
162 |
163 | describe('Configuration flags parse check (Init class)', () => {
164 | describe('Overwrite = true', () => {
165 | beforeEach(() => {
166 | jest.clearAllMocks()
167 | })
168 | it('should call askForDGraphConfig and get parsed args', async () => {
169 | await initCommandArgvGetterMethodTester(
170 | askForDGraphConfigFlagsMock(true)
171 | )
172 | })
173 | it('should call askForQueryEngineConfig and get parsed args', async () => {
174 | await initCommandArgvGetterMethodTester(
175 | askForQueryEngineConfigFlagsMock(true)
176 | )
177 | })
178 | it('should call getCloudGraphConfig and get parsed args(full config)', async () => {
179 | await initCommandArgvGetterMethodTester(
180 | getCloudGraphConfigFlagsMock(true)
181 | )
182 | })
183 | it('should call fetchCloudGraphConfig and get parsed args(integration test)', async () => {
184 | await initCommandArgvGetterMethodTester(
185 | fetchCloudGraphConfigFlagsMock(true)
186 | )
187 | })
188 | })
189 | describe('Overwrite = false', () => {
190 | const inquirerPromptSpy = jest
191 | .spyOn(inquirer, 'prompt')
192 | .mockImplementation()
193 | beforeEach(() => {
194 | jest.clearAllMocks()
195 | inquirerPromptSpy.mockReset()
196 | })
197 |
198 | it('should call askForDGraphConfig and get parsed args', async () => {
199 | await initCommandNoOverwriteTester(askForDGraphConfigFlagsMock(false))
200 | })
201 | it('should call askForQueryEngineConfig and get parsed args', async () => {
202 | await initCommandNoOverwriteTester(
203 | askForQueryEngineConfigFlagsMock(false)
204 | )
205 | })
206 | it('should call getCloudGraphConfig and get parsed args(full config)', async () => {
207 | await initCommandNoOverwriteTester(getCloudGraphConfigFlagsMock(false))
208 | })
209 | it('should call fetchCloudGraphConfig and get parsed args(integration test)', async () => {
210 | inquirerPromptSpy.mockResolvedValueOnce({ overwrite: false })
211 | await initCommandNoOverwriteTester(
212 | fetchCloudGraphConfigFlagsMock(false)
213 | )
214 | })
215 | })
216 | })
217 |
218 | describe('Command test(Full integration test)', () => {
219 | const inquirerPromptSpy = jest
220 | .spyOn(inquirer, 'prompt')
221 | .mockImplementation()
222 | beforeEach(() => {
223 | jest.clearAllMocks()
224 | inquirerPromptSpy.mockReset()
225 | })
226 | afterAll(() => {
227 | removeTestDirs()
228 | })
229 |
230 | describe('When a config file already exist', () => {
231 | beforeAll(async () => {
232 | InitCommand = await getInitCommand([''], 'aws')
233 | await saveTestCloudGraphConfigFile(InitCommand)
234 | })
235 | it('should run command skipping config overwrite and check that the config file is unchanged', async () => {
236 | await runInitCommandTester(
237 | inquirerPromptSpy,
238 | runInitCommandMock(
239 | {
240 | overwriteProviderConfig: false,
241 | overwriteCloudGraphConfig: false,
242 | },
243 | RunInitCommandTestType.prompt
244 | )
245 | )
246 | })
247 | it('should run command with mocked config prompts and check that the config file is correct', async () => {
248 | await runInitCommandTester(
249 | inquirerPromptSpy,
250 | runInitCommandMock(
251 | {
252 | overwriteProviderConfig: false,
253 | overwriteCloudGraphConfig: true,
254 | },
255 | RunInitCommandTestType.prompt
256 | )
257 | )
258 | })
259 | it('should run command with mocked config flags and check that the config file is correct', async () => {
260 | await runInitCommandTester(
261 | inquirerPromptSpy,
262 | runInitCommandMock(
263 | {
264 | overwriteProviderConfig: false,
265 | overwriteCloudGraphConfig: true,
266 | },
267 | RunInitCommandTestType.flags
268 | )
269 | )
270 | })
271 | })
272 |
273 | describe('First run (no config file)', () => {
274 | beforeEach(() => {
275 | jest.clearAllMocks()
276 | inquirerPromptSpy.mockReset()
277 | })
278 |
279 | it('should run command with mocked config prompts and check that the config file is correct', async () => {
280 | await runInitCommandTester(
281 | inquirerPromptSpy,
282 | runInitCommandMock(
283 | {
284 | overwriteProviderConfig: false,
285 | overwriteCloudGraphConfig: true,
286 | },
287 | RunInitCommandTestType.prompt
288 | ),
289 | true
290 | )
291 | })
292 | it('should run command with mocked config flags and check that the config file is correct', async () => {
293 | await runInitCommandTester(
294 | inquirerPromptSpy,
295 | runInitCommandMock(
296 | {
297 | overwriteProviderConfig: false,
298 | overwriteCloudGraphConfig: true,
299 | },
300 | RunInitCommandTestType.flags
301 | ),
302 | true
303 | )
304 | })
305 | })
306 |
307 | // TODO: Add test with overwriteProviderConfig when tests for BaseCommand and Manager classes are added
308 | })
309 | })
310 |
--------------------------------------------------------------------------------
/test/commands/launch.test.ts:
--------------------------------------------------------------------------------
1 | import { test } from '@oclif/test'
2 | import {
3 | getInitCommand,
4 | initTestSuite,
5 | removeTestDirs,
6 | saveTestCloudGraphConfigFile,
7 | stopDgraphContainer,
8 | } from '../helpers'
9 | import { rootDir as root } from '../helpers/mocks'
10 |
11 | initTestSuite()
12 |
13 | describe('Launch command with docker installed', () => {
14 | beforeAll(async () => {
15 | await saveTestCloudGraphConfigFile(await getInitCommand([''], 'aws'))
16 | await stopDgraphContainer(true)
17 | })
18 |
19 | test
20 | .loadConfig({ root })
21 | .stdout()
22 | .command(['launch'])
23 | .do(ctx => expect(ctx.config.valid).toBe(true))
24 | .it('should run launch and spin up new container', (ctx: any, done) => {
25 | done()
26 | })
27 |
28 | test
29 | .loadConfig({ root })
30 | .stdout()
31 | .command(['launch'])
32 | .do(ctx => expect(ctx.config.valid).toBe(true))
33 | .it(
34 | 'should run launch and use existing(running) container',
35 | async (ctx: any, done) => {
36 | // Prepare for next test
37 | await stopDgraphContainer()
38 | done()
39 | }
40 | )
41 |
42 | test
43 | .loadConfig({ root })
44 | .stdout()
45 | .command(['launch'])
46 | .do(ctx => expect(ctx.config.valid).toBe(true))
47 | .it(
48 | 'should run launch and use existing(stopped) container',
49 | async (ctx: any, done) => {
50 | done()
51 | }
52 | )
53 |
54 | afterAll(async () => {
55 | await stopDgraphContainer(true)
56 | removeTestDirs()
57 | })
58 | })
59 |
--------------------------------------------------------------------------------
/test/commands/serve.test.ts:
--------------------------------------------------------------------------------
1 | import { test } from '@oclif/test'
2 | import axios from 'axios'
3 | import ServeCommandClass from '../../src/commands/serve'
4 | import {
5 | getInitCommand,
6 | getQueryEngineEndpoint,
7 | getServeCommand,
8 | initDgraphContainer,
9 | initTestSuite,
10 | removeTestDirs,
11 | saveTestCloudGraphConfigFile,
12 | stopDgraphContainer,
13 | } from '../helpers'
14 | import { rootDir as root, testQueryEnginePort } from '../helpers/mocks'
15 |
16 | initTestSuite()
17 |
18 | describe('Serve command with DGraph container stopped', () => {
19 | beforeAll(async () => {
20 | await stopDgraphContainer(true)
21 | await saveTestCloudGraphConfigFile(await getInitCommand([`--port=${testQueryEnginePort}`], 'aws'))
22 | })
23 | afterAll(() => {
24 | removeTestDirs()
25 | })
26 |
27 | test
28 | .loadConfig({ root })
29 | .stdout()
30 | .command(['serve'])
31 | .catch(error => {
32 | expect(error.message).toMatch(/FAILED canceling SERVE/)
33 | })
34 | .it('should try to start query server and fail')
35 | })
36 |
37 | describe('Serve command with DGraph container running', () => {
38 | let serveCommand: ServeCommandClass
39 | beforeAll(async () => {
40 | await saveTestCloudGraphConfigFile(await getInitCommand([`--port=${testQueryEnginePort}`], 'aws'), true)
41 | await initDgraphContainer()
42 | serveCommand = await getServeCommand([`--port=${testQueryEnginePort}`])
43 | })
44 | afterAll(async () => {
45 | await stopDgraphContainer(true)
46 | removeTestDirs()
47 | })
48 |
49 | it('should start query server and check if queryEngine endpoint is up', async () => {
50 | await serveCommand.run()
51 | const res = await axios({
52 | method: 'get',
53 | baseURL: getQueryEngineEndpoint(),
54 | })
55 | expect(res.status).toBe(200)
56 | expect(res.headers['content-type']).toBe('text/html')
57 | expect(res.headers['x-powered-by']).toBe('Express')
58 | })
59 | })
60 |
--------------------------------------------------------------------------------
/test/commands/teardown.test.ts:
--------------------------------------------------------------------------------
1 | import { test } from '@oclif/test'
2 | import {
3 | getInitCommand,
4 | initDgraphContainer,
5 | initTestSuite,
6 | removeTestDirs,
7 | saveTestCloudGraphConfigFile,
8 | stopDgraphContainer,
9 | } from '../helpers'
10 | import { rootDir as root } from '../helpers/mocks'
11 |
12 | initTestSuite()
13 |
14 | describe('Teardown command with docker installed', () => {
15 | beforeAll(async () => {
16 | await saveTestCloudGraphConfigFile(await getInitCommand([''], 'aws'))
17 | await initDgraphContainer()
18 | })
19 |
20 | test
21 | .loadConfig({ root })
22 | .stdout()
23 | .command(['teardown'])
24 | .do(ctx => expect(ctx.config.valid).toBe(true))
25 | .it('should run teardown and stop dgraph container', async (ctx: any, done) => {
26 | // Prepare for next test
27 | await stopDgraphContainer()
28 | done()
29 | })
30 |
31 | test
32 | .loadConfig({ root })
33 | .stdout()
34 | .command(['teardown'])
35 | .do(ctx => expect(ctx.config.valid).toBe(true))
36 | .it(
37 | 'should run teardown and find an already stopped container',
38 | async (ctx: any, done) => {
39 | // Prepare for next test
40 | done()
41 | }
42 | )
43 |
44 | test
45 | .loadConfig({ root })
46 | .stdout()
47 | .command(['teardown', '--delete-image'])
48 | .do(ctx => expect(ctx.config.valid).toBe(true))
49 | .it(
50 | 'should run teardown, find a stopped container and remove it',
51 | async (ctx: any, done) => {
52 | done()
53 | }
54 | )
55 |
56 |
57 | test
58 | .loadConfig({ root })
59 | .stdout()
60 | .command(['teardown'])
61 | .do(ctx => expect(ctx.config.valid).toBe(true))
62 | .it(
63 | 'should run teardown and say that it did not find any containers to stop',
64 | async (ctx: any, done) => {
65 | // Prepare for next test
66 | await initDgraphContainer()
67 | done()
68 | }
69 | )
70 |
71 | test
72 | .loadConfig({ root })
73 | .stdout()
74 | .command(['teardown', '--delete-image'])
75 | .do(ctx => expect(ctx.config.valid).toBe(true))
76 | .it(
77 | 'should run teardown, stop and remove container',
78 | async (ctx: any, done) => {
79 | done()
80 | }
81 | )
82 |
83 | afterAll(async () => {
84 | await stopDgraphContainer(true)
85 | removeTestDirs()
86 | })
87 | })
88 |
--------------------------------------------------------------------------------
/test/helpers/index.ts:
--------------------------------------------------------------------------------
1 | import { existsSync, rmdirSync, unlinkSync } from 'fs'
2 | import * as path from 'path'
3 | import CloudGraph from '@cloudgraph/sdk'
4 | import {
5 | Config as ConfigCommandClass,
6 | Parser as oclifParser,
7 | Interfaces,
8 | } from '@oclif/core'
9 | import { Config } from 'cosmiconfig/dist/types'
10 | import { cosmiconfigSync } from 'cosmiconfig'
11 |
12 | import InitCommandClass from '../../src/commands/init'
13 | import LaunchCommandClass from '../../src/commands/launch'
14 | import ServeCommandClass from '../../src/commands/serve'
15 | import {
16 | execCommand,
17 | fileUtils,
18 | findExistingDGraphContainerId,
19 | } from '../../src/utils'
20 | import {
21 | DGRAPH_CONTAINER_LABEL,
22 | DGRAPH_DOCKER_IMAGE_NAME,
23 | } from '../../src/utils/constants'
24 | import {
25 | configFileMock,
26 | rootDir as root,
27 | rootTestConfigDir,
28 | testConfigDir as configDir,
29 | testDataDir as dataDir,
30 | testDGraphDirectory,
31 | testQueryEngine,
32 | testQueryEnginePort,
33 | testStorageConfig,
34 | } from './mocks'
35 | import {
36 | MockInitCmdPromptExpectation,
37 | MockInitCmdFlagsExpectation,
38 | MockRunInitCmdPromptExpectation,
39 | } from './types'
40 |
41 | const { Client: Provider } = CloudGraph
42 |
43 | const ConfigCommand = new ConfigCommandClass({ root })
44 | const LaunchCommand = new LaunchCommandClass([''], { root })
45 | const ServeCommand = new ServeCommandClass([''], { root })
46 |
47 | export const { logger } = CloudGraph
48 |
49 | export const setConfigCommand = async (): Promise<{ version: string }> => {
50 | await ConfigCommand.load()
51 | const { version } = ConfigCommand
52 | ConfigCommand.configDir = configDir
53 | ConfigCommand.dataDir = dataDir
54 | return { version }
55 | }
56 |
57 | export const getInitCommand = async (
58 | argv: any = [''],
59 | providerName = ''
60 | ): Promise<InitCommandClass> => {
61 | const { version } = await setConfigCommand()
62 | // Mock the provider module
63 | InitCommandClass.prototype.getProviderClient = jest.fn(async function () {
64 | return { client: Provider, schemaMap: {} }
65 | })
66 | InitCommandClass.prototype.checkProviderConfig = jest.fn(async function () {
67 | return configFileMock.aws as any
68 | })
69 | InitCommandClass.prototype.getProvider = jest.fn(async function () {
70 | return providerName
71 | })
72 | InitCommandClass.prototype.providers = {}
73 | InitCommandClass.prototype.providers.aws = jest.fn()
74 | const initCommand = new InitCommandClass(argv, { root })
75 | initCommand.config.configDir = configDir
76 | initCommand.config.dataDir = dataDir
77 | initCommand.config.version = version
78 | return initCommand
79 | }
80 |
81 | export const getServeCommand = async (
82 | argv: any = ['']
83 | ): Promise<ServeCommandClass> => {
84 | const { version } = await setConfigCommand()
85 | const serveCommand = new ServeCommandClass(argv, { root })
86 | serveCommand.config.configDir = configDir
87 | serveCommand.config.dataDir = dataDir
88 | serveCommand.config.version = version
89 | return serveCommand
90 | }
91 |
92 | export const removeTestDirs = (): void => {
93 | rmdirSync(rootTestConfigDir, { recursive: true })
94 | // rmdirSync(dataDir, { recursive: true })
95 | }
96 |
97 | export const getConfigFile = (debug = false): Config => {
98 | debug && logger.debug(`getting configDir: ${configDir}`)
99 | const file = cosmiconfigSync('cloud-graph').load(
100 | path.join(configDir, '.cloud-graphrc.json')
101 | )
102 | return file?.config
103 | }
104 |
105 | export const saveTestCloudGraphConfigFile = async (
106 | InitCommand: InitCommandClass,
107 | debug = false
108 | ): Promise<void> => {
109 | debug && logger.debug(`saving test config: ${configDir}`)
110 | InitCommand.saveCloudGraphConfigFile(configFileMock)
111 | }
112 |
113 | export const parseArgv = async (
114 | InitClass: InitCommandClass
115 | ): Promise<
116 | Interfaces.ParserOutput<
117 | any,
118 | {
119 | [name: string]: string
120 | }
121 | >
122 | > => {
123 | return oclifParser.parse(
124 | InitClass.argv,
125 | InitClass.ctor as Interfaces.Input<any>
126 | )
127 | }
128 |
129 | export const flagTestHelper = async (
130 | flag: string,
131 | flagInput: string | number | undefined,
132 | debug = false
133 | ): Promise<void> => {
134 | debug && logger.debug('******')
135 | debug && logger.debug(`Flag '--${flag}' Test(helper)`)
136 | const flagInputConcatString = flagInput ? `=${flagInput}` : ''
137 | const flagEntry = `--${flag}${flagInputConcatString}`
138 | debug && logger.debug(`Full flag arg to test: '${flagEntry}'`)
139 | const Init = await getInitCommand([flagEntry])
140 | const { flags } = await parseArgv(Init)
141 | debug && logger.debug(`Parsed result: ${flags[flag]}`)
142 | expect(flags[flag]).toBe(flagInput ?? true)
143 | }
144 |
145 | export const initCommandPromptGetterMethodTester = async (
146 | spyFn: jest.SpyInstance,
147 | mock: MockInitCmdPromptExpectation,
148 | debug = false
149 | ): Promise<void> => {
150 | debug && logger.debug('******')
151 | debug && logger.debug(`Test ${mock.methodToTest}`)
152 | debug && logger.debug(`overwriteFlag: ${JSON.stringify(mock.overwriteFlag)}`)
153 | debug &&
154 | logger.debug(`promptExpectation: ${JSON.stringify(mock.promptExpectation)}`)
155 | mock.promptExpectation.forEach(expectation =>
156 | spyFn.mockResolvedValueOnce(expectation)
157 | )
158 | const InitCommand = await getInitCommand([''], 'aws')
159 | const cloudGraphConfig = InitCommand.getCGConfig('cloudGraph')
160 | debug &&
161 | logger.debug(
162 | `Config Dir -> ${JSON.stringify(InitCommand.config.configDir)}`
163 | )
164 | debug && logger.debug(`Config -> ${JSON.stringify(cloudGraphConfig)}`)
165 | const response = await InitCommand[mock.methodToTest](mock.overwriteFlag)
166 | debug && logger.debug(`response: ${JSON.stringify(response)}`)
167 | expect(response).toMatchObject(mock.expectedResult)
168 | debug &&
169 | logger.debug(`expectedResult: ${JSON.stringify(mock.expectedResult)}`)
170 | }
171 |
172 | export const initCommandNoOverwriteTester = async (
173 | mock: MockInitCmdPromptExpectation | MockInitCmdFlagsExpectation,
174 | debug = false
175 | ): Promise<void> => {
176 | debug && logger.debug('******')
177 | debug && logger.debug(`Test ${mock.methodToTest}`)
178 | const InitCommand = await getInitCommand([''], 'aws')
179 | await saveTestCloudGraphConfigFile(InitCommand, debug)
180 | const response = await InitCommand[mock.methodToTest](mock.overwriteFlag)
181 | debug && logger.debug(`response: ${JSON.stringify(response)}`)
182 | debug &&
183 | logger.debug(`expectedResult: ${JSON.stringify(mock.expectedResult)}`)
184 | expect(response).toMatchObject(mock.expectedResult)
185 | }
186 |
187 | export const initCommandArgvGetterMethodTester = async (
188 | mock: MockInitCmdFlagsExpectation,
189 | debug = false
190 | ): Promise<void> => {
191 | debug && logger.debug('******')
192 | debug && logger.debug(`Test ${mock.methodToTest}`)
193 | debug && logger.debug(`argvList: ${JSON.stringify(mock.argvList)}`)
194 | const Init = await getInitCommand(mock.argvList, 'aws')
195 | const response = await Init[mock.methodToTest](mock.overwriteFlag)
196 | debug && logger.debug(`response: ${JSON.stringify(response)}`)
197 | expect(response).toMatchObject(mock.expectedResult)
198 | debug &&
199 | logger.debug(`expectedResult: ${JSON.stringify(mock.expectedResult)}`)
200 | }
201 |
202 | export const removeConfigFile = (debug = false): void => {
203 | const filePath = path.join(configDir, '.cloud-graphrc.json')
204 | debug && logger.debug(`removing configFile: ${filePath}`)
205 | if (existsSync(filePath)) {
206 | unlinkSync(filePath)
207 | }
208 | }
209 |
210 | export const runInitCommandTester = async (
211 | spyFn: jest.SpyInstance,
212 | mock: MockRunInitCmdPromptExpectation,
213 | removeConfig = false,
214 | debug = false
215 | ): Promise<void> => {
216 | debug && logger.debug('******')
217 | debug && logger.debug('Test InitCommand.run()')
218 | debug && logger.debug(`argvList: ${JSON.stringify(mock.argvList)}`)
219 | const InitCommand = await getInitCommand(mock.argvList, 'aws')
220 | if (removeConfig) {
221 | removeConfigFile(debug)
222 | // remove all overwrite prompt mocks
223 | mock.promptExpectation = mock.promptExpectation.filter(
224 | i => !('overwrite' in i)
225 | )
226 | }
227 | debug && logger.debug(`mock: ${JSON.stringify(mock)}`)
228 | const originalConfigFile = !removeConfig ? getConfigFile(debug) : {}
229 | mock.promptExpectation.forEach(expectation =>
230 | spyFn.mockResolvedValueOnce(expectation)
231 | )
232 | await InitCommand.run()
233 | const newConfigFile = getConfigFile(debug)
234 | if (
235 | !mock.overwriteFlags.overwriteProviderConfig &&
236 | !mock.overwriteFlags.overwriteCloudGraphConfig
237 | ) {
238 | // Test config is unchanged
239 | expect(newConfigFile).toMatchObject(originalConfigFile)
240 | }
241 | if (
242 | !mock.overwriteFlags.overwriteProviderConfig &&
243 | mock.overwriteFlags.overwriteCloudGraphConfig
244 | ) {
245 | // Test config for cloudgraph configuration changes
246 | originalConfigFile.cloudGraph = mock.expectedResult
247 | expect(newConfigFile).toMatchObject(originalConfigFile)
248 | }
249 | }
250 |
251 | export const initTestSuite = (
252 | args: {
253 | libsToMock?: string[]
254 | } = {}
255 | ): void => {
256 | const { libsToMock } = args
257 | libsToMock?.forEach(lib => jest.mock(lib))
258 | jest.setTimeout(300000)
259 | }
260 |
261 | export const stopDgraphContainer = async (
262 | rmContainer = false
263 | ): Promise<void> => {
264 | try {
265 | let containerToRemove: undefined | string
266 | const runningContainerId = await findExistingDGraphContainerId('running')
267 | if (runningContainerId) {
268 | logger.debug(
269 | `Stopping ${rmContainer ? 'and deleting' : ''} ${runningContainerId}`
270 | )
271 | await execCommand(`docker stop ${runningContainerId}`)
272 | logger.debug(`${runningContainerId} stopped successfully`)
273 | containerToRemove = runningContainerId
274 | } else {
275 | const exitedContainerId = await findExistingDGraphContainerId('exited')
276 | if (exitedContainerId) {
277 | containerToRemove = exitedContainerId
278 | }
279 | }
280 | if (rmContainer && containerToRemove) {
281 | await execCommand(`docker rm ${containerToRemove}`)
282 | logger.debug(`${containerToRemove} removed successfully`)
283 | }
284 | } catch (error) {
285 | logger.debug('Error while stopping dgraph container')
286 | logger.debug(error)
287 | }
288 | }
289 |
290 | export const initDgraphContainer = async (): Promise<void> => {
291 | try {
292 | await ConfigCommand.load()
293 | logger.debug(ConfigCommand.dataDir)
294 | await stopDgraphContainer(true)
295 | fileUtils.makeDirIfNotExists(path.join(dataDir, testDGraphDirectory))
296 | await execCommand(
297 | `docker run -d -p 8995:5080 -p 8996:6080 -p ${testStorageConfig.port}:8080 -p 8998:9080 -p 8999:8000 --label ${
298 | DGRAPH_CONTAINER_LABEL
299 | } -v ${dataDir}${testDGraphDirectory}:/dgraph --name dgraph ${DGRAPH_DOCKER_IMAGE_NAME}`
300 | )
301 | logger.debug('DGraph instance started!')
302 | await LaunchCommand.checkIfInstanceIsRunningReportStatus()
303 | } catch (error) {
304 | logger.debug('Error while starting dgraph container')
305 | logger.debug(error)
306 | }
307 | }
308 |
309 | export const getQueryEngineEndpoint = (): string => {
310 | ServeCommand.config.configDir = configDir
311 | return `http://localhost:${testQueryEnginePort}/${
312 | ServeCommand.getCGConfigKey('queryEngine') ?? testQueryEngine
313 | }`
314 | }
315 |
--------------------------------------------------------------------------------
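A sketch of how a new test file would typically wire up the helpers from test/helpers/index.ts above; the describe block and flag value are illustrative, not part of the repository:

import { initTestSuite, flagTestHelper, removeTestDirs } from './test/helpers'

initTestSuite({ libsToMock: ['inquirer'] })

describe('Hypothetical flag parse check', () => {
  afterAll(() => {
    removeTestDirs()
  })

  it('should set the "--dgraph" flag correctly', async () => {
    // flagTestHelper builds the `--dgraph=<value>` argv entry, parses it through
    // the Init command and asserts the parsed flag equals the input value.
    await flagTestHelper('dgraph', 'http://localhost:9999')
  })
})
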
/test/helpers/mocks.ts:
--------------------------------------------------------------------------------
1 | import { Entity } from '@cloudgraph/sdk'
2 | import path from 'path'
3 | import {
4 | MockInitCmdFlagsExpectation,
5 | InitCommandEnums,
6 | MockInitCmdPromptExpectation,
7 | MockRunInitCmdPromptExpectation,
8 | RunInitCommandTestType,
9 | } from './types'
10 |
11 | export const rootDir = path.join(__dirname, '../../')
12 | export const rootTestConfigDir = path.join(__dirname, '../../test/.testConfig')
13 | export const testConfigDir = path.join(
14 | __dirname,
15 | '../../test/.testConfig/cloudgraph'
16 | )
17 | export const testDataDir = path.join(__dirname, '../../test/.testData')
18 | export const testEndpoint = 'http://localhost:8888'
19 | export const testVersionLimit = '20'
20 | export const testQueryEnginePort = 4444
21 | export const testQueryEngine = 'playground'
22 | export const testStorageEngine = 'dgraph'
23 | export const testDGraphDirectory = '/dgraph'
24 | export const promptForDGraphConfigMockedExpectation = {
25 | receivedUrl: testEndpoint,
26 | vLimit: testVersionLimit,
27 | }
28 | export const promptForQueryEngineConfigMockedExpectation = {
29 | inputQueryEngine: testQueryEngine,
30 | }
31 |
32 | const { hostname, port, protocol } = new URL(testEndpoint)
33 | export const testStorageConfig = {
34 | host: hostname,
35 | port,
36 | scheme: protocol.split(':')[0],
37 | }
38 |
39 | export const configFileMock = {
40 | aws: {
41 | profileApprovedList: ['default'],
42 | regions: 'us-east-1',
43 | resources: 'alb',
44 | },
45 | cloudGraph: {
46 | storageConfig: testStorageConfig,
47 | versionLimit: testVersionLimit,
48 | queryEngine: testQueryEngine,
49 | port: testQueryEnginePort,
50 | },
51 | }
52 |
53 | export const askForDGraphConfigFlagsMock = (
54 | overwriteFlag: boolean
55 | ): MockInitCmdFlagsExpectation => ({
56 | argvList: [`--dgraph=${testEndpoint}`, `--version-limit=${testVersionLimit}`],
57 | methodToTest: InitCommandEnums.askForDGraphConfig,
58 | overwriteFlag,
59 | expectedResult: {
60 | storageConfig: testStorageConfig,
61 | versionLimit: testVersionLimit,
62 | },
63 | })
64 |
65 | export const askForQueryEngineConfigFlagsMock = (
66 | overwriteFlag: boolean
67 | ): MockInitCmdFlagsExpectation => ({
68 | argvList: [`--query-engine=${testQueryEngine}`],
69 | methodToTest: InitCommandEnums.askForQueryEngineConfig,
70 | overwriteFlag,
71 | expectedResult: {
72 | queryEngine: testQueryEngine,
73 | },
74 | })
75 |
76 | export const getCloudGraphConfigFlagsMock = (
77 | overwriteFlag: boolean
78 | ): MockInitCmdFlagsExpectation => ({
79 | argvList: [
80 | ...askForDGraphConfigFlagsMock(overwriteFlag).argvList,
81 | ...askForQueryEngineConfigFlagsMock(overwriteFlag).argvList,
82 | ],
83 | methodToTest: InitCommandEnums.getCloudGraphConfig,
84 | overwriteFlag,
85 | expectedResult: {
86 | ...askForDGraphConfigFlagsMock(overwriteFlag).expectedResult,
87 | ...askForQueryEngineConfigFlagsMock(overwriteFlag).expectedResult,
88 | },
89 | })
90 |
91 | export const askForDGraphConfigPromptMock = (
92 | overwriteFlag: boolean
93 | ): MockInitCmdPromptExpectation => ({
94 | methodToTest: InitCommandEnums.askForDGraphConfig,
95 | overwriteFlag,
96 | promptExpectation: overwriteFlag
97 | ? [
98 | {
99 | receivedUrl: testEndpoint,
100 | vLimit: testVersionLimit,
101 | },
102 | ]
103 | : [],
104 | expectedResult: askForDGraphConfigFlagsMock(overwriteFlag).expectedResult,
105 | })
106 |
107 | export const askForQueryEngineConfigPromptMock = (
108 | overwriteFlag: boolean
109 | ): MockInitCmdPromptExpectation => ({
110 | methodToTest: InitCommandEnums.askForQueryEngineConfig,
111 | overwriteFlag,
112 | promptExpectation: overwriteFlag
113 | ? [
114 | {
115 | inputQueryEngine: testQueryEngine,
116 | },
117 | ]
118 | : [],
119 | expectedResult:
120 | askForQueryEngineConfigFlagsMock(overwriteFlag).expectedResult,
121 | })
122 |
123 | export const fetchCloudGraphConfigFlagsMock = (
124 | overwriteFlag: boolean
125 | ): MockInitCmdFlagsExpectation => ({
126 | argvList: [
127 | ...askForDGraphConfigFlagsMock(overwriteFlag).argvList,
128 | ...askForQueryEngineConfigFlagsMock(overwriteFlag).argvList,
129 | ],
130 | methodToTest: InitCommandEnums.getCloudGraphConfig,
131 | overwriteFlag,
132 | expectedResult: {
133 | ...askForDGraphConfigFlagsMock(overwriteFlag).expectedResult,
134 | ...askForQueryEngineConfigFlagsMock(overwriteFlag).expectedResult,
135 | },
136 | })
137 |
138 | export const fetchCloudGraphConfigPromptMock = (
139 | overwriteFlag: boolean
140 | ): MockInitCmdPromptExpectation => ({
141 | methodToTest: InitCommandEnums.fetchCloudGraphConfig,
142 | overwriteFlag,
143 | promptExpectation: [
144 | { overwrite: overwriteFlag },
145 | ...askForDGraphConfigPromptMock(overwriteFlag).promptExpectation,
146 | ...askForQueryEngineConfigPromptMock(overwriteFlag).promptExpectation,
147 | ],
148 | expectedResult: {
149 | ...askForDGraphConfigPromptMock(overwriteFlag).expectedResult,
150 | ...askForQueryEngineConfigPromptMock(overwriteFlag).expectedResult,
151 | },
152 | })
153 |
154 | export const runInitCommandMock = (
155 | overwriteFlags: MockRunInitCmdPromptExpectation['overwriteFlags'],
156 | test: RunInitCommandTestType
157 | ): MockRunInitCmdPromptExpectation => ({
158 | argvList:
159 | test === RunInitCommandTestType.flags
160 | ? fetchCloudGraphConfigFlagsMock(overwriteFlags.overwriteCloudGraphConfig)
161 | .argvList
162 | : [],
163 | overwriteFlags,
164 | promptExpectation: [
165 | { overwrite: overwriteFlags.overwriteProviderConfig }, // First prompt for provider config overwrite
166 | ...fetchCloudGraphConfigPromptMock(overwriteFlags.overwriteCloudGraphConfig)
167 | .promptExpectation,
168 | ],
169 | expectedResult: {
170 | ...(test === RunInitCommandTestType.flags
171 | ? fetchCloudGraphConfigFlagsMock(overwriteFlags.overwriteCloudGraphConfig)
172 | .expectedResult
173 | : {}),
174 | ...(test === RunInitCommandTestType.prompt
175 | ? fetchCloudGraphConfigPromptMock(
176 | overwriteFlags.overwriteCloudGraphConfig
177 | ).expectedResult
178 | : {}),
179 | },
180 | })
181 |
182 | export const addmutationMock =
183 | (): string =>
184 | 'mutation($input:[AddawsApiGatewayResourceInput!]!){addawsApiGatewayResource(input:$input,upsert:true){numUids}}'
185 |
186 | export const updatemutationMock =
187 | (): string =>
188 | 'mutation($patch:UpdateawsApiGatewayResourceInput!){updateawsApiGatewayResource(input:$patch){awsApiGatewayResource{id}}}'
189 |
190 | export const addmutationMockUsingClassname =
191 | (): string =>
192 | 'mutation($input:[AddawsAPIGatewayResourceInput!]!){addawsAPIGatewayResource(input:$input,upsert:true){numUids}}'
193 |
194 | export const mockEntityForMutationTest = {
195 | className: 'APIGatewayResource',
196 | name: 'apiGatewayResource',
197 | mutation: 'mutation($input:[AddawsApiGatewayResourceInput!]!){addawsApiGatewayResource(input:$input,upsert:true){numUids}}',
198 | data: []
199 | } as Entity
200 |
201 | export const mockSchemaMap = {
202 | 'apiGatewayResource': 'apiGatewayResource'
203 | }
204 |
--------------------------------------------------------------------------------
/test/helpers/types.ts:
--------------------------------------------------------------------------------
1 | export enum InitCommandEnums {
2 | askForDGraphConfig = 'askForDGraphConfig',
3 | askForQueryEngineConfig = 'askForQueryEngineConfig',
4 | getCloudGraphConfig = 'getCloudGraphConfig',
5 | fetchCloudGraphConfig = 'fetchCloudGraphConfig',
6 | run = 'run',
7 | }
8 |
9 | export enum RunInitCommandTestType {
10 | prompt = 'prompt',
11 | flags = 'flags',
12 | }
13 |
14 | export interface MockInitCmdFlagsExpectation {
15 | argvList: string[]
16 | expectedResult: Record<string, any>
17 | overwriteFlag: boolean
18 | methodToTest: InitCommandEnums
19 | }
20 |
21 | export interface MockInitCmdPromptExpectation {
22 | methodToTest: InitCommandEnums
23 | overwriteFlag: boolean
24 | promptExpectation: any[]
25 | expectedResult: any
26 | }
27 |
28 | export interface MockRunInitCmdPromptExpectation {
29 | argvList: string[]
30 | overwriteFlags: {
31 | overwriteProviderConfig: boolean
32 | overwriteCloudGraphConfig: boolean
33 | }
34 | promptExpectation: any[],
35 | expectedResult: any
36 | }
37 |
--------------------------------------------------------------------------------
/test/tsconfig.json:
--------------------------------------------------------------------------------
1 | {
2 | "extends": "../tsconfig",
3 | "compilerOptions": {
4 | "noEmit": true
5 | },
6 | "references": [
7 | {"path": ".."}
8 | ]
9 | }
10 |
--------------------------------------------------------------------------------
/test/utils/mutation.test.ts:
--------------------------------------------------------------------------------
1 | import { Entity } from '@cloudgraph/sdk'
2 | import { generateMutation } from '../../src/utils/mutation'
3 | import { initTestSuite } from '../helpers'
4 | import {
5 | addmutationMock,
6 | addmutationMockUsingClassname,
7 | mockEntityForMutationTest,
8 | mockSchemaMap,
9 | updatemutationMock,
10 | } from '../helpers/mocks'
11 |
12 | initTestSuite()
13 |
14 | describe('Mutation generator basic tests', () => {
15 | it('should generate the string for an add mutation', () => {
16 | const mutation = generateMutation({
17 | type: 'add',
18 | provider: 'aws',
19 | entity: mockEntityForMutationTest,
20 | schemaMap: mockSchemaMap,
21 | })
22 | const mock = addmutationMock()
23 | expect(mutation).toBe(mock)
24 | })
25 | it('should generate the string for an update mutation', () => {
26 | const mutation = generateMutation({
27 | type: 'update',
28 | provider: 'aws',
29 | entity: mockEntityForMutationTest,
30 | schemaMap: mockSchemaMap,
31 | })
32 | const mock = updatemutationMock()
33 | expect(mutation).toBe(mock)
34 | })
35 | })
36 |
37 | describe('Mutation generator cases test', () => {
38 | it('should use the schemaMap to get the correct type', () => {
39 | const entityCopy: Entity = JSON.parse(
40 | JSON.stringify(mockEntityForMutationTest)
41 | ) as Entity
42 | delete entityCopy.className
43 | const mutation = generateMutation({
44 | type: 'add',
45 | provider: 'aws',
46 | entity: entityCopy,
47 | schemaMap: mockSchemaMap,
48 | })
49 | const mock = addmutationMock()
50 | expect(mutation).toBe(mock)
51 | })
52 | it('should use the className to generate the correct type', () => {
53 | const entityCopy: Entity = JSON.parse(
54 | JSON.stringify(mockEntityForMutationTest)
55 | ) as Entity
56 | entityCopy.mutation = ''
57 | const mutation = generateMutation({
58 | type: 'add',
59 | provider: 'aws',
60 | entity: entityCopy,
61 | schemaMap: undefined,
62 | })
63 | const mock = addmutationMockUsingClassname()
64 | expect(mutation).toBe(mock)
65 | })
66 | it('should use the mutation file', () => {
67 | const entityCopy: Entity = JSON.parse(
68 | JSON.stringify(mockEntityForMutationTest)
69 | ) as Entity
70 | const { mutation } = entityCopy
71 | const result =
72 | mutation ||
73 | generateMutation({
74 | type: 'add',
75 | provider: 'aws',
76 | entity: entityCopy,
77 | schemaMap: undefined,
78 | })
79 | const mock = addmutationMock()
80 | expect(result).toBe(mock)
81 | })
82 | })
83 |
--------------------------------------------------------------------------------
/tsconfig.json:
--------------------------------------------------------------------------------
1 | {
2 | "compilerOptions": {
3 | "composite": true,
4 | "declaration": true,
5 | "esModuleInterop": true,
6 | "forceConsistentCasingInFileNames": true,
7 | "importHelpers": true,
8 | "module": "commonjs",
9 | "outDir": "lib",
10 | "rootDir": "src",
11 | "skipLibCheck": true,
12 | "strict": true,
13 | "target": "es2017",
14 | "typeRoots": ["./node_modules/@types", "src/types"]
15 | },
16 | "include": [
17 | "src/**/*"
18 | ]
19 | }
20 |
--------------------------------------------------------------------------------