├── LICENSE.TXT ├── README.md ├── deployment ├── README.md ├── args.sh ├── cloudbuild.yaml ├── create_deployment_trigger.sh ├── create_vm_template.sh ├── initial_setup.sh ├── instance_group_management.sh ├── push_new_docker_image.sh ├── rollback_to_release.sh ├── rollout_new_release.sh └── setup_utils.sh ├── docker ├── Dockerfile ├── README.md ├── entry_point.sh ├── index_fetcher.sh ├── start_server.sh ├── status_templates │ ├── contact │ ├── failure │ ├── footer │ ├── header │ └── success └── status_updater.sh ├── docs ├── index.md ├── privacy.md └── terms.md ├── download_latest_release_assets.py └── indexer ├── Dockerfile ├── README.md ├── args.sh ├── cloudbuild.yaml ├── cronjob ├── download_latest_release_assets.py ├── entry_point.sh ├── prepare.sh ├── push_new_docker_image.sh ├── rollout_new_release.sh └── run.sh /LICENSE.TXT: -------------------------------------------------------------------------------- 1 | ============================================================================== 2 | This Project is under the Apache License v2.0 with LLVM Exceptions: 3 | ============================================================================== 4 | 5 | Apache License 6 | Version 2.0, January 2004 7 | http://www.apache.org/licenses/ 8 | 9 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 10 | 11 | 1. Definitions. 12 | 13 | "License" shall mean the terms and conditions for use, reproduction, 14 | and distribution as defined by Sections 1 through 9 of this document. 15 | 16 | "Licensor" shall mean the copyright owner or entity authorized by 17 | the copyright owner that is granting the License. 18 | 19 | "Legal Entity" shall mean the union of the acting entity and all 20 | other entities that control, are controlled by, or are under common 21 | control with that entity. For the purposes of this definition, 22 | "control" means (i) the power, direct or indirect, to cause the 23 | direction or management of such entity, whether by contract or 24 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 25 | outstanding shares, or (iii) beneficial ownership of such entity. 26 | 27 | "You" (or "Your") shall mean an individual or Legal Entity 28 | exercising permissions granted by this License. 29 | 30 | "Source" form shall mean the preferred form for making modifications, 31 | including but not limited to software source code, documentation 32 | source, and configuration files. 33 | 34 | "Object" form shall mean any form resulting from mechanical 35 | transformation or translation of a Source form, including but 36 | not limited to compiled object code, generated documentation, 37 | and conversions to other media types. 38 | 39 | "Work" shall mean the work of authorship, whether in Source or 40 | Object form, made available under the License, as indicated by a 41 | copyright notice that is included in or attached to the work 42 | (an example is provided in the Appendix below). 43 | 44 | "Derivative Works" shall mean any work, whether in Source or Object 45 | form, that is based on (or derived from) the Work and for which the 46 | editorial revisions, annotations, elaborations, or other modifications 47 | represent, as a whole, an original work of authorship. For the purposes 48 | of this License, Derivative Works shall not include works that remain 49 | separable from, or merely link (or bind by name) to the interfaces of, 50 | the Work and Derivative Works thereof. 
51 | 52 | "Contribution" shall mean any work of authorship, including 53 | the original version of the Work and any modifications or additions 54 | to that Work or Derivative Works thereof, that is intentionally 55 | submitted to Licensor for inclusion in the Work by the copyright owner 56 | or by an individual or Legal Entity authorized to submit on behalf of 57 | the copyright owner. For the purposes of this definition, "submitted" 58 | means any form of electronic, verbal, or written communication sent 59 | to the Licensor or its representatives, including but not limited to 60 | communication on electronic mailing lists, source code control systems, 61 | and issue tracking systems that are managed by, or on behalf of, the 62 | Licensor for the purpose of discussing and improving the Work, but 63 | excluding communication that is conspicuously marked or otherwise 64 | designated in writing by the copyright owner as "Not a Contribution." 65 | 66 | "Contributor" shall mean Licensor and any individual or Legal Entity 67 | on behalf of whom a Contribution has been received by Licensor and 68 | subsequently incorporated within the Work. 69 | 70 | 2. Grant of Copyright License. Subject to the terms and conditions of 71 | this License, each Contributor hereby grants to You a perpetual, 72 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 73 | copyright license to reproduce, prepare Derivative Works of, 74 | publicly display, publicly perform, sublicense, and distribute the 75 | Work and such Derivative Works in Source or Object form. 76 | 77 | 3. Grant of Patent License. Subject to the terms and conditions of 78 | this License, each Contributor hereby grants to You a perpetual, 79 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 80 | (except as stated in this section) patent license to make, have made, 81 | use, offer to sell, sell, import, and otherwise transfer the Work, 82 | where such license applies only to those patent claims licensable 83 | by such Contributor that are necessarily infringed by their 84 | Contribution(s) alone or by combination of their Contribution(s) 85 | with the Work to which such Contribution(s) was submitted. If You 86 | institute patent litigation against any entity (including a 87 | cross-claim or counterclaim in a lawsuit) alleging that the Work 88 | or a Contribution incorporated within the Work constitutes direct 89 | or contributory patent infringement, then any patent licenses 90 | granted to You under this License for that Work shall terminate 91 | as of the date such litigation is filed. 92 | 93 | 4. Redistribution. 
You may reproduce and distribute copies of the 94 | Work or Derivative Works thereof in any medium, with or without 95 | modifications, and in Source or Object form, provided that You 96 | meet the following conditions: 97 | 98 | (a) You must give any other recipients of the Work or 99 | Derivative Works a copy of this License; and 100 | 101 | (b) You must cause any modified files to carry prominent notices 102 | stating that You changed the files; and 103 | 104 | (c) You must retain, in the Source form of any Derivative Works 105 | that You distribute, all copyright, patent, trademark, and 106 | attribution notices from the Source form of the Work, 107 | excluding those notices that do not pertain to any part of 108 | the Derivative Works; and 109 | 110 | (d) If the Work includes a "NOTICE" text file as part of its 111 | distribution, then any Derivative Works that You distribute must 112 | include a readable copy of the attribution notices contained 113 | within such NOTICE file, excluding those notices that do not 114 | pertain to any part of the Derivative Works, in at least one 115 | of the following places: within a NOTICE text file distributed 116 | as part of the Derivative Works; within the Source form or 117 | documentation, if provided along with the Derivative Works; or, 118 | within a display generated by the Derivative Works, if and 119 | wherever such third-party notices normally appear. The contents 120 | of the NOTICE file are for informational purposes only and 121 | do not modify the License. You may add Your own attribution 122 | notices within Derivative Works that You distribute, alongside 123 | or as an addendum to the NOTICE text from the Work, provided 124 | that such additional attribution notices cannot be construed 125 | as modifying the License. 126 | 127 | You may add Your own copyright statement to Your modifications and 128 | may provide additional or different license terms and conditions 129 | for use, reproduction, or distribution of Your modifications, or 130 | for any such Derivative Works as a whole, provided Your use, 131 | reproduction, and distribution of the Work otherwise complies with 132 | the conditions stated in this License. 133 | 134 | 5. Submission of Contributions. Unless You explicitly state otherwise, 135 | any Contribution intentionally submitted for inclusion in the Work 136 | by You to the Licensor shall be under the terms and conditions of 137 | this License, without any additional terms or conditions. 138 | Notwithstanding the above, nothing herein shall supersede or modify 139 | the terms of any separate license agreement you may have executed 140 | with Licensor regarding such Contributions. 141 | 142 | 6. Trademarks. This License does not grant permission to use the trade 143 | names, trademarks, service marks, or product names of the Licensor, 144 | except as required for reasonable and customary use in describing the 145 | origin of the Work and reproducing the content of the NOTICE file. 146 | 147 | 7. Disclaimer of Warranty. Unless required by applicable law or 148 | agreed to in writing, Licensor provides the Work (and each 149 | Contributor provides its Contributions) on an "AS IS" BASIS, 150 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 151 | implied, including, without limitation, any warranties or conditions 152 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 153 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 154 | appropriateness of using or redistributing the Work and assume any 155 | risks associated with Your exercise of permissions under this License. 156 | 157 | 8. Limitation of Liability. In no event and under no legal theory, 158 | whether in tort (including negligence), contract, or otherwise, 159 | unless required by applicable law (such as deliberate and grossly 160 | negligent acts) or agreed to in writing, shall any Contributor be 161 | liable to You for damages, including any direct, indirect, special, 162 | incidental, or consequential damages of any character arising as a 163 | result of this License or out of the use or inability to use the 164 | Work (including but not limited to damages for loss of goodwill, 165 | work stoppage, computer failure or malfunction, or any and all 166 | other commercial damages or losses), even if such Contributor 167 | has been advised of the possibility of such damages. 168 | 169 | 9. Accepting Warranty or Additional Liability. While redistributing 170 | the Work or Derivative Works thereof, You may choose to offer, 171 | and charge a fee for, acceptance of support, warranty, indemnity, 172 | or other liability obligations and/or rights consistent with this 173 | License. However, in accepting such obligations, You may act only 174 | on Your own behalf and on Your sole responsibility, not on behalf 175 | of any other Contributor, and only if You agree to indemnify, 176 | defend, and hold each Contributor harmless for any liability 177 | incurred by, or claims asserted against, such Contributor by reason 178 | of your accepting any such warranty or additional liability. 179 | 180 | END OF TERMS AND CONDITIONS 181 | 182 | APPENDIX: How to apply the Apache License to your work. 183 | 184 | To apply the Apache License to your work, attach the following 185 | boilerplate notice, with the fields enclosed by brackets "[]" 186 | replaced with your own identifying information. (Don't include 187 | the brackets!) The text should be enclosed in the appropriate 188 | comment syntax for the file format. We also recommend that a 189 | file or class name and description of purpose be included on the 190 | same "printed page" as the copyright notice for easier 191 | identification within third-party archives. 192 | 193 | Copyright [yyyy] [name of copyright owner] 194 | 195 | Licensed under the Apache License, Version 2.0 (the "License"); 196 | you may not use this file except in compliance with the License. 197 | You may obtain a copy of the License at 198 | 199 | http://www.apache.org/licenses/LICENSE-2.0 200 | 201 | Unless required by applicable law or agreed to in writing, software 202 | distributed under the License is distributed on an "AS IS" BASIS, 203 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 204 | See the License for the specific language governing permissions and 205 | limitations under the License. 206 | 207 | 208 | ---- LLVM Exceptions to the Apache 2.0 License ---- 209 | 210 | As an exception, if, as a result of your compiling your source code, portions 211 | of this Software are embedded into an Object form of such source code, you 212 | may redistribute such embedded portions in such Object form without complying 213 | with the conditions of Sections 4(a), 4(b) and 4(d) of the License. 
214 | 215 | In addition, if you combine or link compiled forms of this Software with 216 | software that is licensed under the GPLv2 ("Combined Software") and if a 217 | court of competent jurisdiction determines that the patent provision (Section 218 | 3), the indemnity provision (Section 9) or other Section of the License 219 | conflicts with the conditions of the GPLv2, you may retroactively and 220 | prospectively choose to deem waived or otherwise exclude such Section(s) of 221 | the License, but only in their entirety and only with respect to the Combined 222 | Software. 223 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # chrome-remote-index 2 | 3 | ## Repo Layout 4 | 5 | [deployment](deployment/) contains the scripts used to deploy a remote-index 6 | serving instance to GCP. It takes care of VM creation and deploying new Docker 7 | containers. 8 | 9 | [docker](docker/) contains the scripts used by the remote-index serving instance to 10 | fetch new index files and start the clangd-index-server. It also contains the 11 | Dockerfile that containerizes this process. 12 | -------------------------------------------------------------------------------- /deployment/README.md: -------------------------------------------------------------------------------- 1 | # GCP server management scripts 2 | 3 | This directory contains scripts used for managing the GCP project. They make use 4 | of the Google Cloud SDK, so you need to install the SDK first; you can find 5 | instructions [here](https://cloud.google.com/sdk/docs/install). 6 | 7 | ## Configuration 8 | 9 | Most of the configuration arguments for GCP deployment are defined in 10 | [args.sh](args.sh). You can see documentation and different setup options in 11 | this script. 12 | 13 | ### Serving infra 14 | 15 | [initial_setup.sh](initial_setup.sh) handles creation of VM instances and load 16 | balancers. It should be run once; by default it will create 2 serving 17 | environments, one for live and one for staging. 18 | 19 | The staging environment consists of a single instance group and a single VM in 20 | europe-west, with a regional TCP load balancer in front. The load balancer accepts 21 | traffic on port 50051. 22 | 23 | The live environment has 2 instance groups, one in us-central and the other in europe-west, 24 | with a single VM in each of them. It has a global TCP load balancer in front. 25 | The load balancer accepts traffic on port 5900. 26 | 27 | Both environments use a TCP health check on port 50051 and only allow 28 | ingress to that port. 29 | 30 | ## Rolling images back/forward 31 | 32 | To move to a new release or fall back to an older one, use 33 | [rollout_new_release.sh](rollout_new_release.sh) and 34 | [rollback_to_release.sh](rollback_to_release.sh) respectively. 35 | 36 | ### Rolling out new images 37 | 38 | `bash rollout_new_release.sh staging` will create a new Docker image, pulling 39 | the binaries from the 40 | [clangd/clangd/releases](https://github.com/clangd/clangd/releases) page, and 41 | push it to staging. 42 | 43 | `bash rollout_new_release.sh live` will push the latest available Docker image 44 | in Google Container Registry (GCR) into live; for example, it can be used to promote the 45 | latest staging image. 46 | 47 | ### Rolling back to older images 48 | 49 | `bash rollback_to_release.sh [staging|live] IMAGE_FQN` can be used to change 50 | images for staging or live instances.
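A typical rollback might look like this (a sketch only; the digest is a placeholder, substitute a real one obtained as shown below):

```
# Pick the digest of a known-good image from the registry.
gcloud container images list-tags gcr.io/chrome-remote-index/chrome-remote-index-server
# Point the live serving environment back at that image.
bash rollback_to_release.sh live gcr.io/chrome-remote-index/chrome-remote-index-server@sha256:DIGEST
```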
51 | 52 | Fully qualified image names (FQN) can be acquired either through the GCP web UI or 53 | through the SDK with: 54 | 55 | ``` 56 | gcloud container images list-tags gcr.io/chrome-remote-index/chrome-remote-index-server 57 | gcloud container images describe gcr.io/chrome-remote-index/chrome-remote-index-server@sha256:$SHORT_SHA$ 58 | ``` 59 | -------------------------------------------------------------------------------- /deployment/args.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Abort script on failure and print commands as we execute them. 3 | set -x -e 4 | 5 | # GCP project to configure. 6 | PROJECT_ID="chrome-remote-index" 7 | 8 | # Machine type to use for index serving VM instances. 9 | # The custom type below provides 6 vCPUs and 48GB of RAM, which is enough for 10 | # serving chrome-index for 6 different platforms. 11 | # https://cloud.google.com/compute/docs/machine-types#e2_high-memory_machine_types 12 | MACHINE_TYPE="e2-custom-6-49152" 13 | DISK_SIZE="30GB" 14 | 15 | # Used as base name for instance groups and machine instances. 16 | BASE_INSTANCE_NAME="index-server" 17 | 18 | # Fully qualified name for the server image in GCR. 19 | IMAGE_IN_GCR="gcr.io/${PROJECT_ID}/${BASE_INSTANCE_NAME}" 20 | 21 | # Base name for instance templates; can be suffixed with image SHAs. 22 | BASE_TEMPLATE_NAME="${BASE_INSTANCE_NAME}-template" 23 | 24 | # The following options are used by push_new_docker_image.sh to configure the container 25 | # for fetching new index artifacts and consuming them. 26 | 27 | # Which GitHub repository to use for fetching index artifacts. 28 | INDEX_REPO="clangd/chrome-remote-index" 29 | 30 | # Artifact prefix to fetch the index from and port number to serve it on. 31 | # Separated by `:`. 32 | INDEX_ASSET_PORT_PAIRS="chrome-index-linux:50051 \ 33 | chrome-index-chromeos:50052 \ 34 | chrome-index-android:50053 \ 35 | chrome-index-fuchsia:50054 \ 36 | chrome-index-chromecast-linux:50055 \ 37 | chrome-index-chromecast-android:50056" 38 | 39 | # Absolute path to the project root on the indexer machine, passed to 40 | # clangd-index-server. 41 | INDEXER_PROJECT_ROOT="/chromium/src/" 42 | -------------------------------------------------------------------------------- /deployment/cloudbuild.yaml: -------------------------------------------------------------------------------- 1 | steps: 2 | - name: gcr.io/google.com/cloudsdktool/cloud-sdk 3 | args: 4 | - bash 5 | - '-c' 6 | - >- 7 | apt-get install -y unzip python3-requests && 8 | pip install --break-system-packages grip j2cli && 9 | cd deployment && 10 | bash rollout_new_release.sh live && 11 | bash rollout_new_release.sh staging 12 | -------------------------------------------------------------------------------- /deployment/create_deployment_trigger.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | source args.sh 4 | 5 | PUB_SUB_TOPIC="deploy-index-server" 6 | gcloud pubsub topics create --project=$PROJECT_ID $PUB_SUB_TOPIC 7 | 8 | # Create a periodic task that will trigger a new deployment at 9AM UTC every 9 | # Wednesday. 10 | gcloud scheduler jobs create pubsub --project=$PROJECT_ID \ 11 | "deployment-scheduler" --schedule="0 9 * * 3" --topic=$PUB_SUB_TOPIC \ 12 | --message-body="Deploy" 13 | 14 | RED='\033[0;31m' 15 | NC='\033[0m' # No Color 16 | 17 | set +x 18 | # TODO: Create this automatically through the gcloud CLI once it is possible.
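# (Sketch of the manual step: in the console, create a Cloud Build trigger that
# listens on $PUB_SUB_TOPIC and runs deployment/cloudbuild.yaml from $INDEX_REPO;
# the warnings printed below list the console URL and the IAM roles the Cloud
# Build service account needs.)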
19 | echo -en "${RED}WARNING:${NC} " 20 | echo "You need to create a build trigger that'll listen on $PUB_SUB_TOPIC and" 21 | echo "associate it with $INDEX_REPO in" 22 | echo "https://console.cloud.google.com/cloud-build/triggers/add?project=${PROJECT_ID}" 23 | echo 24 | 25 | echo -en "${RED}WARNING:${NC} " 26 | echo "You also need to add Compute Instance Admin, Compute Load Balancer Admin" 27 | echo "and Service Account User roles to cloudbuild service account in" 28 | echo "https://console.cloud.google.com/iam-admin/iam?project=${PROJECT_ID}" 29 | -------------------------------------------------------------------------------- /deployment/create_vm_template.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [[ $# -lt 2 ]]; then 4 | echo "Usage: $0 FULL_IMAGE TEMPLATE_NAME" 5 | echo " FULL_IMAGE - Full name of the image in GCR." 6 | echo " TEMPLATE_NAME - Name to use for VM instance template." 7 | exit 1 8 | fi 9 | 10 | source args.sh 11 | FULL_IMAGE="$1" 12 | TEMPLATE_NAME="$2" 13 | 14 | if gcloud compute --project=$PROJECT_ID instance-templates describe $TEMPLATE_NAME; 15 | then 16 | echo "Template already exists, using it." 17 | exit 0 18 | fi 19 | 20 | gcloud compute --project=$PROJECT_ID instance-templates \ 21 | create-with-container $TEMPLATE_NAME --machine-type=$MACHINE_TYPE \ 22 | --metadata=google-logging-enabled=true,google-monitoring-enabled=true \ 23 | --boot-disk-size=$DISK_SIZE --boot-disk-type=pd-standard \ 24 | --tags=$BASE_INSTANCE_NAME --container-image=$FULL_IMAGE 25 | -------------------------------------------------------------------------------- /deployment/initial_setup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | source args.sh 3 | source setup_utils.sh 4 | 5 | # Create one server image, push it to the GCR first and create an instance 6 | # template using that container. 7 | bash push_new_docker_image.sh 8 | IMAGE_SHA=$(gcloud container images list-tags $IMAGE_IN_GCR --format=yaml --limit=1 | grep -i digest | cut -d' ' -f2) 9 | SHORT_SHA=$(echo $IMAGE_SHA | cut -d: -f2 | head -c 8) 10 | IMAGE_FQN="${IMAGE_IN_GCR}@${IMAGE_SHA}" 11 | TEMPLATE_NAME="${BASE_TEMPLATE_NAME}-${SHORT_SHA}" 12 | bash create_vm_template.sh $IMAGE_FQN $TEMPLATE_NAME 13 | 14 | # Create the firewall rule to allow ingress into the server on port 50051. 15 | gcloud compute --project=$PROJECT_ID firewall-rules create \ 16 | "${BASE_INSTANCE_NAME}-fw-grpc" --direction=INGRESS --priority=1000 \ 17 | --action=ALLOW --rules=tcp:50051 --source-ranges=0.0.0.0/0 \ 18 | --target-tags=$BASE_INSTANCE_NAME 19 | 20 | # We need two healthchecks, one global for live instance and one regional for 21 | # staging. 22 | HEALTH_CHECK_NAME="${BASE_INSTANCE_NAME}-hc-grpc" 23 | createHealthCheck "global" $HEALTH_CHECK_NAME 24 | createHealthCheck "europe-west1" $HEALTH_CHECK_NAME 25 | 26 | # Create one staging instance and 2 production instances, with appropriate load 27 | # balancers. 28 | 29 | # Named port for load balancer to redirect traffic into instance groups. 30 | NAMED_PORT="grpc" 31 | 32 | # Create staging instance with a single VM and frontend on port 50051. 
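# (createLoadBalancer and addBackendToLB come from setup_utils.sh: the backend
# service, frontend address and forwarding rule are created first, then a managed
# instance group is created from $TEMPLATE_NAME in each listed zone and attached
# to the load balancer as a backend.)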
33 | IG_NAME="${BASE_INSTANCE_NAME}-staging" 34 | LB_NAME="${IG_NAME}-lb" 35 | createLoadBalancer $LB_NAME "europe-west1" 50051 $HEALTH_CHECK_NAME $NAMED_PORT 36 | addBackendToLB $LB_NAME "${IG_NAME}-eu" "europe-west1-b" "europe-west1" \ 37 | $TEMPLATE_NAME $HEALTH_CHECK_NAME $NAMED_PORT 38 | 39 | # Now create the live instance with 2 VMs and frontend on port 5900. 40 | IG_NAME="${BASE_INSTANCE_NAME}-live" 41 | LB_NAME="${IG_NAME}-lb" 42 | createLoadBalancer $LB_NAME "global" 5900 $HEALTH_CHECK_NAME $NAMED_PORT 43 | addBackendToLB $LB_NAME "${IG_NAME}-eu" "europe-west1-b" "global" \ 44 | $TEMPLATE_NAME $HEALTH_CHECK_NAME $NAMED_PORT 45 | addBackendToLB $LB_NAME "${IG_NAME}-us" "us-central1-b" "global" \ 46 | $TEMPLATE_NAME $HEALTH_CHECK_NAME $NAMED_PORT 47 | -------------------------------------------------------------------------------- /deployment/instance_group_management.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | source args.sh 3 | 4 | function updateIG() { 5 | local IG_NAME="$1" 6 | local IG_REGION="$2" 7 | # Update instance group to use the new image, creating 1 backup instance for 8 | # transition. 9 | gcloud beta compute --project=$PROJECT_ID instance-groups managed \ 10 | rolling-action start-update $IG_NAME --version=template=$TEMPLATE_NAME \ 11 | --max-surge=1 --zone=$IG_REGION --min-ready=10m --max-unavailable=1 12 | } 13 | 14 | function rolloutImage() { 15 | local IMAGE_FQN="$1" 16 | local TARGET="$2" 17 | local IG_BASE="ig-${TARGET}" 18 | local SHORT_SHA=$(echo $IMAGE_FQN | cut -d: -f2 | head -c 8) 19 | local TEMPLATE_NAME="${BASE_TEMPLATE_NAME}-${SHORT_SHA}" 20 | bash create_vm_template.sh $IMAGE_FQN $TEMPLATE_NAME 21 | 22 | if [ "$TARGET" = "staging" ]; then 23 | updateIG "${IG_BASE}-eu" "europe-west1-b" 24 | else 25 | updateIG "${IG_BASE}-eu" "europe-west1-b" 26 | updateIG "${IG_BASE}-us" "us-central1-b" 27 | fi 28 | } 29 | -------------------------------------------------------------------------------- /deployment/push_new_docker_image.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | source args.sh 3 | 4 | SERVER_ASSET_PREFIX="clangd_indexing_tools-linux" 5 | OUTPUT_NAME="$SERVER_ASSET_PREFIX.zip" 6 | 7 | TEMP_DIR="$(mktemp -d)" 8 | # Make sure we delete TEMP_DIR on exit. 9 | trap "rm -r $TEMP_DIR" EXIT 10 | 11 | # Copy all the necessary files for docker image into a temp directory and move 12 | # into it. 13 | cp ../docker/Dockerfile "$TEMP_DIR/" 14 | cp ../docker/index_fetcher.sh "$TEMP_DIR/" 15 | cp ../docker/entry_point.sh "$TEMP_DIR/" 16 | cp ../docker/status_updater.sh "$TEMP_DIR/" 17 | cp ../docker/start_server.sh "$TEMP_DIR/" 18 | cp -r ../docker/status_templates "$TEMP_DIR/" 19 | cp -r ../docs "$TEMP_DIR/" 20 | cp ../download_latest_release_assets.py "$TEMP_DIR/" 21 | cd "$TEMP_DIR" 22 | 23 | # Generate static pages for serving. 24 | cd docs 25 | REPOSITORY=$INDEX_REPO j2 ../status_templates/contact > contact.html 26 | 27 | export GRIPHOME="$(pwd)" 28 | export GRIPURL="$(pwd)" 29 | echo "CACHE_DIRECTORY = '$(pwd)/asset'" > settings.py 30 | for f in *.md; do 31 | BASE_NAME="${f%.*}" 32 | OUT_FILE="${BASE_NAME}.html" 33 | grip --export - $OUT_FILE --no-inline < $f 34 | # Replace links to current directory with root. 35 | sed -i "s@$(pwd)@@g" $OUT_FILE 36 | # Replace links to current directory with root. 37 | sed -i "s@.*@${BASE_NAME} - ${PROJECT_ID}@g" \ 38 | $OUT_FILE 39 | # Insert the footer section for the navbar. 
40 | sed -i "\@@e cat contact.html" $OUT_FILE 41 | done 42 | 43 | for f in asset/*.css; do 44 | sed -i "\@@i " ../status_templates/header 45 | done 46 | 47 | rm -f *.md settings.py footer.html 48 | chmod -R a+rx * 49 | cd .. 50 | 51 | # First download and extract remote index server. 52 | ./download_latest_release_assets.py \ 53 | --repository="clangd/clangd" \ 54 | --asset-prefix="$SERVER_ASSET_PREFIX" \ 55 | --output-name="$OUTPUT_NAME" 56 | # Extract clangd-index-server and monitor. 57 | unzip -j "$OUTPUT_NAME" "*/bin/clangd-index-server*" 58 | chmod +x clangd-index-server clangd-index-server-monitor 59 | 60 | # Build the image, tag it for GCR and push. 61 | docker build --build-arg REPOSITORY="$INDEX_REPO" \ 62 | --build-arg INDEX_ASSET_PORT_PAIRS="$INDEX_ASSET_PORT_PAIRS" \ 63 | --build-arg INDEXER_PROJECT_ROOT="$INDEXER_PROJECT_ROOT" \ 64 | --build-arg PROJECT_NAME="$PROJECT_ID" \ 65 | -t "$IMAGE_IN_GCR" . 66 | gcloud auth configure-docker 67 | docker push "$IMAGE_IN_GCR" 68 | -------------------------------------------------------------------------------- /deployment/rollback_to_release.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | TARGET="$1" 3 | IMAGE_FQN="$2" 4 | 5 | function printUsage() { 6 | echo "Usage: $0 [staging | live] IMAGE_FQN" 7 | echo " You can retrieve image fqn via 'gcloud container images list-tags IMAGE_NAME'" 8 | echo " followed by 'gcloud container images describe IMAGE_NAME@sha265:DIGEST'" 9 | exit 1 10 | } 11 | 12 | case $TARGET in 13 | staging) 14 | # Always create a new docker image when pushing to staging. 15 | bash push_new_docker_image.sh 16 | ;; 17 | live) 18 | ;; 19 | *) 20 | printUsage 21 | ;; 22 | esac 23 | if [ -z "$IMAGE_FQN" ]; then 24 | printUsage 25 | fi 26 | 27 | source args.sh 28 | source instance_group_management.sh 29 | 30 | rolloutImage $IMAGE_FQN $TARGET 31 | -------------------------------------------------------------------------------- /deployment/rollout_new_release.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | source args.sh 3 | source instance_group_management.sh 4 | 5 | TARGET="$1" 6 | 7 | case $TARGET in 8 | staging) 9 | # Always create a new docker image when pushing to staging. 10 | bash push_new_docker_image.sh 11 | ;; 12 | live) 13 | ;; 14 | *) 15 | echo "Usage: $0 [staging | live]" 16 | exit 1 17 | esac 18 | 19 | # Fetch latest image sha from GCR. 20 | IMAGE_SHA=$(gcloud container images list-tags $IMAGE_IN_GCR --format=yaml --limit=1 | grep -i digest | cut -d' ' -f2) 21 | IMAGE_FQN="${IMAGE_IN_GCR}@${IMAGE_SHA}" 22 | rolloutImage $IMAGE_FQN $TARGET 23 | -------------------------------------------------------------------------------- /deployment/setup_utils.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | source args.sh 3 | 4 | # Creates a TCP health check on port 50051 in specified region with given name. 5 | function createHealthCheck() { 6 | local HC_REGION="$1" 7 | local HC_NAME="$2" 8 | if [ "$HC_REGION" = "global" ]; then 9 | HC_REGION="--global" 10 | else 11 | HC_REGION="--region=${HC_REGION}" 12 | fi 13 | 14 | # Create tcp health check on port 50051. 
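  # (The check below probes TCP port 50051 every 10 seconds with a 5 second
  # timeout; an instance is marked unhealthy after 3 consecutive failures and
  # healthy again after a single successful probe.)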
15 | gcloud beta compute health-checks create tcp $HC_NAME --project=$PROJECT_ID \ 16 | --port=50051 --proxy-header=NONE --no-enable-logging $HC_REGION \ 17 | --check-interval=10 --timeout=5 --unhealthy-threshold=3 \ 18 | --healthy-threshold=1 19 | } 20 | 21 | # Creates a TCP load balancer in given region. Uses a reverse tcp proxy for 22 | # global loadbalancers. 23 | function createLoadBalancer() { 24 | local LB_NAME="$1" 25 | local REGION="$2" 26 | local LB_PORT="$3" 27 | local HC_NAME="$4" 28 | local NAMED_PORT="$5" 29 | local PROXY_NAME="${LB_NAME}-tcp-proxy" 30 | local IPV4_NAME="${LB_NAME}-ipv4" 31 | 32 | if [ "$REGION" = "global" ]; then 33 | local LB_REGION="--${REGION}" 34 | HC_REGION="--global-health-checks" 35 | local IP_VERSION="--ip-version=IPV4" 36 | else 37 | HC_REGION="--health-checks-region=${REGION}" 38 | local LB_REGION="--region=${REGION}" 39 | # Regional load balancers don't get to choose between ipv4 and ipv6. 40 | local IP_VERSION="" 41 | fi 42 | 43 | # First create the backend service to which instance groups will be attached 44 | # later on. 45 | gcloud compute --project=$PROJECT_ID backend-services create $LB_NAME \ 46 | $HC_REGION $LB_REGION --protocol="TCP" --health-checks=$HC_NAME \ 47 | --port-name=$NAMED_PORT 48 | 49 | # Create an ip for the LB frontend. 50 | gcloud compute --project=$PROJECT_ID addresses create $IPV4_NAME $LB_REGION \ 51 | $IP_VERSION 52 | 53 | if [ "$REGION" = "global" ]; then 54 | # Create a TCP proxy. 55 | gcloud compute --project=$PROJECT_ID target-tcp-proxies create $PROXY_NAME \ 56 | --backend-service=$LB_NAME --proxy-header NONE 57 | # Create a forwarding rule from frontend to tcp proxy. 58 | gcloud compute --project=$PROJECT_ID forwarding-rules create \ 59 | "${IPV4_NAME}-forwarding-rule" --global --target-tcp-proxy=$PROXY_NAME \ 60 | --address=$IPV4_NAME --ports=$LB_PORT 61 | else 62 | # Create a forwarding rule from frontend to backend service directly. 63 | gcloud compute --project=$PROJECT_ID forwarding-rules create \ 64 | "${IPV4_NAME}-forwarding-rule" --load-balancing-scheme external \ 65 | --region=$REGION --ports=$LB_PORT --address=$IPV4_NAME \ 66 | --backend-service=$LB_NAME 67 | fi 68 | } 69 | 70 | # Creates an instance group with the given template in given region and adds it 71 | # as a backend service for the given load balancer. 72 | function addBackendToLB() { 73 | local LB_NAME="$1" 74 | local IG_NAME="$2" 75 | local IG_ZONE="$3" 76 | local REGION="$4" 77 | local TEMPLATE_NAME="$5" 78 | local HC_NAME="$6" 79 | local NAMED_PORT="$7" 80 | if [ "$REGION" = "global" ]; then 81 | local LB_REGION="--${REGION}" 82 | else 83 | local LB_REGION="--region=${REGION}" 84 | fi 85 | 86 | # Create a managed instance group, with given name in given zone. 87 | gcloud compute --project=$PROJECT_ID instance-groups managed create $IG_NAME \ 88 | --base-instance-name=$IG_NAME --template=$TEMPLATE_NAME --size=1 \ 89 | --zone=$IG_ZONE --health-check=$HC_NAME --initial-delay=300 90 | 91 | # Also add the named port for load balancer use. 92 | gcloud compute --project=$PROJECT_ID instance-groups set-named-ports \ 93 | $IG_NAME --named-ports="${NAMED_PORT}:50051" --zone=$IG_ZONE 94 | 95 | # Add the instance group as a backend to the load balancer. 
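  # (The backend service created in createLoadBalancer resolves the named port
  # set above, so traffic reaching the load balancer frontend ends up on port
  # 50051 of every instance in this group.)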
96 | gcloud compute --project=$PROJECT_ID backend-services add-backend \ 97 | $LB_NAME $LB_REGION --instance-group=$IG_NAME --instance-group-zone=$IG_ZONE 98 | } 99 | -------------------------------------------------------------------------------- /docker/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM debian:stable 2 | RUN apt-get update -y && apt-get upgrade -y 3 | RUN apt-get install -y cron python3 python3-requests unzip nginx-light 4 | # We install pip, jinja and delete pip afterwards in same step to keep final 5 | # image size small. 6 | RUN apt-get install -y --no-install-recommends python3-pip && \ 7 | pip3 install --break-system-packages j2cli && apt-get remove -y python3-pip \ 8 | && apt-get autoremove -y 9 | RUN rm -rf /var/www/html/* 10 | WORKDIR "/" 11 | 12 | ARG REPOSITORY 13 | ENV REPOSITORY ${REPOSITORY} 14 | ARG INDEX_ASSET_PORT_PAIRS 15 | ENV INDEX_ASSET_PORT_PAIRS ${INDEX_ASSET_PORT_PAIRS} 16 | ARG INDEXER_PROJECT_ROOT 17 | ENV INDEXER_PROJECT_ROOT ${INDEXER_PROJECT_ROOT} 18 | ARG PROJECT_NAME 19 | ENV PROJECT_NAME ${PROJECT_NAME} 20 | 21 | ADD "clangd-index-server" "clangd-index-server" 22 | ADD "clangd-index-server-monitor" "clangd-index-server-monitor" 23 | ADD "status_templates" "/status_templates" 24 | ADD "docs" "/docs" 25 | ADD "download_latest_release_assets.py" "download_latest_release_assets.py" 26 | ADD "index_fetcher.sh" "index_fetcher.sh" 27 | ADD "entry_point.sh" "entry_point.sh" 28 | ADD "status_updater.sh" "status_updater.sh" 29 | ADD "start_server.sh" "start_server.sh" 30 | 31 | ENTRYPOINT ["/entry_point.sh"] 32 | -------------------------------------------------------------------------------- /docker/README.md: -------------------------------------------------------------------------------- 1 | The Dockerfile requires clangd-index-server, 2 | download_latest_release_assets.py, index_fetcher.sh and entry_point.sh to be in 3 | the working directory. 4 | 5 | The container sets up a cronjob that'll invoke index_fetcher.sh with necessary 6 | environment variables every 6 hours and starts the clangd-index-server, which 7 | automatically consumes the artifacts produced by index_fetcher. Hence a new 8 | image is only needed for configuration or binary updates. 9 | -------------------------------------------------------------------------------- /docker/entry_point.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Abort script on failure. 4 | set -e 5 | # Print commands as we execute them. 6 | set -x 7 | 8 | # We will prepend all the ports to listen on while starting index-server 9 | # instances. 10 | STATUS_UPDATER_CMD="/status_updater.sh $PROJECT_NAME $REPOSITORY" 11 | 12 | # Move static files to serving directory. 13 | cp -r docs/* /var/www/html/ 14 | 15 | # Start the nginx server. Contents in /var/www/html are served at *:80. 16 | service nginx start 17 | 18 | # Set the path so that cron can find j2. 19 | echo "PATH=$PATH" > crontab_schedule.txt 20 | for ASSET_PORT_PAIR in $INDEX_ASSET_PORT_PAIRS 21 | do 22 | INDEX_ASSET_PREFIX=${ASSET_PORT_PAIR%:*} 23 | PORT=${ASSET_PORT_PAIR#*:} 24 | INDEX_FILE="/${INDEX_ASSET_PREFIX}.idx" 25 | INDEX_FETCHER_CMD="/index_fetcher.sh $REPOSITORY $INDEX_ASSET_PREFIX $INDEX_FILE" 26 | 27 | # Run index fetcher once every 6 hours. 28 | echo "0 */6 * * * $INDEX_FETCHER_CMD" >> crontab_schedule.txt 29 | # Start the server and keep it running. 
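  # (start_server.sh fetches the index once, then launches clangd-index-server
  # on the given port and restarts it whenever it exits; one background copy is
  # started per entry in INDEX_ASSET_PORT_PAIRS.)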
30 | bash /start_server.sh "$INDEX_FETCHER_CMD" $INDEX_FILE $INDEXER_PROJECT_ROOT \ 31 | $PORT $INDEX_ASSET_PREFIX & 32 | # Watch instance. 33 | STATUS_UPDATER_CMD="${STATUS_UPDATER_CMD} ${ASSET_PORT_PAIR}" 34 | done 35 | # Update status every minute. 36 | echo "* * * * * $STATUS_UPDATER_CMD" >> crontab_schedule.txt 37 | # Run status updater at startup to generate error file. 38 | $STATUS_UPDATER_CMD 39 | 40 | crontab crontab_schedule.txt 41 | cron -f 42 | -------------------------------------------------------------------------------- /docker/index_fetcher.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | REPO="$1" 3 | ASSET_PREFIX="$2" 4 | INDEX_FILE="$3" 5 | 6 | TEMP_DIR="$(mktemp -d)" 7 | # Make sure we delete TEMP_DIR on exit. 8 | trap "rm -r $TEMP_DIR" EXIT 9 | # Abort script on failure. 10 | set -e 11 | # Print commands as we execute them. 12 | set -x 13 | 14 | cd $TEMP_DIR 15 | /download_latest_release_assets.py --repository="$REPO" \ 16 | --asset-prefix="$ASSET_PREFIX" \ 17 | --output-name="index.zip" 18 | unzip "index.zip" 19 | mv *.idx $INDEX_FILE 20 | -------------------------------------------------------------------------------- /docker/start_server.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Helper to start a server, by fetching the relevant index once, and restart it 3 | # if it crashes. 4 | 5 | INDEX_FETCHER_CMD=$1 6 | INDEX_FILE=$2 7 | INDEXER_PROJECT_ROOT=$3 8 | PORT=$4 9 | LOG_PREFIX=$5 10 | 11 | # Fetch index once. 12 | $INDEX_FETCHER_CMD 13 | # Start the server. 14 | until /clangd-index-server $INDEX_FILE $INDEXER_PROJECT_ROOT -log-public \ 15 | -server-address="0.0.0.0:${PORT}" -log-prefix=$LOG_PREFIX 16 | do 17 | echo "Restarting index-server ${LOG_PREFIX}. Exited with code $?." >&2 18 | sleep 1 19 | done 20 | -------------------------------------------------------------------------------- /docker/status_templates/contact: -------------------------------------------------------------------------------- 1 |
2 |

3 | You can raise any issues about this service in 4 | {{REPOSITORY}}. 5 |

6 |

7 | Landing | 8 | Status | 9 | Privacy policy | 10 | Terms and conditions 11 |

12 | -------------------------------------------------------------------------------- /docker/status_templates/failure: -------------------------------------------------------------------------------- 1 |
2 | {{INSTANCE_NAME}}:
3 | Failed to fetch status from instance.
4 | 
5 | -------------------------------------------------------------------------------- /docker/status_templates/footer: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | -------------------------------------------------------------------------------- /docker/status_templates/header: -------------------------------------------------------------------------------- 1 | 2 | 3 | {{PROJECT_NAME}} 4 | 9 | 10 | 11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |

{{PROJECT_NAME}} status page

19 | -------------------------------------------------------------------------------- /docker/status_templates/success: -------------------------------------------------------------------------------- 1 | {% set uptime_seconds_int = uptime_seconds | int %} 2 | {% set index_age_seconds_int = index_age_seconds | int %} 3 |
4 | {{INSTANCE_NAME}}:
5 | Uptime: {{uptime_seconds_int // 3600 // 24}} days {{(uptime_seconds_int // 3600) % 24}} hours {{(uptime_seconds_int // 60) % 60}} minutes {{uptime_seconds_int % 60}} seconds.
6 | Index age: {{index_age_seconds_int // 3600}} hours {{(index_age_seconds_int // 60) % 60}} minutes {{index_age_seconds_int % 60}} seconds.
7 | 
8 | -------------------------------------------------------------------------------- /docker/status_updater.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Issues monitoring requests on all ports of the localhost passed in as 3 | # positional args and merges those into one final html. 4 | 5 | TMPL_DIR="/status_templates" 6 | HEADER_TMPL="$TMPL_DIR/header" 7 | SUCCESS_TMPL="$TMPL_DIR/success" 8 | FAILURE_TMPL="$TMPL_DIR/failure" 9 | CONTACT_TMPL="$TMPL_DIR/contact" 10 | FOOTER_TMPL="$TMPL_DIR/footer" 11 | OUT_FILE="/var/www/html/status.html" 12 | 13 | if [[ $# -lt 2 ]]; then 14 | echo "Usage: $0 PROJECT_NAME REPOSITORY PORTS..." 15 | echo " PROJECT_NAME - e.g. llvm-remote-index" 16 | echo " REPOSITORY - e.g. clangd/llvm-remote-index" 17 | echo " NAME:PORT pairs... - One or more name:port pairs. Name is displayed \ 18 | on the status page and port is used to query an index-server on the localhost" 19 | exit 20 | fi 21 | 22 | set -e -x 23 | 24 | # Stores monitoring info from grpc servers. 25 | TEMP_DATA_FILE=$(mktemp) 26 | 27 | # Current output we are building, swapped with $OUT_FILE once it is complete. 28 | TEMP_OUT_FILE=$(mktemp) 29 | # Make sure it will be readable once moved to final target. 30 | chmod a+r $TEMP_OUT_FILE 31 | 32 | # Delete tmp files on exit. 33 | trap "rm -f $TEMP_DATA_FILE" EXIT 34 | trap "rm -f $TEMP_OUT_FILE" EXIT 35 | 36 | # Env variables used by templates. 37 | export HOST_NAME="$(hostname -s)" 38 | export PROJECT_NAME="$1" 39 | shift 40 | export REPOSITORY="$1" 41 | shift 42 | 43 | j2 $HEADER_TMPL >> $TEMP_OUT_FILE 44 | 45 | # All the remaining args are ports on the local machine to connect. 46 | while [[ $# -gt 0 ]]; 47 | do 48 | NAME=${1%:*} 49 | PORT=${1#*:} 50 | shift 51 | export INSTANCE_NAME="${HOST_NAME}/${NAME}" 52 | 53 | if /clangd-index-server-monitor "localhost:${PORT}" > $TEMP_DATA_FILE; then 54 | TMPL_FILE=$SUCCESS_TMPL 55 | else 56 | TMPL_FILE=$FAILURE_TMPL 57 | # j2 expects a valid json file, so in case of failure to communicate with 58 | # server, just provide an empty json. 59 | echo '{}' > $TEMP_DATA_FILE 60 | fi 61 | 62 | j2 --format=json -e '' "$TMPL_FILE" "$TEMP_DATA_FILE" >> $TEMP_OUT_FILE 63 | done 64 | 65 | j2 $CONTACT_TMPL >> $TEMP_OUT_FILE 66 | j2 $FOOTER_TMPL >> $TEMP_OUT_FILE 67 | 68 | mv $TEMP_OUT_FILE $OUT_FILE 69 | -------------------------------------------------------------------------------- /docs/index.md: -------------------------------------------------------------------------------- 1 | # Chromium remote index server 2 | 3 | This server provides about a day old index for 4 | [chromium](https://chromium.googlesource.com/chromium/src.git) to be used by 5 | clangd. It aims to lower resource usage of clangd on workstation machines by 6 | moving project-wide indexing and serving pieces to a centralized server. 7 | 8 | To make use of this service you need a clangd build with remote index support 9 | and point clangd at this particular index server. 10 | 11 | More details on 12 | [remote index internals](https://clangd.llvm.org/remote-index.html). 13 | 14 | ## Getting clangd client 15 | 16 | **Googlers only** : clangd is installed on `/usr/bin/clangd` by default on 17 | glinux workstations, you can directly use that instead. 18 | 19 | You will need **clangd 12** or newer, [built with remote index support][build]. 20 | This feature is not yet enabled by default, but the [clangd/clangd releases] 21 | *do* enable it. 
On clangd 13+, you can verify that by checking for “grpc” in 22 | the output of `clangd --version`: 23 | 24 | [build]: https://clangd.llvm.org/design/remote-index.html#buildingreleases 25 | [clangd/clangd releases]: https://github.com/clangd/clangd/releases 26 | 27 | ``` 28 | > clangd --version 29 | clangd version 13.0.0 (https://github.com/llvm/llvm-project ...) 30 | Features: linux+grpc 31 | ``` 32 | 33 | After acquiring the binary, make sure your LSP client points to it. Details 34 | about this process can be found 35 | [here](https://clangd.llvm.org/installation.html#editor-plugins). 36 | 37 | ## Pointing clangd to chromium-remote-index-server 38 | 39 | **Googlers only** : if you are using clangd installed on a glinux workstation, 40 | you should have remote-index support on by default. 41 | 42 | Finally you'll need to point clangd at this particular index server. The easiest 43 | way to achieve this is via user configuration: a config.yaml file in an 44 | OS-specific directory: 45 | 46 | - Windows: `%LocalAppData%\clangd\config.yaml`, typically 47 | `C:\Users\Bob\AppData\Local\clangd\config.yaml`. 48 | - macOS: `~/Library/Preferences/clangd/config.yaml` 49 | - Linux and others: `$XDG_CONFIG_HOME/clangd/config.yaml`, typically 50 | `~/.config/clangd/config.yaml`. 51 | 52 | You'll need to populate this config file with the following, while changing 53 | `/path/to/chromium/src/` with absolute path to your checkout location. 54 | 55 | ``` 56 | If: 57 | PathMatch: /path/to/chromium/src/.* 58 | Index: 59 | External: 60 | # Change this depending on the platform, see below. 61 | Server: linux.clangd-index.chromium.org:5900 62 | MountPoint: /path/to/chromium/src/ 63 | ``` 64 | 65 | ## FAQ 66 | 67 | ### I am targeting a non-linux platform, can I still use this service? 68 | 69 | If you are targeting a different platform, you can change the Server to one of 70 | the following instead: 71 | 72 | ``` 73 | linux: linux.clangd-index.chromium.org:5900 74 | chromeos: chromeos.clangd-index.chromium.org:5900 75 | android: android.clangd-index.chromium.org:5900 76 | fuchsia: fuchsia.clangd-index.chromium.org:5900 77 | chromecast-linux: chromecast-linux.clangd-index.chromium.org:5900 78 | chromecast-android: chromecast-android.clangd-index.chromium.org:5900 79 | ``` 80 | 81 | Unfortunately we don't support iOS, mac & windows targets yet. 82 | 83 | ### Will clangd still know about my local changes? 84 | 85 | Clangd will still have up-to-date symbol information for the files open (and the 86 | headers included through them) in your current editing session, but the 87 | information might be stale for the others. This has been working fine for most 88 | users, but if that's not the case for you, you can also turn on local indexing 89 | for parts of the codebase. Such a config file could look like: 90 | 91 | ```yaml 92 | If: 93 | PathMatch: /path/to/chromium/src/.* 94 | Index: 95 | External: 96 | Server: linux.clangd-index.chromium.org:5900 97 | MountPoint: /path/to/chromium/src/ 98 | --- 99 | If: 100 | PathMatch: /path/to/chromium/src/chromeos/login/.* 101 | Index: 102 | Background: Build 103 | ``` 104 | 105 | Note that the fragment setting Index.Background to Build must come after the 106 | external index specification. More details on 107 | [configuration schema](https://clangd.llvm.org/config.html). 108 | 109 | ### I've multiple checkouts, how do I state that fact in config? 110 | 111 | If you have multiple checkouts you can specify different fragments by putting 112 | --- in between. 
For example: 113 | 114 | ```yaml 115 | If: 116 | PathMatch: /path/to/chromium/src/.* 117 | Index: 118 | External: 119 | Server: linux.clangd-index.chromium.org:5900 120 | MountPoint: /path/to/chromium/src/ 121 | --- 122 | If: 123 | PathMatch: /path/to/chromium2/src/.* 124 | Index: 125 | External: 126 | Server: chromeos.clangd-index.chromium.org:5900 127 | MountPoint: /path/to/chromium2/src/ 128 | ``` 129 | 130 | ### Verifying that remote-index is in use 131 | 132 | Clangd should work as before; e.g. it can take you to the definitions of 133 | symbols that aren't directly visible within the current file, or provide code 134 | completions for symbols outside of the current translation unit. Note that the 135 | information coming from files that were recently modified but have not been opened in 136 | the current editing session might be stale. 137 | 138 | To increase certainty, you can check the clangd logs after performing actions 139 | like go-to-definition or code completion and look for lines such as: 140 | 141 | ``` 142 | I[12:49:24.612] Associating /repos/chromium/src/ with remote index at linux.clangd-index.chromium.org:5900. 143 | V[12:49:24.614] Remote index connection [linux.clangd-index.chromium.org:5900]: idle => connecting 144 | V[12:49:24.662] Remote index [linux.clangd-index.chromium.org:5900]: LookupRequest => 1 results in 48ms. 145 | V[12:49:24.662] Remote index connection [linux.clangd-index.chromium.org:5900]: connecting => ready 146 | ``` 147 | 148 | Note that to see the verbose logs you need to pass `-log=verbose` to clangd. 149 | You can find details about accessing clangd logs in 150 | https://clangd.llvm.org/troubleshooting.html#gathering-logs. 151 | 152 | ### Unknown config key warning 153 | 154 | If you see the following warning: 155 | 156 | > I[00:00:00.000] config warning at /path/to/.config/clangd/config.yaml:5:2: 157 | > Unknown Index key External 158 | 159 | It means your version of clangd is too old or was not built with remote index 160 | support. See [Getting clangd client](#getting-clangd-client) for more details. 161 | 162 | ### Untrusted config warning 163 | 164 | If you see the following warning: 165 | 166 | > Remote index may not be specified by untrusted configuration. Copy this into 167 | > user config to use it. 168 | 169 | It means you configured the remote index through a project config (e.g. 170 | `/path/to/llvm/.clangd`), which is no longer supported. Please follow the 171 | [configuration instructions](#pointing-clangd-to-chromium-remote-index-server) 172 | above instead. 173 | 174 | ### Symbol information seems to be missing for some symbols. 175 | 176 | If you are working on a branded chromeos build, using the chromeos index is not 177 | enough, as there is some discrepancy in how std symbols are handled between branded and 178 | unbranded builds.
You can work around this by updating your remote index spec to 179 | look like: 180 | 181 | ```yaml 182 | If: 183 | PathMatch: /path/to/chromium/src/.* 184 | Index: 185 | External: 186 | Server: chromeos.clangd-index.chromium.org:5900 187 | MountPoint: /path/to/chromium/src/ 188 | CompileFlags: 189 | Add: [-D_LIBCPP_ABI_UNSTABLE, -D_LIBCPP_ABI_VERSION=Cr] 190 | ``` 191 | -------------------------------------------------------------------------------- /docs/privacy.md: -------------------------------------------------------------------------------- 1 | # Privacy policy 2 | 3 | ## Data pipeline 4 | 5 | The Clangd remote index server builds an index directly from the public Chromium 6 | [source code](https://chromium.googlesource.com/chromium/src.git). The 7 | [indexer code](https://github.com/clangd/llvm-remote-index/blob/master/.github/workflows/index.yaml) 8 | is executed on a GCP VM using 9 | [this Docker container](https://github.com/clangd/chrome-remote-index/blob/main/indexer/Dockerfile). 10 | 11 | ## User data 12 | 13 | The remote index service can be summarized as an implementation of the 14 | [`clang::clangd::SymbolIndex`](https://github.com/llvm/llvm-project/blob/main/clang-tools-extra/clangd/index/Index.h) 15 | interface. The data we transfer to and from the client is the data needed to 16 | form a request to the index instance (and doesn't differ from the analogous 17 | request sent to the local index). 18 | [clangd/index/remote/Index.proto](https://github.com/llvm/llvm-project/blob/main/clang-tools-extra/clangd/index/remote/Index.proto) 19 | is a specification of the data that is transferred over the wire. Even though this 20 | data is transferred to and from the server, none of it is actually saved. The 21 | server discards the request data from RAM right after the response is 22 | sent, and the only data it saves is: 23 | 24 | * Request timestamp 25 | * How much time it took the server to process the request 26 | * Status of the request processing (success/failure) 27 | * Number of results returned for each successful request 28 | 29 | These logs help maintainers monitor and identify problems with the service and 30 | improve it over time. We [run the 31 | server](https://github.com/clangd/chrome-remote-index/blob/master/deployment/entry_point.sh) 32 | with the `--log-public` option within a Docker 33 | [container](https://github.com/clangd/chrome-remote-index/blob/master/docker/Dockerfile). 34 | All [deployment 35 | scripts](https://github.com/clangd/chrome-remote-index/tree/master/deployment) 36 | are also public. 37 | 38 | ## Client and server specification 39 | 40 | Finally, the code that runs the service, as well as its client side, is publicly 41 | available. The client-side implementation lives in upstream LLVM under 42 | [clang-tools-extra/clangd/index/remote/](https://github.com/llvm/llvm-project/tree/main/clang-tools-extra/clangd/index/remote); 43 | this is exactly the code used to produce the Clangd 44 | [releases and weekly snapshots](https://github.com/clangd/clangd/releases). The 45 | server code lives in the 46 | [clangd/chrome-remote-index](https://github.com/clangd/chrome-remote-index) 47 | repository, which also contains the deployment scripts. The service is deployed on the 48 | public instance of Google Cloud Platform.
49 | -------------------------------------------------------------------------------- /docs/terms.md: -------------------------------------------------------------------------------- 1 | # Terms and Conditions 2 | 3 | Clangd remote index service is run on a best-effort basis by Clangd developers. 4 | Google donates VM instances in GCP to the Chromium Project for hosting this 5 | service, and is not tied to it in any other way. We also monitor the service on 6 | a best-effort basis and will do our best to deal with arising problems (such as 7 | server not being responsive) within several working days. 8 | 9 | The service is aimed to improve the workflow of Chromium developers and relieve 10 | the burden of having to use a very powerful machine for editing code by 11 | off-loading one of the most expensive operations -- codebase indexing. Remote 12 | index service offers a way to maintain a connection with the infrastructure that 13 | keeps relatively fresh Chromium index (rebuilt and updated daily) and use it in 14 | combination with Clangd, so that users can take advantage of its features such 15 | as code completion, code navigation (go-to-definition, find references) and so 16 | on. 17 | 18 | For more information about remote index feature and its design, please see 19 | [documentation](https://clangd.llvm.org/remote-index.html). 20 | 21 | Both the service and the code it runs are available publicly for all interested 22 | parties. Please check the [privacy document](domain/privacy) to 23 | learn more about how we keep your data secure and where you can inspect the 24 | code. 25 | 26 | To get in touch with the developers, report bugs and ask questions, please open 27 | GitHub Issue: 28 | [clangd/chrome-remote-index](https://github.com/clangd/chrome-remote-index/issues). 29 | -------------------------------------------------------------------------------- /download_latest_release_assets.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | import argparse 4 | import os 5 | import requests 6 | import sys 7 | 8 | 9 | # Returns True if the download was successful. 10 | def download(repository, asset_prefix, output_dir, output_name): 11 | # Traverse releases in chronological order. 12 | request = requests.get( 13 | f'https://api.github.com/repos/{repository}/releases') 14 | for release in request.json(): 15 | for asset in release.get('assets', []): 16 | if asset.get('name', '').startswith(asset_prefix): 17 | download_url = asset['browser_download_url'] 18 | downloaded_file = requests.get(download_url) 19 | if output_name is None: 20 | output_name = asset['name'] 21 | with open(os.path.join(output_dir, output_name), 'wb') as f: 22 | f.write(downloaded_file.content) 23 | # The latest release is downloaded, there is nothing else to 24 | # do. 
25 | return True 26 | return False 27 | 28 | 29 | def main(): 30 | parser = argparse.ArgumentParser( 31 | description='Download Clangd binaries (clangd itself, indexer, etc).') 32 | parser.add_argument( 33 | '--repository', 34 | type=str, 35 | help='GitHub repository to download latest release from.', 36 | default='clangd/clangd') 37 | parser.add_argument('--output-dir', 38 | type=str, 39 | help='Asset will be stored here.', 40 | default=os.getcwd()) 41 | parser.add_argument( 42 | '--output-name', 43 | type=str, 44 | help= 45 | 'Asset will be stored with this name, will use asset name by default', 46 | default=None) 47 | parser.add_argument( 48 | '--asset-prefix', 49 | type=str, 50 | help='The required prefix to match for asset to download.', 51 | required=True) 52 | args = parser.parse_args() 53 | success = download(args.repository, args.asset_prefix, args.output_dir, 54 | args.output_name) 55 | if not success: 56 | sys.exit(1) 57 | 58 | 59 | if __name__ == '__main__': 60 | main() 61 | -------------------------------------------------------------------------------- /indexer/Dockerfile: -------------------------------------------------------------------------------- 1 | #===-- Dockerfile ---------------------------------------------------------===// 2 | # Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 3 | # See https://llvm.org/LICENSE.txt for license information. 4 | # SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 5 | # 6 | #===-----------------------------------------------------------------------===// 7 | # Docker image used for building clangd index for Chromium. 8 | #===-----------------------------------------------------------------------===// 9 | 10 | FROM ubuntu:20.04 11 | 12 | # GitHub Authentification token. 13 | ARG TOKEN 14 | ENV DEBIAN_FRONTEND=noninteractive 15 | ENV GITHUB_TOKEN=$TOKEN 16 | 17 | RUN if [ -z "$GITHUB_TOKEN" ]; then \ 18 | echo "GITHUB_TOKEN should be provided during build stage."; \ 19 | exit -1; \ 20 | fi; 21 | 22 | # Install dependencies. 23 | RUN apt-get update; \ 24 | apt-get install -y git curl python3 python3-requests p7zip-full unzip \ 25 | cron lsb-release sudo tzdata; \ 26 | apt-get dist-upgrade -y; 27 | 28 | # Install GitHub CLI (needed for uploading to GitHub Releases). 
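# (gh authenticates via the GITHUB_TOKEN environment variable and, as noted in
# indexer/README.md, is used to create releases and upload the index artifacts
# to the configured repository.)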
ADD https://github.com/cli/cli/releases/download/v1.12.1/gh_1.12.1_linux_amd64.deb gh.deb
RUN dpkg -i gh.deb && rm gh.deb

COPY cronjob /etc/cron.d/cronjob
RUN chmod 0644 /etc/cron.d/cronjob
RUN crontab /etc/cron.d/cronjob
RUN touch /var/log/indexer.log

COPY run.sh run.sh
COPY download_latest_release_assets.py download_latest_release_assets.py

RUN python3 download_latest_release_assets.py --output-name \
      clangd_indexing_tools.zip --asset-prefix clangd_indexing_tools-linux && \
    unzip clangd_indexing_tools.zip -d clangd_binaries && \
    rm clangd_indexing_tools.zip

COPY prepare.sh prepare.sh
RUN chmod +x /prepare.sh

RUN echo "GITHUB_TOKEN=$GITHUB_TOKEN" >> /etc/environment
RUN echo "DEBIAN_FRONTEND=noninteractive" >> /etc/environment

COPY entry_point.sh entry_point.sh
RUN chmod +x /entry_point.sh

ENTRYPOINT ["/entry_point.sh"]
--------------------------------------------------------------------------------
/indexer/README.md:
--------------------------------------------------------------------------------
# Making a new indexer release

Running `bash rollout_new_release.sh GITHUB_TOKEN` will create a new indexer
image with the latest snapshot binaries available in
[clangd/releases](https://github.com/clangd/clangd/releases) and restart the
`indexer` instance in the `chrome-remote-index` GCP project.

The `GITHUB_TOKEN` is used for creating releases and uploading indexing
artifacts to
[this repo](https://github.com/clangd/chrome-remote-index/releases), so it
should have the `public_repo` scope. Note that releases will be created as the
user owning the token.
--------------------------------------------------------------------------------
/indexer/args.sh:
--------------------------------------------------------------------------------
PROJECT_ID="chrome-remote-index"
VM_NAME="indexer"
VM_ZONE="us-central1-c"
IMAGE_IN_GCR="gcr.io/chrome-remote-index/chrome-remote-index-indexer"
--------------------------------------------------------------------------------
/indexer/cloudbuild.yaml:
--------------------------------------------------------------------------------
steps:
- name: gcr.io/google.com/cloudsdktool/cloud-sdk
  entrypoint: 'bash'
  args:
  - '-c'
  - >-
    cd indexer &&
    bash rollout_new_release.sh $$GH_TOKEN
  secretEnv: ['GH_TOKEN']
availableSecrets:
  secretManager:
  - versionName: projects/chrome-remote-index/secrets/GH_TOKEN/versions/latest
    env: GH_TOKEN
--------------------------------------------------------------------------------
/indexer/cronjob:
--------------------------------------------------------------------------------
30 1 * * * flock -w 0 /cron.lock /run.sh >> /var/log/indexer.log 2>&1
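The cron entry above kicks off one indexing cycle per day at 01:30 UTC, and
`flock -w 0` on `/cron.lock` makes a new run give up immediately instead of
queueing behind a cycle that is still going. A minimal, self-contained
illustration of that locking behaviour (using a throwaway lock file, not the
real `/cron.lock`):

```bash
# Hold the lock for a while in the background, as a long indexing run would.
flock -w 0 /tmp/demo.lock sleep 30 &
sleep 1
# A second invocation fails immediately because the lock is already held.
flock -w 0 /tmp/demo.lock echo "started" \
  || echo "skipped: previous run still holds the lock"
```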
--------------------------------------------------------------------------------
/indexer/download_latest_release_assets.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python3

import argparse
import os
import requests
import sys


# Returns True if the download was successful.
def download(repository, asset_prefix, output_dir, output_name):
    # Traverse releases starting from the most recent one.
    request = requests.get(
        f'https://api.github.com/repos/{repository}/releases')
    for release in request.json():
        for asset in release.get('assets', []):
            if asset.get('name', '').startswith(asset_prefix):
                download_url = asset['browser_download_url']
                downloaded_file = requests.get(download_url)
                if output_name is None:
                    output_name = asset['name']
                with open(os.path.join(output_dir, output_name), 'wb') as f:
                    f.write(downloaded_file.content)
                # The latest matching release is downloaded, there is nothing
                # else to do.
                return True
    return False


def main():
    parser = argparse.ArgumentParser(
        description='Download Clangd binaries (clangd itself, indexer, etc).')
    parser.add_argument(
        '--repository',
        type=str,
        help='GitHub repository to download the latest release from.',
        default='clangd/clangd')
    parser.add_argument(
        '--output-dir',
        type=str,
        help='Asset will be stored here.',
        default=os.getcwd())
    parser.add_argument(
        '--output-name',
        type=str,
        help='Asset will be stored with this name; defaults to the asset name.',
        default=None)
    parser.add_argument(
        '--asset-prefix',
        type=str,
        help='The required prefix an asset name must have to be downloaded.',
        required=True)
    args = parser.parse_args()
    success = download(args.repository, args.asset_prefix, args.output_dir,
                       args.output_name)
    if not success:
        sys.exit(1)


if __name__ == '__main__':
    main()
--------------------------------------------------------------------------------
/indexer/entry_point.sh:
--------------------------------------------------------------------------------
#!/bin/bash
#===-- entry_point.sh -----------------------------------------------------===//
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===-----------------------------------------------------------------------===//
#
# Docker entry point wrapper.
#
#===-----------------------------------------------------------------------===//

set -eux

/prepare.sh

# Run one indexing cycle immediately at startup.
/run.sh

# Start cron only after the initial indexing run finishes.
cron
tail -f /var/log/indexer.log
--------------------------------------------------------------------------------
/indexer/prepare.sh:
--------------------------------------------------------------------------------
#!/bin/bash
#===-- prepare.sh ---------------------------------------------------------===//
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===-----------------------------------------------------------------------===//
#
# Fetch Chromium sources and prepare the environment.
#
#===-----------------------------------------------------------------------===//

set -eux

cd /

rm -rf depot_tools
git clone --depth=1 https://chromium.googlesource.com/chromium/tools/depot_tools.git

export PATH="$PATH:$(readlink -f depot_tools)"

mkdir -p chromium
cd chromium

gclient metrics --opt-out

# Only run fetch on the very first start; fetch fails if a checkout already
# exists.
if [ ! -f .gclient ]; then
  fetch --no-history --nohooks chromium
fi

echo "target_os = [ 'linux', 'android', 'chromeos', 'fuchsia' ]" >> .gclient

cd src

gclient sync --no-history

build/install-build-deps.py || true

gclient runhooks

echo "Finished preparing the environment"
--------------------------------------------------------------------------------
/indexer/push_new_docker_image.sh:
--------------------------------------------------------------------------------
source args.sh

set -ex

TOKEN="$1"
if [ -z "$TOKEN" ]; then
  echo "Usage: $0 GITHUB_TOKEN_FOR_UPLOADING_INDEX"
  exit 1
fi

TEMP_DIR="$(mktemp -d)"
# Make sure we delete TEMP_DIR on exit.
trap "rm -r $TEMP_DIR" EXIT

# Copy all the files needed for the docker image into a temp directory and move
# into it.
cp Dockerfile "$TEMP_DIR/"
cp cronjob "$TEMP_DIR/"
cp run.sh "$TEMP_DIR/"
cp ../download_latest_release_assets.py "$TEMP_DIR/"
cp prepare.sh "$TEMP_DIR/"
cp entry_point.sh "$TEMP_DIR/"
cd "$TEMP_DIR"

# Build the image, tag it for GCR and push it.
docker build --build-arg TOKEN="$TOKEN" -t "$IMAGE_IN_GCR" .
gcloud auth configure-docker
docker push "$IMAGE_IN_GCR"
--------------------------------------------------------------------------------
/indexer/rollout_new_release.sh:
--------------------------------------------------------------------------------
source args.sh

set -ex

# Start by creating a new image.
. push_new_docker_image.sh "$@"

# Restarting the VM instance makes it boot with the latest docker image.
gcloud compute --project=$PROJECT_ID instances stop --zone=$VM_ZONE $VM_NAME
gcloud compute --project=$PROJECT_ID instances start --zone=$VM_ZONE $VM_NAME
--------------------------------------------------------------------------------
/indexer/run.sh:
--------------------------------------------------------------------------------
#!/bin/bash
#===-- run.sh -------------------------------------------------------------===//
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===-----------------------------------------------------------------------===//
#
# Produce the Chromium index.
#
#===-----------------------------------------------------------------------===//

set -eux

cd /

# Locate the indexing binaries unpacked at image build time and the depot_tools
# checkout created by prepare.sh.

export CLANGD_INDEXER=$(find clangd_binaries -name "clangd-indexer" | xargs readlink -f)

export PATH="$PATH:$(readlink -f depot_tools)"

# Update Chromium sources.
cd chromium/src
git fetch --depth=1
git reset --hard origin/main
gclient fetch
gclient sync --no-history --delete_unversioned_trees
gclient runhooks

mkdir -p out/Default
export BUILD_DIR=$(readlink -f out/Default)

# Create a release; it is empty for now and is incrementally populated
# throughout the indexing pipeline.

DATE=$(date -u +%Y%m%d)
COMMIT=$(git rev-parse --short HEAD)
RELEASE_NAME="index/${DATE}"
gh release create $RELEASE_NAME --repo clangd/chrome-remote-index \
  --title="Index at $DATE" \
  --notes="Chromium index artifacts at $COMMIT with project root \`$PWD\`."

# The indexing pipeline is shared across platforms; only the preparation step
# (build configuration and dependencies) differs per platform.

# $1: Platform name.
# $2: GN arguments for the chosen platform.
# TODO: Add logging for failures.
index() {
  PLATFORM=$1

  GN_ARGS=$2

  echo "Indexing for $PLATFORM"
  # Clean up the artifacts.
  rm -rf $BUILD_DIR /chrome-*.idx chrome-index-*.zip

  gn gen --args="$GN_ARGS" $BUILD_DIR

  # Build generated files.
  ninja -C $BUILD_DIR -t targets all | grep -i '^gen/' | grep -E "\.(cpp|h|inc|cc):" | cut -d':' -f1 | xargs autoninja -C $BUILD_DIR -k 0

  # Get compile_commands.json for clangd-indexer.
  tools/clang/scripts/generate_compdb.py -p $BUILD_DIR > compile_commands.json

  $CLANGD_INDEXER --executor=all-TUs compile_commands.json > /chrome-$PLATFORM.idx

  7z a chrome-index-$PLATFORM-$DATE.zip /chrome-$PLATFORM.idx

  gh release upload --repo clangd/chrome-remote-index $RELEASE_NAME chrome-index-$PLATFORM-$DATE.zip
  echo "Finished indexing for $PLATFORM"
}

# --- Linux ---

./build/install-build-deps.py || true

index linux 'target_os="linux"' || true

# --- ChromeOS ---

index chromeos 'target_os="chromeos"' || true

# --- Android ---

index android 'target_os="android"' || true

# --- Fuchsia ---

index fuchsia 'target_os="fuchsia"' || true

# --- Android Chromecast ---

index chromecast-android 'target_os="android" is_chromecast=true' || true

# --- Linux Chromecast ---

index chromecast-linux 'target_os="linux" is_chromecast=true' || true
--------------------------------------------------------------------------------
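As a usage note, the artifacts that `run.sh` publishes can be fetched back
with the repository's own `download_latest_release_assets.py`. The sketch
below is illustrative only: the asset prefix follows the
`chrome-index-<platform>-<date>.zip` naming used in `run.sh`, the local file
and directory names are arbitrary, and `python3-requests` must be installed.

```bash
# Illustrative only: grab the most recent Linux index artifact published by
# run.sh and unpack it locally. File and directory names here are arbitrary
# choices, not part of the pipeline.
python3 download_latest_release_assets.py \
    --repository clangd/chrome-remote-index \
    --asset-prefix chrome-index-linux \
    --output-name chrome-index-linux.zip
unzip chrome-index-linux.zip -d ./chrome-index
```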