├── .dockerignore ├── .github └── workflows │ └── ci.yaml ├── .gitignore ├── Dockerfile ├── README.md ├── docker-compose.yml ├── docs ├── advanced-functions │ ├── external-storage.md │ ├── localpv-manager.md │ └── manage-multiple-cluster.md ├── appendices │ ├── downloads.md │ ├── faq.md │ ├── iomesh-metrics.md │ ├── release-notes.md │ └── setup-worker-node.md ├── assets │ ├── iomesh-csi-driver │ │ ├── example │ │ │ └── fio.yaml │ │ └── v0.1.1 │ │ │ └── deploy │ │ │ ├── accounts.yaml │ │ │ ├── clusterroles.yaml │ │ │ ├── namespace.yaml │ │ │ └── zbs-csi-driver.yaml │ └── iomesh-operation │ │ ├── ioemsh-dashobard.json │ │ └── iomesh-prometheus-kubernetes-sd-example.yaml ├── basic-concepts │ └── basic-concepts.md ├── cluster-operations │ ├── manage-license.md │ ├── replace-failed-disk.md │ ├── scale-down-cluster.md │ ├── scale-out-cluster.md │ ├── uninstall-cluster.md │ └── upgrade-cluster.md ├── deploy-iomesh-cluster │ ├── activate-license.md │ ├── install-iomesh.md │ ├── prerequisites.md │ └── setup-iomesh.md ├── introduction │ └── introduction.md ├── monitor-iomesh │ ├── install-iomesh-dashboard.md │ └── monitoring-iomesh.md ├── stateful-applications │ ├── iomesh-for-mongodb.md │ └── iomesh-for-mysql.md ├── volume-operations │ ├── authenticate-pv.md │ ├── clone-pv.md │ ├── create-pv.md │ ├── create-storageclass.md │ └── expand-pv.md └── volumesnapshot-operations │ ├── create-snapshotclass.md │ ├── create-volumesnapshot.md │ └── restore-volumesnapshot.md ├── package-lock.json ├── scripts ├── install_iomesh_coreos.sh ├── install_iomesh_el7.sh └── install_iomesh_el8.sh └── website ├── .dockerignore ├── .gitignore ├── Dockerfile ├── README.md ├── blog ├── 2016-03-11-blog-post.md ├── 2017-04-10-blog-post-two.md ├── 2017-09-25-testing-rss.md ├── 2017-09-26-adding-rss.md └── 2017-10-24-new-version-1.0.0.md ├── core └── Footer.js ├── docker-compose.yml ├── docs ├── doc1.md ├── doc2.md ├── doc3.md ├── exampledoc4.md └── exampledoc5.md ├── package-lock.json ├── package.json 
├── pages └── en │ ├── help.js │ ├── users.js │ └── versions.js ├── sidebars.json ├── siteConfig.js ├── static ├── css │ └── custom.css ├── img │ ├── IOMesh_logo_on_white.svg │ ├── Slack_footer.svg │ ├── Twitter_footer.svg │ ├── YouTube_footer.svg │ ├── favicon.ico │ ├── mesh.svg │ ├── oss_logo.png │ ├── undraw_code_review.svg │ ├── undraw_monitor.svg │ ├── undraw_note_list.svg │ ├── undraw_online.svg │ ├── undraw_open_source.svg │ ├── undraw_operating_system.svg │ ├── undraw_react.svg │ ├── undraw_tweetstorm.svg │ └── undraw_youtube_tutorial.svg ├── index.html └── js │ └── add-tracker.js ├── versioned_docs ├── version-0.10.0 │ └── about-iomesh │ │ └── introduction.md ├── version-0.10.1 │ ├── about-iomesh │ │ └── introduction.md │ └── deploy │ │ ├── install-iomesh.md │ │ ├── prerequisites.md │ │ └── setup-iomesh.md ├── version-0.11.0 │ ├── about-iomesh │ │ └── introduction.md │ ├── additional-info │ │ ├── best-practice-in-production.md │ │ ├── performance-testing.md │ │ └── release-notes.md │ ├── advanced-usage │ │ └── external-iscsi.md │ ├── deploy │ │ ├── install-iomesh.md │ │ ├── prerequisites.md │ │ ├── setup-iomesh.md │ │ ├── setup-snapshotclass.md │ │ └── setup-storageclass.md │ ├── iomesh-operations │ │ ├── cluster-operations.md │ │ └── monitoring.md │ ├── references │ │ └── metrics.md │ ├── stateful-applications │ │ ├── iomesh-for-cassandra.md │ │ ├── iomesh-for-mongodb.md │ │ └── iomesh-for-mysql.md │ └── volume-operations │ │ ├── create-volume.md │ │ ├── expand-volume.md │ │ └── snapshot-restore-and-clone.md ├── version-0.9.5 │ ├── about-iomesh │ │ └── introduction.md │ ├── additional-info │ │ ├── best-practice-in-production.md │ │ ├── deployment-architectures.md │ │ └── performance-testing.md │ ├── deploy │ │ ├── install-iomesh.md │ │ ├── prerequisites.md │ │ ├── setup-iomesh.md │ │ ├── setup-snapshotclass.md │ │ └── setup-storageclass.md │ ├── iomesh-operations │ │ ├── cluster-operations.md │ │ ├── failover.md │ │ └── monitoring.md │ ├── 
stateful-applications │ │ ├── iomesh-for-cassandra.md │ │ ├── iomesh-for-mongodb.md │ │ └── iomesh-for-mysql.md │ └── volume-operations │ │ ├── create-volume.md │ │ ├── expand-volume.md │ │ └── snapshot-restore-and-clone.md ├── version-0.9.7 │ ├── about-iomesh │ │ └── introduction.md │ ├── additional-info │ │ └── performance-testing.md │ ├── deploy │ │ └── install-iomesh.md │ ├── iomesh-operations │ │ └── monitoring.md │ ├── stateful-applications │ │ ├── iomesh-for-cassandra.md │ │ ├── iomesh-for-mongodb.md │ │ └── iomesh-for-mysql.md │ └── volume-operations │ │ └── create-volume.md ├── version-v0.11.1 │ ├── about-iomesh │ │ └── introduction.md │ ├── additional-info │ │ └── release-notes.md │ └── deploy │ │ ├── install-iomesh.md │ │ └── prerequisites.md ├── version-v1.0.0 │ ├── advanced-functions │ │ ├── external-storage.md │ │ ├── localpv-manager.md │ │ └── manage-multiple-cluster.md │ ├── appendices │ │ ├── downloads.md │ │ ├── faq.md │ │ ├── iomesh-metrics.md │ │ └── release-notes.md │ ├── basic-concepts │ │ └── basic-concepts.md │ ├── cluster-operations │ │ ├── manage-license.md │ │ ├── replace-failed-disk.md │ │ ├── scale-cluster.md │ │ ├── uninstall-cluster.md │ │ └── upgrade-cluster.md │ ├── deploy-iomesh-cluster │ │ ├── activate-license.md │ │ ├── install-iomesh.md │ │ ├── prerequisites.md │ │ ├── setup-iomesh.md │ │ └── setup-worker-node.md │ ├── introduction │ │ └── introduction.md │ ├── monitor-iomesh │ │ ├── install-iomesh-dashboard.md │ │ └── monitoring-iomesh.md │ ├── stateful-applications │ │ ├── iomesh-for-mongodb.md │ │ └── iomesh-for-mysql.md │ ├── volume-operations │ │ ├── clone-pv.md │ │ ├── create-pv.md │ │ ├── create-storageclass.md │ │ ├── encrypt-pv.md │ │ └── expand-pv.md │ └── volumesnapshot-operations │ │ ├── create-snapshotclass.md │ │ ├── create-volumesnapshot.md │ │ └── restore-volumesnapshot.md ├── version-v1.0.1 │ ├── advanced-functions │ │ ├── external-storage.md │ │ ├── localpv-manager.md │ │ └── manage-multiple-cluster.md │ ├── 
appendices │ │ ├── downloads.md │ │ ├── release-notes.md │ │ └── setup-worker-node.md │ ├── basic-concepts │ │ └── basic-concepts.md │ ├── cluster-operations │ │ ├── scale-cluster.md │ │ └── upgrade-cluster.md │ ├── deploy-iomesh-cluster │ │ ├── install-iomesh.md │ │ ├── prerequisites.md │ │ └── setup-iomesh.md │ ├── monitor-iomesh │ │ └── install-iomesh-dashboard.md │ └── volume-operations │ │ └── authenticate-pv.md ├── version-v1.0.2 │ ├── basic-concepts │ │ └── basic-concepts.md │ └── cluster-operations │ │ ├── scale-down-cluster.md │ │ └── scale-out-cluster.md ├── version-v1.0.3 │ └── deploy-iomesh-cluster │ │ └── setup-iomesh.md └── version-v1.0.4 │ ├── basic-concepts │ └── basic-concepts.md │ ├── cluster-operations │ ├── replace-failed-disk.md │ └── scale-out-cluster.md │ └── deploy-iomesh-cluster │ ├── install-iomesh.md │ └── setup-iomesh.md ├── versioned_sidebars ├── version-0.11.0-sidebars.json ├── version-0.9.5-sidebars.json ├── version-v1.0.0-sidebars.json ├── version-v1.0.1-sidebars.json └── version-v1.0.2-sidebars.json ├── versions.json └── website ├── README.md ├── blog ├── 2016-03-11-blog-post.md ├── 2017-04-10-blog-post-two.md ├── 2017-09-25-testing-rss.md ├── 2017-09-26-adding-rss.md └── 2017-10-24-new-version-1.0.0.md ├── core └── Footer.js ├── package.json ├── pages └── en │ ├── help.js │ ├── index.js │ └── users.js ├── sidebars.json ├── siteConfig.js └── static ├── css └── custom.css └── img ├── favicon.ico ├── oss_logo.png ├── undraw_code_review.svg ├── undraw_monitor.svg ├── undraw_note_list.svg ├── undraw_online.svg ├── undraw_open_source.svg ├── undraw_operating_system.svg ├── undraw_react.svg ├── undraw_tweetstorm.svg └── undraw_youtube_tutorial.svg /.dockerignore: -------------------------------------------------------------------------------- 1 | */node_modules 2 | *.log 3 | -------------------------------------------------------------------------------- /.github/workflows/ci.yaml: 
-------------------------------------------------------------------------------- 1 | name: ci 2 | 3 | on: 4 | pull_request: 5 | branches: [master] 6 | push: 7 | branches: [master] 8 | 9 | jobs: 10 | build-deploy: 11 | runs-on: ubuntu-latest 12 | defaults: 13 | run: 14 | working-directory: website 15 | steps: 16 | - uses: actions/checkout@v2 17 | 18 | - name: Setup Node 19 | uses: actions/setup-node@v2.1.2 20 | with: 21 | node-version: '12.x' 22 | 23 | - name: Get yarn cache 24 | id: yarn-cache 25 | run: echo "::set-output name=dir::$(yarn cache dir)" 26 | 27 | - name: Cache dependencies 28 | uses: actions/cache@v2 29 | with: 30 | path: ${{ steps.yarn-cache.outputs.dir }} 31 | key: ${{ runner.os }}-website-${{ hashFiles('**/yarn.lock') }} 32 | restore-keys: | 33 | ${{ runner.os }}-website- 34 | 35 | - run: yarn install --frozen-lockfile 36 | - run: yarn build 37 | 38 | - name: Deploy 39 | if: ${{ github.event_name == 'push' }} 40 | uses: peaceiris/actions-gh-pages@v3 41 | with: 42 | personal_token: ${{ secrets.IOMESH_ROBOT_ACCESS_TOKEN }} 43 | publish_branch: gh-pages 44 | publish_dir: ./website/build/iomesh-docs 45 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .DS_Store 2 | 3 | node_modules 4 | 5 | lib/core/metadata.js 6 | lib/core/MetadataBlog.js 7 | 8 | website/translated_docs 9 | website/build/ 10 | website/yarn.lock 11 | website/node_modules 12 | website/i18n/* 13 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM node:lts 2 | 3 | WORKDIR /app/website 4 | 5 | EXPOSE 3000 35729 6 | COPY ./docs /app/docs 7 | COPY ./website /app/website 8 | RUN yarn install 9 | 10 | CMD ["yarn", "start"] 11 | -------------------------------------------------------------------------------- /README.md: 
-------------------------------------------------------------------------------- 1 | # IOMesh 2 | 3 | [![Slack channel #iomesh-community](https://img.shields.io/badge/slack-%23iomesh--community-974eff?logo=slack)](https://join.slack.com/t/iomesh/shared_invite/zt-pnqohdau-vZnhWMsm0ETSbPA_AJGCRw) 4 | 5 | ## To start using IOMesh 6 | 7 | See our online documentation on [docs.iomesh.com](https://docs.iomesh.com). 8 | 9 | ## License 10 | 11 | Copyright (c) 2021, SMARTX. All rights reserved. 12 | -------------------------------------------------------------------------------- /docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: "3" 2 | 3 | services: 4 | docusaurus: 5 | build: . 6 | ports: 7 | - 3000:3000 8 | - 35729:35729 9 | volumes: 10 | - ./docs:/app/docs 11 | - ./website/blog:/app/website/blog 12 | - ./website/core:/app/website/core 13 | - ./website/i18n:/app/website/i18n 14 | - ./website/pages:/app/website/pages 15 | - ./website/static:/app/website/static 16 | - ./website/sidebars.json:/app/website/sidebars.json 17 | - ./website/siteConfig.js:/app/website/siteConfig.js 18 | working_dir: /app/website 19 | -------------------------------------------------------------------------------- /docs/appendices/downloads.md: -------------------------------------------------------------------------------- 1 | --- 2 | id: downloads 3 | title: Downloads 4 | sidebar_label: Downloads 5 | --- 6 | 7 | 8 | IOMesh Offline Installation Package 9 | 10 | - Intel x86_64: 11 | - Download Link: 12 | 13 | 14 | - MD5: 15 | ``` 16 | 0c5e40ecb6780b8533be49e918afc767 17 | ``` 18 | - Kunpeng AArch64: 19 | - Download Link: 20 | 21 | 22 | - MD5: 23 | ``` 24 | 5a841691fd568d1af9eed53d7a0f5a72 25 | ``` 26 | 27 | - Hygon x86_64: 28 | - Download Link: 29 | 30 | 31 | - MD5: 32 | ``` 33 | d4774a94a81bfc0b5b10684190126bd7 34 | ``` 35 | 36 | IOMesh Cluster Dashboard File 37 | - Download Link: 38 | 39 | 40 | 41 | - MD5: 42 | ``` 43 | 
e063db897db783365ad476b8582c1534 44 | ``` 45 | 46 | 47 | -------------------------------------------------------------------------------- /docs/appendices/faq.md: -------------------------------------------------------------------------------- 1 | --- 2 | id: faq 3 | title: FAQ 4 | sidebar_label: FAQ 5 | --- 6 | 7 | Q: Failed to pull docker image during IOMesh installation, and the error log reads: `Too Many Requests - Server message: toomanyrequests: You have reached your pull rate limit.` 8 | 9 | A: Log in to your Docker account on the worker node that experienced the above issue, or alternatively, update your account. This issue typically occurs during online installation as opposed to offline installation. 10 | -------------------------------------------------------------------------------- /docs/appendices/setup-worker-node.md: -------------------------------------------------------------------------------- 1 | --- 2 | id: setup-worker-node 3 | title: Set Up Worker Node 4 | sidebar_label: Set Up Worker Node 5 | --- 6 | 7 | Before setting up `open-iscsi` for the worker nodes, ensure all requirements in [Prerequisites](../deploy-iomesh-cluster/prerequisites) are met. 8 | 9 | 1. On the node console, run the following command to install `open-iscsi`. 10 | 11 | 12 | 13 | 14 | 15 | ```shell 16 | sudo yum install iscsi-initiator-utils -y 17 | ``` 18 | 19 | 20 | 21 | ```shell 22 | sudo apt-get install open-iscsi -y 23 | ``` 24 | 25 | 26 | 27 | 2. Edit the file `/etc/iscsi/iscsid.conf`. Set the value of the field `node.startup` to `manual`. 28 | 29 | ```shell 30 | sudo sed -i 's/^node.startup = automatic$/node.startup = manual/' /etc/iscsi/iscsid.conf 31 | ``` 32 | > _Note:_ 33 | > The value of `MaxRecvDataSegmentLength` in `/etc/iscsi/iscsi.conf` is set at 32,768 by default, and the maximum number of PVs is limited to 80,000 in IOMesh. To create PVs more than 80,000 in IOMesh, it is recommended to set the value of `MaxRecvDataSegmentLength` to 163,840 or above. 
34 | 35 | 3. Disable SELinux. 36 | 37 | ```shell 38 | sudo setenforce 0 39 | sudo sed -i 's/^SELINUX=enforcing$/SELINUX=permissive/' /etc/selinux/config 40 | ``` 41 | 42 | 4. Load `iscsi_tcp` kernel module. 43 | 44 | ```shell 45 | sudo modprobe iscsi_tcp 46 | sudo bash -c 'echo iscsi_tcp > /etc/modprobe.d/iscsi-tcp.conf' 47 | ``` 48 | 49 | 5. Start `iscsid` service. 50 | 51 | ```shell 52 | sudo systemctl enable --now iscsid 53 | ``` 54 | 55 | 56 | 57 | -------------------------------------------------------------------------------- /docs/assets/iomesh-csi-driver/example/fio.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | name: fio-pvc 5 | spec: 6 | storageClassName: iomesh-csi-driver-default 7 | volumeMode: Block 8 | accessModes: 9 | - ReadWriteOnce 10 | resources: 11 | requests: 12 | storage: 30Gi 13 | --- 14 | apiVersion: v1 15 | kind: Pod 16 | metadata: 17 | name: fio 18 | labels: 19 | app: fio 20 | spec: 21 | volumes: 22 | - name: fio-pvc 23 | persistentVolumeClaim: 24 | claimName: fio-pvc 25 | containers: 26 | - name: fio 27 | image: clusterhq/fio-tool 28 | command: 29 | - tail 30 | args: 31 | - '-f' 32 | - /dev/null 33 | imagePullPolicy: IfNotPresent 34 | volumeDevices: 35 | - devicePath: /mnt/fio 36 | name: fio-pvc 37 | restartPolicy: Always 38 | -------------------------------------------------------------------------------- /docs/assets/iomesh-csi-driver/v0.1.1/deploy/accounts.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: zbs-csi-controller-account 5 | namespace: iomesh-system 6 | --- 7 | kind: ClusterRoleBinding 8 | apiVersion: rbac.authorization.k8s.io/v1 9 | metadata: 10 | name: zbs-csi-provisioner-binding 11 | subjects: 12 | - kind: ServiceAccount 13 | name: zbs-csi-controller-account 14 | namespace: iomesh-system 15 | roleRef: 16 | kind: 
ClusterRole 17 | name: zbs-csi-provisioner-role 18 | apiGroup: rbac.authorization.k8s.io 19 | --- 20 | kind: ClusterRoleBinding 21 | apiVersion: rbac.authorization.k8s.io/v1 22 | metadata: 23 | name: zbs-csi-attacher-binding 24 | subjects: 25 | - kind: ServiceAccount 26 | name: zbs-csi-controller-account 27 | namespace: iomesh-system 28 | roleRef: 29 | kind: ClusterRole 30 | name: zbs-csi-attacher-role 31 | apiGroup: rbac.authorization.k8s.io 32 | --- 33 | kind: ClusterRoleBinding 34 | apiVersion: rbac.authorization.k8s.io/v1 35 | metadata: 36 | name: zbs-csi-snapshotter-binding 37 | subjects: 38 | - kind: ServiceAccount 39 | name: zbs-csi-controller-account 40 | namespace: iomesh-system 41 | roleRef: 42 | kind: ClusterRole 43 | name: zbs-csi-snapshotter-role 44 | apiGroup: rbac.authorization.k8s.io 45 | --- 46 | kind: ClusterRoleBinding 47 | apiVersion: rbac.authorization.k8s.io/v1 48 | metadata: 49 | name: zbs-csi-resizer-binding 50 | subjects: 51 | - kind: ServiceAccount 52 | name: zbs-csi-controller-account 53 | namespace: iomesh-system 54 | roleRef: 55 | kind: ClusterRole 56 | name: zbs-csi-resizer-role 57 | apiGroup: rbac.authorization.k8s.io 58 | --- 59 | kind: ClusterRoleBinding 60 | apiVersion: rbac.authorization.k8s.io/v1 61 | metadata: 62 | name: zbs-csi-driver-controller-binding 63 | subjects: 64 | - kind: ServiceAccount 65 | name: zbs-csi-controller-account 66 | namespace: iomesh-system 67 | roleRef: 68 | kind: ClusterRole 69 | name: zbs-csi-driver-role 70 | apiGroup: rbac.authorization.k8s.io 71 | --- 72 | apiVersion: v1 73 | kind: ServiceAccount 74 | metadata: 75 | name: zbs-csi-node-account 76 | namespace: iomesh-system 77 | --- 78 | kind: ClusterRoleBinding 79 | apiVersion: rbac.authorization.k8s.io/v1 80 | metadata: 81 | name: zbs-csi-driver-node-binding 82 | subjects: 83 | - kind: ServiceAccount 84 | name: zbs-csi-node-account 85 | namespace: iomesh-system 86 | roleRef: 87 | kind: ClusterRole 88 | name: zbs-csi-driver-role 89 | apiGroup: 
rbac.authorization.k8s.io 90 | -------------------------------------------------------------------------------- /docs/assets/iomesh-csi-driver/v0.1.1/deploy/namespace.yaml: -------------------------------------------------------------------------------- 1 | kind: Namespace 2 | apiVersion: v1 3 | metadata: 4 | name: iomesh-system 5 | -------------------------------------------------------------------------------- /docs/cluster-operations/scale-down-cluster.md: -------------------------------------------------------------------------------- 1 | --- 2 | id: scale-down-cluster 3 | title: Scale Down Cluster 4 | sidebar_label: Scale Down Cluster 5 | --- 6 | 7 | You can scale down the IOMesh cluster by removing chunk pods in the Kubernetes worker nodes. 8 | 9 | **Precautions** 10 | - You can only delete chunk pods. Deleting meta pods is not supported. 11 | - You can only remove one chunk pod at a time. 12 | - Each chunk pod is created sequentially and given a unique number by `StatefulSet`, and you should remove them in reverse creation order. For example, if there are 5 chunk pods `iomesh-chunk-0`, `iomesh-chunk-1`, `iomesh-chunk-2`, `iomesh-chunk-3`, `iomesh-chunk-4`, deletion should start with `iomesh-chunk-4`. 13 | 14 | 15 | **Procedure** 16 | 17 | The following example reduces the number of chunk pods by removing `iomesh-chunk-2` on the node `k8s-worker-2`. 18 | 19 | 1. Run the `ip a` command on the `k8s-worker-2` node to obtain the unique IP within the data CIDR. Assume the IP is `192.168.29.23`. 20 | 21 | 2. Run the following command. Locate the `status.summary.chunkSummary.chunks` field and find the ID of `chunks` whose IP is `192.168.29.23`. 22 | ```shell 23 | kubectl get iomesh iomesh -n iomesh-system -o yaml 24 | ``` 25 | ```yaml 26 | chunks: 27 | - id: 2 # The chunk ID. 28 | ip: 192.168.29.23 29 | ``` 30 | 31 | 3. Get the meta leader pod name. 
32 | ```shell 33 | kubectl get pod -n iomesh-system -l=iomesh.com/meta-leader -o=jsonpath='{.items[0].metadata.name}' 34 | ``` 35 | ```output 36 | iomesh-meta-0 37 | ``` 38 | 4. Access the meta leader pod. 39 | ```shell 40 | kubectl exec -it iomesh-meta-0 -n iomesh-system -c iomesh-meta bash 41 | ``` 42 | 43 | 5. Perform `chunk unregister`. Replace with the chunk ID obtained from Step 2. 44 | 45 | Depending on the size of the data in the chunk, executing this command can take from a few minutes to several hours. 46 | ``` 47 | /opt/iomesh/iomeshctl chunk unregister 48 | ``` 49 | 50 | 6. Find `chunk` in `iomesh.yaml`, the default configuration file exported during IOMesh installation, and then edit `replicaCount`. 51 | ```yaml 52 | chunk: 53 | replicaCount: 3 # Reduce the value to 2. 54 | ``` 55 | 56 | 7. Apply the modification. 57 | 58 | ```shell 59 | helm upgrade --namespace iomesh-system iomesh iomesh/iomesh --values iomesh.yaml 60 | ``` 61 | 62 | 8. Verify that the number of chunk pods is reduced. 63 | 64 | ```shell 65 | kubectl get pod -n iomesh-system | grep chunk 66 | ``` 67 | If successful, you should see output like this: 68 | ```output 69 | iomesh-chunk-0 3/3 Running 0 5h5m 70 | iomesh-chunk-1 3/3 Running 0 5h5m 71 | ``` 72 | 73 | 9. Run the following command. Then locate the `status.summary.chunkSummary.chunks` field to verify that the chunk was removed. 74 | ```shell 75 | kubectl get iomesh iomesh -n iomesh-system -o yaml 76 | ``` -------------------------------------------------------------------------------- /docs/cluster-operations/uninstall-cluster.md: -------------------------------------------------------------------------------- 1 | --- 2 | id: uninstall-cluster 3 | title: Uninstall Cluster 4 | sidebar_label: Uninstall Cluster 5 | --- 6 | 7 | >_ATTENTION_: After uninstalling the IOMesh cluster, all data will be lost including all PVCs in the IOMesh cluster. 
8 | 9 | To uninstall the IOMesh cluster, run the following command: 10 | 11 | ```shell 12 | helm uninstall --namespace iomesh-system iomesh 13 | ``` 14 | 15 | If there are IOMesh resources left after uninstalling IOMesh due to network or other issues, run the following command to clear all IOMesh resources. 16 | ```shell 17 | curl -sSL https://iomesh.run/uninstall_iomesh.sh | sh - 18 | ``` 19 | 20 | 21 | -------------------------------------------------------------------------------- /docs/deploy-iomesh-cluster/activate-license.md: -------------------------------------------------------------------------------- 1 | --- 2 | id: activate-license 3 | title: Activate License 4 | sidebar_label: Activate License 5 | --- 6 | 7 | IOMesh currently offers two editions: Community and Enterprise. They differ in the maximum number of worker nodes and level of business support provided. You can find more information on the IOMesh official website at https://www.iomesh.com/spec. 8 | 9 | IOMesh comes with a trial license when it is installed and deployed. However, it is recommended that you update the trial license to a subscription or perpetual license, depending on your IOMesh edition and how long you plan to use it. 10 | 11 | **Prerequisites** 12 | - **Community Edition**: Apply for the new license code at https://www.iomesh.com/license. 13 | - **Enterprise Edition**: Get the license code either of a subscription or perpetual license from SmartX sales. 14 | 15 | **Procedure** 16 | 17 | 1. Create a file `license-code.txt` and save the license code in it. 18 | 19 | 2. Create a Kubernetes Secret. 20 | 21 | ```bash 22 | kubectl create secret generic iomesh-authorization-code -n iomesh-system --from-file=authorizationCode=./license-code.txt 23 | ``` 24 | 3. Add the field `spec.licenseSecretName` or update it if it exists. Fill in the value `iomesh-authorization-code`, which is the Kubernetes Secret name created above. 
25 | 26 | ```bash 27 | kubectl edit iomesh -n iomesh-system 28 | ``` 29 | 30 | ```output 31 | spec: 32 | licenseSecretName: iomesh-authorization-code 33 | ``` 34 | 35 | 4. Confirm the update, and whether the update is successful will be shown in the output. 36 | 37 | ```bash 38 | kubectl describe iomesh -n iomesh-system # Whether the update is successful will be displayed in the events. 39 | ``` 40 | If the update fails, verify if you have entered the correct license code. If it still does not work, reset the field `spec.licenseSecretName`. 41 | 42 | 5. Verify that the license expiration date and other details are as expected. 43 | 44 | ```bash 45 | kubectl get iomesh -n iomesh-system -o=jsonpath='{.items[0].status.license}' 46 | ``` 47 | 48 | 49 | -------------------------------------------------------------------------------- /docs/deploy-iomesh-cluster/prerequisites.md: -------------------------------------------------------------------------------- 1 | --- 2 | id: prerequisites 3 | title: Prerequisites 4 | sidebar_label: Prerequisites 5 | --- 6 | 7 | Before installing and deploying IOMesh, verify the following requirements. 8 | 9 | > _NOTE:_ Expanding an IOMesh cluster to multiple clusters is not currently supported. You should decide at the beginning whether to deploy one or multiple clusters. For multi-cluster deployment and operations, refer to [Multiple Cluster Management](../advanced-functions/manage-multiple-cluster.md). 10 | 11 | ## Cluster Requirements 12 | 13 | - A Kubernetes or OpenShift cluster with minimum three worker nodes. 14 | - The Kubernetes version should be 1.17-1.25 or OpenShift version should be 3.11-4.10. 15 | 16 | ## Hardware Requirements 17 | 18 | Ensure that each worker node has the following hardware configurations, and note that IOMesh Community and Enterprise editions have the same hardware requirements. 19 | 20 | **CPU** 21 | 22 | - The CPU architecture should be Intel x86_64, Kunpeng AArch64, or Hygon x86_64. 
23 | - At least eight cores for each worker node. 24 | 25 | **Memory** 26 | 27 | - At least 16 GB on each worker node. 28 | 29 | **Storage Controller** 30 | 31 | - SAS HBA or RAID cards that support passthrough mode (JBOD). 32 | 33 | **OS Disk** 34 | 35 | - An SSD with at least 100 GB of free space in the `/opt` directory for storing IOMesh metadata. 36 | 37 | **Data & Cache Disk** 38 | 39 | Depends on whether the storage architecture is tiered storage or non-tiered storage. 40 | 41 | |Architecture|Description| 42 | |---|---| 43 | |Tiered Storage| Faster storage media for cache and slower storage media for capacity. For example, use faster NVMe SSDs as cache disks and slower SATA SSDs or HDDs as data disks.| 44 | |Non-Tiered Storage|Cache disks are not required. All disks except the physical disk containing the system partition are used as data disks.| 45 | 46 | In this release, hybrid mode is only supported for tiered storage and all-flash mode for non-tiered storage. 47 | 48 | |Deployment Mode|Disk Requirements| 49 | |---|---| 50 | |Hybrid Mode|
  • Cache Disk: At least one SATA SSD, SAS SSD or NVMe SSD, and the capacity must be greater than 60 GB.
  • Data Disk: At least one SATA HDD or SAS HDD.
  • The total SSD capacity should be 10% to 20% of total HDD capacity.
| 51 | |All-Flash Mode|At least one SSD with a capacity greater than 60 GB.| 52 | 53 | **NIC** 54 | 55 | - Each worker node should have at least one 10/25 GbE NIC. 56 | 57 | ## Network Requirements 58 | 59 | To prevent network bandwidth contention, create a dedicated storage network for IOMesh or leverage an existing network. 60 | 61 | - Plan a CIDR for IOMesh storage network. The IP of each worker node running IOMesh should be within that CIDR. 62 | - The ping latency of the IOMesh storage network should be below 1 millisecond. 63 | - All worker nodes must be connected to the L2 layer network. 64 | 65 | 66 | -------------------------------------------------------------------------------- /docs/introduction/introduction.md: -------------------------------------------------------------------------------- 1 | --- 2 | id: introduction 3 | title: Introduction 4 | sidebar_label: Introduction 5 | --- 6 | 7 | ## What is IOMesh? 8 | 9 | IOMesh is a Kubernetes-native storage system that manages storage resources within a Kubernetes cluster, automates operations and maintenance, and provides persistent storage, data protection and migration capabilities for data applications such as MySQL, Cassandra, MongoDB and middleware running on Kubernetes. 10 | 11 | ## Key Features 12 | 13 | **Kubernetes Native** 14 | 15 | IOMesh is fully built on the capabilities of Kubernetes and implements storage as code through declarative APIs, allowing for managing infrastructure and deployment environments through code to better support DevOps. 16 | 17 | **High Performance** 18 | 19 | IOMesh enables I/O-intensive databases and applications to run efficiently in the container environment. Leveraging the high-performance I/O link, IOMesh achieves high IOPS while maintaining low latency to ensure stable operation of data applications. 20 | 21 | **No Kernel Dependencies** 22 | 23 | IOMesh runs in user space rather than kernel space, isolated from other applications.
This means if IOMesh fails, other applications on the same node can continue delivering services as usual without affecting the entire system. Since it is kernel independent, there is no need to install kernel modules or worry about compatibility issues. 24 | 25 | **Tiered Storage** 26 | 27 | IOMesh facilitates cost-effective, hybrid deployment of SSDs & HDDs, maximizing storage performance and capacity for different media while reducing storage costs from the outset. 28 | 29 | **Data Protection & Security** 30 | 31 | A system with multiple levels of data protection makes sure that data is always secure and available. IOMesh does this by placing multiple replicas on different nodes, allowing PV-level snapshots for easy recovery in case of trouble, while also isolating abnormal disks to minimize impact on system performance and reduce operational burden. Authentication is also provided for specific PVs to ensure secure access. 32 | 33 | **Fully Integrated into Kubernetes Ecosystem** 34 | 35 | IOMesh flexibly provides storage for stateful applications via CSI even when they are migrated. It also works seamlessly with the Kubernetes toolchain, easily deploying IOMesh using Helm Chart and integrating with Prometheus and Grafana to provide standardized, visualized monitoring and alerting service. 36 | 37 | ## Architecture 38 | ![IOMesh arch](https://user-images.githubusercontent.com/78140947/122766241-e2352c00-d2d3-11eb-9630-bb5b428c3178.png) 39 | -------------------------------------------------------------------------------- /docs/volume-operations/clone-pv.md: -------------------------------------------------------------------------------- 1 | --- 2 | id: clone-pv 3 | title: Clone PV 4 | sidebar_label: Clone PV 5 | --- 6 | 7 | To clone a PV, you should create a new PVC and specify an existing PVC in the field `dataSource` so that you can clone a volume based on it. 8 | 9 | **Precautions** 10 | - The target PVC must be in the same namespace as the source PVC. 
11 | - The target PVC must have the same StorageClass and volume mode as the source PVC. 12 | - The capacity of the target PVC must match the capacity of the source PVC. 13 | 14 | **Prerequisite** 15 | 16 | Verify that there is already a PVC available for cloning. 17 | 18 | **Procedure** 19 | 1. Create a YAML config `clone.yaml`. Specify the source PVC in the field `name`. 20 | 21 | ```yaml 22 | # Source: clone.yaml 23 | apiVersion: v1 24 | kind: PersistentVolumeClaim 25 | metadata: 26 | name: cloned-pvc 27 | spec: 28 | storageClassName: iomesh-csi-driver # The StorageClass must be the same as that of the source PVC. 29 | dataSource: 30 | name: iomesh-example-pvc # Specify the source PVC that should be from the same namespace as the target PVC. 31 | kind: PersistentVolumeClaim 32 | accessModes: 33 | - ReadWriteOnce 34 | resources: 35 | requests: 36 | storage: 10Gi # The capacity value must be the same as that of the source PVC. 37 | volumeMode: Filesystem # The volume mode must be the same as that of the source PVC. 38 | ``` 39 | 40 | 2. Apply the YAML config. Once done, a clone of `existing-pvc` will be created. 41 | 42 | ```bash 43 | kubectl apply -f clone.yaml 44 | ``` 45 | 46 | 3. Check the new PVC. 47 | 48 | ``` 49 | kubectl get pvc cloned-pvc 50 | ``` 51 | If successful, you should see output below: 52 | ```output 53 | NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE 54 | cloned-pvc Bound pvc-161b8c15-3b9f-4742-95db-dcd69c9a2931 10Gi RWO iomesh-csi-driver 12s 55 | ``` 56 | 4. Get the cloned PV. 57 | ```shell 58 | kubectl get pv pvc-161b8c15-3b9f-4742-95db-dcd69c9a2931 # The PV name you get in Step 3. 
59 | ``` 60 | 61 | If successful, you should see output below: 62 | ```output 63 | NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE 64 | pvc-161b8c15-3b9f-4742-95db-dcd69c9a2931 10Gi RWO Delete Bound default/cloned-pvc iomesh-csi-driver 122m 65 | ``` -------------------------------------------------------------------------------- /docs/volume-operations/create-pv.md: -------------------------------------------------------------------------------- 1 | --- 2 | id: create-pv 3 | title: Create PV 4 | sidebar_label: Create PV 5 | --- 6 | 7 | To create a PV, you need to first create a PVC. Once done, IOMesh will detect the creation of the PVC and automatically generate a new PV based on its specs, binding them together. Then the pair of PV and PVC will be ready for use. 8 | 9 | > _NOTE:_ IOMesh supports access modes `ReadWriteOnce`,`ReadWriteMany`,and `ReadOnlyMany`, but `ReadWriteMany` and `ReadOnlyMany` are only for PVs with `volumemode` as Block. 10 | 11 | **Prerequisite** 12 | 13 | Ensure that there is already a StorageClass available for use. 14 | 15 | **Procedure** 16 | 1. Create a YAML config `pvc.yaml`. Configure the fields `accessModes`, `storage`, and `volumeMode`. 17 | 18 | ```yaml 19 | # Source: pvc.yaml 20 | apiVersion: v1 21 | kind: PersistentVolumeClaim 22 | metadata: 23 | name: iomesh-example-pvc 24 | spec: 25 | storageClassName: iomesh-csi-driver 26 | accessModes: 27 | - ReadWriteOnce # Specify the access mode. 28 | resources: 29 | requests: 30 | storage: 10Gi # Specify the storage value. 31 | volumeMode: Filesystem # Specify the volume mode. 32 | ``` 33 | 34 | For details, refer to [Kubernetes Persistent Volumes](https://kubernetes.io/docs/concepts/storage/persistent-volumes/). 35 | 36 | 2. Apply the YAML config to create the PVC. Once done, the corresponding PV will be created. 37 | 38 | ``` 39 | kubectl apply -f pvc.yaml 40 | ``` 41 | 42 | 3. Verify that the PVC was created. 
43 | 44 | ``` 45 | kubectl get pvc iomesh-example-pvc 46 | ``` 47 | If successful, you should see output like this: 48 | ```output 49 | NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE 50 | iomesh-example-pvc Bound pvc-34230f3f-47dc-46e8-8c42-38c073c40598 10Gi RWO iomesh-csi-driver 21h 51 | ``` 52 | 53 | 4. View the PV bound to this PVC. You can find the PV name from the PVC output. 54 | 55 | ``` 56 | kubectl get pv pvc-34230f3f-47dc-46e8-8c42-38c073c40598 57 | ``` 58 | If successful, you should see output like this: 59 | ```output 60 | NAME CAPACITY RECLAIM POLICY STATUS CLAIM STORAGECLASS 61 | pvc-34230f3f-47dc-46e8-8c42-38c073c40598 10Gi Delete Bound default/iomesh-example-pvc iomesh-csi-driver 62 | ``` 63 | 64 | -------------------------------------------------------------------------------- /docs/volume-operations/create-storageclass.md: -------------------------------------------------------------------------------- 1 | --- 2 | id: create-storageclass 3 | title: Create StorageClass 4 | sidebar_label: Create StorageClass 5 | --- 6 | 7 | IOMesh provides a default StorageClass `iomesh-csi-driver` that provides PVs for pods through dynamic volume provisioning. Its parameters adopt the default values in the table below and cannot be modified. If you want a StorageClass with custom parameters, refer to the following. 8 | 9 | | Field |Description|Default (`iomesh-csi-driver`)| 10 | |---|---|---| 11 | |`provisioner`| The provisioner that determines what volume plugin is used for provisioning PVs. |`com.iomesh.csi-driver`| 12 | |`reclaimPolicy`|

Determines whether the PV is retained when the PVC is deleted.

`Delete`: When the PVC is deleted, the PV and the corresponding IOMesh volume will be deleted.

`Retain`: When the PVC is deleted, the PV and the corresponding IOMesh volume will be retained.|`Delete`| 13 | |`allowVolumeExpansion`|Shows if volume expansion support is enabled.| `true`| 14 | |`csi.storage.k8s.io/fstype`|

The filesystem type, including

`xfs`, `ext2`, `ext3`, `ext4`|`ext4`| 15 | |`replicaFactor` | The number of replicas for PVs, either `2` or `3`|`2`| 16 | | `thinProvision` |

Shows the provisioning type.

`true` for thin provisioning.

`false` for thick provisioning.

|`true`| 17 | 18 | 19 | **Procedure** 20 | 21 | 1. Create a YAML config `sc.yaml` and configure the parameters as needed. 22 | 23 | ```yaml 24 | # Source: sc.yaml 25 | kind: StorageClass 26 | apiVersion: storage.k8s.io/v1 27 | metadata: 28 | name: iomesh-example-sc 29 | provisioner: com.iomesh.csi-driver 30 | reclaimPolicy: Delete # Specify the reclaim policy. 31 | allowVolumeExpansion: true 32 | parameters: 33 | # Specify the filesystem type, including "ext4", "ext3", "ext2", and "xfs". 34 | csi.storage.k8s.io/fstype: "ext4" 35 | # Specify the replication factor, either "2" or "3". 36 | replicaFactor: "2" 37 | # Specify the provisioning type. 38 | thinProvision: "true" 39 | volumeBindingMode: Immediate 40 | ``` 41 | 42 | 2. Apply the YAML config to create the StorageClass. 43 | 44 | ``` 45 | kubectl apply -f sc.yaml 46 | ``` 47 | 48 | 3. View the newly created StorageClass. 49 | 50 | ``` 51 | kubectl get storageclass iomesh-example-sc 52 | ``` 53 | After running the command, you should see an example like: 54 | ```output 55 | NAME PROVISIONER RECLAIMPOLICY VOLUMEBINDINGMODE ALLOWVOLUMEEXPANSION AGE 56 | iomesh-example-sc com.iomesh.csi-driver Delete Immediate true 24h 57 | ``` 58 | -------------------------------------------------------------------------------- /docs/volume-operations/expand-pv.md: -------------------------------------------------------------------------------- 1 | --- 2 | id: expand-pv 3 | title: Expand PV 4 | sidebar_label: Expand PV 5 | --- 6 | 7 | To expand the capacity of a PV, you only need to modify the field `storage` in its corresponding PVC. 8 | 9 | **Prerequisite** 10 | 11 | The StorageClass must have `allowVolumeExpansion` set to true. The default StorageClass `iomesh-csi-driver` already does this. If a StorageClass is created and configured with custom parameters, verify that its `allowVolumeExpansion` is set to `true`. 
12 | 13 | **Procedure** 14 | 15 | The following example assumes a YAML config `pvc.yaml` that points to a PVC `iomesh-example-pvc` with a capacity of `10Gi`. 16 | ```yaml 17 | # Source: pvc.yaml 18 | apiVersion: v1 19 | kind: PersistentVolumeClaim 20 | metadata: 21 | name: iomesh-example-pvc 22 | spec: 23 | storageClassName: iomesh-csi-driver 24 | accessModes: 25 | - ReadWriteOnce 26 | resources: 27 | requests: 28 | storage: 10Gi # The original capacity of the PVC. 29 | ``` 30 | 31 | 1. Get the PVC. 32 | 33 | ```bash 34 | kubectl get pvc iomesh-example-pvc 35 | ``` 36 | 37 | If successful, you should see output below: 38 | 39 | ```output 40 | NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE 41 | iomesh-example-pvc Bound pvc-b2fc8425-9dbc-4204-8240-41cb4a7fa8ca 10Gi RWO iomesh-csi-driver 11m 42 | ``` 43 | 44 | 2. Access `pvc.yaml`. Then set the field `storage` to a new value. 45 | ```yaml 46 | apiVersion: v1 47 | kind: PersistentVolumeClaim 48 | metadata: 49 | name: iomesh-example-pvc 50 | spec: 51 | storageClassName: iomesh-csi-driver 52 | accessModes: 53 | - ReadWriteOnce 54 | resources: 55 | requests: 56 | storage: 20Gi # The new value must be greater than the original one. 57 | ``` 58 | 59 | 3. Apply the modification. 60 | 61 | ```bash 62 | kubectl apply -f pvc.yaml 63 | ``` 64 | 65 | 4. View the PVC and its corresponding PV. 66 | 67 | > **_NOTE_:** The PV capacity will be changed to the new value, but the capacity value in the PVC will remain the same until it is actually used by the pod. 68 | 69 | ```bash 70 | kubectl get pvc iomesh-example-pvc 71 | ``` 72 | 73 | If successful, you should see output below. 74 | 75 | ```output 76 | NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE 77 | iomesh-example-pvc Bound pvc-b2fc8425-9dbc-4204-8240-41cb4a7fa8ca 10Gi RWO iomesh-csi-driver 11m 78 | ``` 79 | 80 | 5. Verify that the PV capacity was expanded. You can find the PV name from the PVC output. 
81 | 82 | ```bash 83 | kubectl get pv pvc-b2fc8425-9dbc-4204-8240-41cb4a7fa8ca # The PV name you get in Step 4. 84 | ``` 85 | 86 | If successful, you should see output below: 87 | ```output 88 | NAME CAPACITY RECLAIM POLICY STATUS CLAIM STORAGECLASS 89 | pvc-b2fc8425-9dbc-4204-8240-41cb4a7fa8ca 20Gi Delete Bound default/iomesh-example-pvc iomesh-csi-driver 90 | ``` -------------------------------------------------------------------------------- /docs/volumesnapshot-operations/create-snapshotclass.md: -------------------------------------------------------------------------------- 1 | --- 2 | id: create-snapshotclass 3 | title: Create VolumeSnapshotClass 4 | sidebar_label: Create VolumeSnapshotClass 5 | --- 6 | 7 | A VolumeSnapshot is a snapshot of an existing PV on the storage system, and each VolumeSnapshot is bound to a SnapshotClass that describes the class of snapshots when provisioning a VolumeSnapshot. 8 | 9 | To create a VolumeSnaphotClass, refer to the following: 10 | 11 | |Field|Description|Value| 12 | |---|---|---| 13 | |`driver`|The driver that determines what CSI volume plugin is used for provisioning VolumeSnapshots.|`com.iomesh.csi-driver`| 14 | |[`deletionPolicy`](https://kubernetes.io/docs/concepts/storage/volume-snapshot-classes/#deletionpolicy)|Allows you to configure what happens to the VolumeSnapshotContent when the VolumeSnapshot object is to be deleted.| `Delete`| 15 | 16 | **Procedure** 17 | 18 | 1. Create a YAML config `snc.yaml` and configure the fields `driver` and `deletionPolicy`. 19 | 20 | ```yaml 21 | # Source: snc.yaml 22 | apiVersion: snapshot.storage.k8s.io/v1 23 | kind: VolumeSnapshotClass 24 | metadata: 25 | name: iomesh-csi-driver 26 | driver: com.iomesh.csi-driver # The driver in iomesh.yaml. 27 | deletionPolicy: Delete # "Delete" is recommended. 28 | ``` 29 | 30 | 2. Apply the YAML config to create the VolumeSnapshotClass. 31 | 32 | ``` 33 | kubectl apply -f snc.yaml 34 | ``` 35 | 36 | 3. Get the VolumeSnapshotClass. 
37 | 38 | ``` 39 | kubectl get volumesnapshotclass iomesh-csi-driver 40 | ``` 41 | 42 | If successful, you should see output like this: 43 | ```output 44 | NAME DRIVER DELETIONPOLICY AGE 45 | iomesh-csi-driver com.iomesh.csi-driver Delete 24s 46 | ``` 47 | 48 | -------------------------------------------------------------------------------- /docs/volumesnapshot-operations/create-volumesnapshot.md: -------------------------------------------------------------------------------- 1 | --- 2 | id: create-volumesnapshot 3 | title: Create VolumeSnapshot 4 | sidebar_label: Create VolumeSnapshot 5 | --- 6 | 7 | A VolumeSnapshot is a request for snapshot of a volume and similar to a PVC, while a VolumeSnapshotContent is the snapshot taken from a volume provisioned in the cluster. 8 | 9 | **Prerequisite** 10 | 11 | Ensure that there is already a SnapshotClass. 12 | 13 | **Procedure** 14 | 15 | 1. Create a YAML config `snapshot.yaml`. Specify the SnapshotClass and PVC. 16 | 17 | ```yaml 18 | # Source: snapshot.yaml 19 | apiVersion: snapshot.storage.k8s.io/v1 20 | kind: VolumeSnapshot 21 | metadata: 22 | name: example-snapshot 23 | spec: 24 | volumeSnapshotClassName: iomesh-csi-driver # Specify a SnapshotClass such as `iomesh-csi-driver`. 25 | source: 26 | persistentVolumeClaimName: mongodb-data-pvc # Specify the PVC for which you want to take a snapshot such as `mongodb-data-pvc`. 27 | ``` 28 | 2. Apply the YAML config to create a VolumeSnapshot. 29 | 30 | ```bash 31 | kubectl apply -f snapshot.yaml 32 | ``` 33 | 34 | 3. When the VolumeSnapshot is created, the corresponding VolumeSnapshotContent will be created by IOMesh. Run the following command to verify that they were both created. 
35 | 36 | ```bash 37 | kubectl get Volumesnapshots example-snapshot 38 | ``` 39 | 40 | If successful, you should see output like this: 41 | 42 | ```output 43 | NAME SOURCEPVC RESTORESIZE SNAPSHOTCONTENT CREATIONTIME 44 | example-snapshot mongodb-data-pvc 6Gi snapcontent-fb64d696-725b-4f1b-9847-c95e25b68b13 10h 45 | ``` 46 | -------------------------------------------------------------------------------- /docs/volumesnapshot-operations/restore-volumesnapshot.md: -------------------------------------------------------------------------------- 1 | --- 2 | id: restore-volumesnapshot 3 | title: Restore VolumeSnapshot 4 | sidebar_label: Restore VolumeSnapshot 5 | --- 6 | 7 | Restoring a VolumeSnapshot means creating a PVC while specifying the `dataSource` field referencing to the target snapshot. 8 | 9 | **Precaution** 10 | - The new PVC must have the same access mode as the VolumeSnapshot. 11 | - The new PVC must have the same storage value as the VolumeSnapshot. 12 | 13 | **Procedure** 14 | 15 | 1. Create a YAML config `restore.yaml`. Specify the field `dataSource.name`. 16 | 17 | ```yaml 18 | # Source: restore.yaml 19 | apiVersion: v1 20 | kind: PersistentVolumeClaim 21 | metadata: 22 | name: example-restore 23 | spec: 24 | storageClassName: iomesh-csi-driver 25 | dataSource: 26 | name: example-snapshot # Specify the VolumeSnapshot. 27 | kind: VolumeSnapshot 28 | apiGroup: snapshot.storage.k8s.io 29 | accessModes: 30 | - ReadWriteOnce # Must be same as the access mode in the VolumeSnapshot. 31 | resources: 32 | requests: 33 | storage: 6Gi # Must be same as the storage value in the VolumeSnapshot. 34 | ``` 35 | 36 | 2. Apply the YAML config to create the PVC. 37 | 38 | ```bash 39 | kubectl apply -f restore.yaml 40 | ``` 41 | 3. Check the PVC. A PV will be created and bounded to this PVC. 
42 | 43 | ``` 44 | kubectl get pvc example-restore 45 | ``` 46 | If successful, you should see output like this: 47 | ```output 48 | NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE 49 | example-restore Bound pvc-54230f3f-47dc-46e8-8c42-38c073c40598 6Gi RWO iomesh-csi-driver 21h 50 | ``` 51 | 4. View the PV. You can find the PV name from the PVC output. 52 | ```bash 53 | kubectl get pv pvc-54230f3f-47dc-46e8-8c42-38c073c40598 # The PV name you get in Step 3. 54 | ``` 55 | ```output 56 | NAME CAPACITY RECLAIM POLICY STATUS CLAIM STORAGECLASS 57 | pvc-54230f3f-47dc-46e8-8c42-38c073c40598 6Gi Delete Bound example-restore iomesh-csi-driver 58 | ``` -------------------------------------------------------------------------------- /package-lock.json: -------------------------------------------------------------------------------- 1 | { 2 | "lockfileVersion": 1 3 | } 4 | -------------------------------------------------------------------------------- /website/.dockerignore: -------------------------------------------------------------------------------- 1 | */node_modules 2 | *.log 3 | -------------------------------------------------------------------------------- /website/.gitignore: -------------------------------------------------------------------------------- 1 | .DS_Store 2 | 3 | node_modules 4 | 5 | lib/core/metadata.js 6 | lib/core/MetadataBlog.js 7 | 8 | website/translated_docs 9 | website/build/ 10 | website/yarn.lock 11 | website/node_modules 12 | website/i18n/* 13 | -------------------------------------------------------------------------------- /website/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM node:lts 2 | 3 | WORKDIR /app/website 4 | 5 | EXPOSE 3000 35729 6 | COPY ./docs /app/docs 7 | COPY ./website /app/website 8 | RUN yarn install 9 | 10 | CMD ["yarn", "start"] 11 | -------------------------------------------------------------------------------- /website/blog/2016-03-11-blog-post.md: 
-------------------------------------------------------------------------------- 1 | --- 2 | title: Blog Title 3 | author: Blog Author 4 | authorURL: http://twitter.com/ 5 | authorFBID: 100002976521003 6 | --- 7 | 8 | Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet, consectetur adipiscing elit. Vivamus elementum massa eget nulla aliquet sagittis. Proin odio tortor, vulputate ut odio in, ultrices ultricies augue. Cras ornare ultrices lorem malesuada iaculis. Etiam sit amet libero tempor, pulvinar mauris sed, sollicitudin sapien. 9 | 10 | 11 | 12 | Mauris vestibulum ullamcorper nibh, ut semper purus pulvinar ut. Donec volutpat orci sit amet mauris malesuada, non pulvinar augue aliquam. Vestibulum ultricies at urna ut suscipit. Morbi iaculis, erat at imperdiet semper, ipsum nulla sodales erat, eget tincidunt justo dui quis justo. Pellentesque dictum bibendum diam at aliquet. Sed pulvinar, dolor quis finibus ornare, eros odio facilisis erat, eu rhoncus nunc dui sed ex. Nunc gravida dui massa, sed ornare arcu tincidunt sit amet. Maecenas efficitur sapien neque, a laoreet libero feugiat ut. 13 | 14 | Nulla facilisi. Maecenas sodales nec purus eget posuere. Sed sapien quam, pretium a risus in, porttitor dapibus erat. Sed sit amet fringilla ipsum, eget iaculis augue. Integer sollicitudin tortor quis ultricies aliquam. Suspendisse fringilla nunc in tellus cursus, at placerat tellus scelerisque. Sed tempus elit a sollicitudin rhoncus. Nulla facilisi. Morbi nec dolor dolor. Orci varius natoque penatibus et magnis dis parturient montes, nascetur ridiculus mus. Cras et aliquet lectus. Pellentesque sit amet eros nisi. Quisque ac sapien in sapien congue accumsan. Nullam in posuere ante. Vestibulum ante ipsum primis in faucibus orci luctus et ultrices posuere cubilia Curae; Proin lacinia leo a nibh fringilla pharetra. 
15 | 16 | Orci varius natoque penatibus et magnis dis parturient montes, nascetur ridiculus mus. Proin venenatis lectus dui, vel ultrices ante bibendum hendrerit. Aenean egestas feugiat dui id hendrerit. Orci varius natoque penatibus et magnis dis parturient montes, nascetur ridiculus mus. Curabitur in tellus laoreet, eleifend nunc id, viverra leo. Proin vulputate non dolor vel vulputate. Curabitur pretium lobortis felis, sit amet finibus lorem suscipit ut. Sed non mollis risus. Duis sagittis, mi in euismod tincidunt, nunc mauris vestibulum urna, at euismod est elit quis erat. Phasellus accumsan vitae neque eu placerat. In elementum arcu nec tellus imperdiet, eget maximus nulla sodales. Curabitur eu sapien eget nisl sodales fermentum. 17 | 18 | Phasellus pulvinar ex id commodo imperdiet. Praesent odio nibh, sollicitudin sit amet faucibus id, placerat at metus. Donec vitae eros vitae tortor hendrerit finibus. Interdum et malesuada fames ac ante ipsum primis in faucibus. Quisque vitae purus dolor. Duis suscipit ac nulla et finibus. Phasellus ac sem sed dui dictum gravida. Phasellus eleifend vestibulum facilisis. Integer pharetra nec enim vitae mattis. Duis auctor, lectus quis condimentum bibendum, nunc dolor aliquam massa, id bibendum orci velit quis magna. Ut volutpat nulla nunc, sed interdum magna condimentum non. Sed urna metus, scelerisque vitae consectetur a, feugiat quis magna. Donec dignissim ornare nisl, eget tempor risus malesuada quis. 19 | -------------------------------------------------------------------------------- /website/blog/2017-04-10-blog-post-two.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: New Blog Post 3 | author: Blog Author 4 | authorURL: http://twitter.com/ 5 | authorFBID: 100002976521003 6 | --- 7 | 8 | Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. 
Lorem ipsum dolor sit amet, consectetur adipiscing elit. Vivamus elementum massa eget nulla aliquet sagittis. Proin odio tortor, vulputate ut odio in, ultrices ultricies augue. Cras ornare ultrices lorem malesuada iaculis. Etiam sit amet libero tempor, pulvinar mauris sed, sollicitudin sapien. 9 | 10 | 11 | 12 | Mauris vestibulum ullamcorper nibh, ut semper purus pulvinar ut. Donec volutpat orci sit amet mauris malesuada, non pulvinar augue aliquam. Vestibulum ultricies at urna ut suscipit. Morbi iaculis, erat at imperdiet semper, ipsum nulla sodales erat, eget tincidunt justo dui quis justo. Pellentesque dictum bibendum diam at aliquet. Sed pulvinar, dolor quis finibus ornare, eros odio facilisis erat, eu rhoncus nunc dui sed ex. Nunc gravida dui massa, sed ornare arcu tincidunt sit amet. Maecenas efficitur sapien neque, a laoreet libero feugiat ut. 13 | 14 | Nulla facilisi. Maecenas sodales nec purus eget posuere. Sed sapien quam, pretium a risus in, porttitor dapibus erat. Sed sit amet fringilla ipsum, eget iaculis augue. Integer sollicitudin tortor quis ultricies aliquam. Suspendisse fringilla nunc in tellus cursus, at placerat tellus scelerisque. Sed tempus elit a sollicitudin rhoncus. Nulla facilisi. Morbi nec dolor dolor. Orci varius natoque penatibus et magnis dis parturient montes, nascetur ridiculus mus. Cras et aliquet lectus. Pellentesque sit amet eros nisi. Quisque ac sapien in sapien congue accumsan. Nullam in posuere ante. Vestibulum ante ipsum primis in faucibus orci luctus et ultrices posuere cubilia Curae; Proin lacinia leo a nibh fringilla pharetra. 15 | 16 | Orci varius natoque penatibus et magnis dis parturient montes, nascetur ridiculus mus. Proin venenatis lectus dui, vel ultrices ante bibendum hendrerit. Aenean egestas feugiat dui id hendrerit. Orci varius natoque penatibus et magnis dis parturient montes, nascetur ridiculus mus. Curabitur in tellus laoreet, eleifend nunc id, viverra leo. Proin vulputate non dolor vel vulputate. 
Curabitur pretium lobortis felis, sit amet finibus lorem suscipit ut. Sed non mollis risus. Duis sagittis, mi in euismod tincidunt, nunc mauris vestibulum urna, at euismod est elit quis erat. Phasellus accumsan vitae neque eu placerat. In elementum arcu nec tellus imperdiet, eget maximus nulla sodales. Curabitur eu sapien eget nisl sodales fermentum. 17 | 18 | Phasellus pulvinar ex id commodo imperdiet. Praesent odio nibh, sollicitudin sit amet faucibus id, placerat at metus. Donec vitae eros vitae tortor hendrerit finibus. Interdum et malesuada fames ac ante ipsum primis in faucibus. Quisque vitae purus dolor. Duis suscipit ac nulla et finibus. Phasellus ac sem sed dui dictum gravida. Phasellus eleifend vestibulum facilisis. Integer pharetra nec enim vitae mattis. Duis auctor, lectus quis condimentum bibendum, nunc dolor aliquam massa, id bibendum orci velit quis magna. Ut volutpat nulla nunc, sed interdum magna condimentum non. Sed urna metus, scelerisque vitae consectetur a, feugiat quis magna. Donec dignissim ornare nisl, eget tempor risus malesuada quis. 19 | -------------------------------------------------------------------------------- /website/blog/2017-09-25-testing-rss.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: Adding RSS Support - RSS Truncation Test 3 | author: Eric Nakagawa 4 | authorURL: http://twitter.com/ericnakagawa 5 | authorFBID: 661277173 6 | --- 7 | 8 | 1234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890 9 | 10 | This should be truncated. 11 | 12 | 13 | 14 | This line should never render in XML. 
15 | -------------------------------------------------------------------------------- /website/blog/2017-09-26-adding-rss.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: Adding RSS Support 3 | author: Eric Nakagawa 4 | authorURL: http://twitter.com/ericnakagawa 5 | authorFBID: 661277173 6 | --- 7 | 8 | This is a test post. 9 | 10 | A whole bunch of other information. 11 | -------------------------------------------------------------------------------- /website/blog/2017-10-24-new-version-1.0.0.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: New Version 1.0.0 3 | author: Eric Nakagawa 4 | authorURL: http://twitter.com/ericnakagawa 5 | authorFBID: 661277173 6 | --- 7 | 8 | This blog post will test file name parsing issues when periods are present. 9 | -------------------------------------------------------------------------------- /website/core/Footer.js: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright (c) Facebook, Inc. and its affiliates. 3 | * 4 | * This source code is licensed under the MIT license found in the 5 | * LICENSE file in the root directory of this source tree. 6 | */ 7 | 8 | const React = require('react'); 9 | 10 | class Footer extends React.Component { 11 | docUrl(doc) { 12 | const baseUrl = this.props.config.baseUrl; 13 | const docsUrl = this.props.config.docsUrl; 14 | const docsPart = `${docsUrl ? `${docsUrl}/` : ''}`; 15 | return `${baseUrl}${docsPart}${doc}`; 16 | } 17 | 18 | render() { 19 | return ( 20 | 64 | ); 65 | } 66 | } 67 | 68 | module.exports = Footer; 69 | -------------------------------------------------------------------------------- /website/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: "3" 2 | 3 | services: 4 | docusaurus: 5 | build: . 
6 | ports: 7 | - 3000:3000 8 | - 35729:35729 9 | volumes: 10 | - ./docs:/app/docs 11 | - ./website/blog:/app/website/blog 12 | - ./website/core:/app/website/core 13 | - ./website/i18n:/app/website/i18n 14 | - ./website/pages:/app/website/pages 15 | - ./website/static:/app/website/static 16 | - ./website/sidebars.json:/app/website/sidebars.json 17 | - ./website/siteConfig.js:/app/website/siteConfig.js 18 | working_dir: /app/website 19 | -------------------------------------------------------------------------------- /website/docs/doc1.md: -------------------------------------------------------------------------------- 1 | --- 2 | id: doc1 3 | title: Latin-ish 4 | sidebar_label: Example Page 5 | --- 6 | 7 | Check the [documentation](https://docusaurus.io) for how to use Docusaurus. 8 | 9 | ## Lorem 10 | 11 | Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet, consectetur adipiscing elit. Vivamus elementum massa eget nulla aliquet sagittis. Proin odio tortor, vulputate ut odio in, ultrices ultricies augue. Cras ornare ultrices lorem malesuada iaculis. Etiam sit amet libero tempor, pulvinar mauris sed, sollicitudin sapien. 12 | 13 | ## Mauris In Code 14 | 15 | ``` 16 | Mauris vestibulum ullamcorper nibh, ut semper purus pulvinar ut. Donec volutpat orci sit amet mauris malesuada, non pulvinar augue aliquam. Vestibulum ultricies at urna ut suscipit. Morbi iaculis, erat at imperdiet semper, ipsum nulla sodales erat, eget tincidunt justo dui quis justo. Pellentesque dictum bibendum diam at aliquet. Sed pulvinar, dolor quis finibus ornare, eros odio facilisis erat, eu rhoncus nunc dui sed ex. Nunc gravida dui massa, sed ornare arcu tincidunt sit amet. Maecenas efficitur sapien neque, a laoreet libero feugiat ut. 17 | ``` 18 | 19 | ## Nulla 20 | 21 | Nulla facilisi. Maecenas sodales nec purus eget posuere. 
Sed sapien quam, pretium a risus in, porttitor dapibus erat. Sed sit amet fringilla ipsum, eget iaculis augue. Integer sollicitudin tortor quis ultricies aliquam. Suspendisse fringilla nunc in tellus cursus, at placerat tellus scelerisque. Sed tempus elit a sollicitudin rhoncus. Nulla facilisi. Morbi nec dolor dolor. Orci varius natoque penatibus et magnis dis parturient montes, nascetur ridiculus mus. Cras et aliquet lectus. Pellentesque sit amet eros nisi. Quisque ac sapien in sapien congue accumsan. Nullam in posuere ante. Vestibulum ante ipsum primis in faucibus orci luctus et ultrices posuere cubilia Curae; Proin lacinia leo a nibh fringilla pharetra. 22 | 23 | ## Orci 24 | 25 | Orci varius natoque penatibus et magnis dis parturient montes, nascetur ridiculus mus. Proin venenatis lectus dui, vel ultrices ante bibendum hendrerit. Aenean egestas feugiat dui id hendrerit. Orci varius natoque penatibus et magnis dis parturient montes, nascetur ridiculus mus. Curabitur in tellus laoreet, eleifend nunc id, viverra leo. Proin vulputate non dolor vel vulputate. Curabitur pretium lobortis felis, sit amet finibus lorem suscipit ut. Sed non mollis risus. Duis sagittis, mi in euismod tincidunt, nunc mauris vestibulum urna, at euismod est elit quis erat. Phasellus accumsan vitae neque eu placerat. In elementum arcu nec tellus imperdiet, eget maximus nulla sodales. Curabitur eu sapien eget nisl sodales fermentum. 26 | 27 | ## Phasellus 28 | 29 | Phasellus pulvinar ex id commodo imperdiet. Praesent odio nibh, sollicitudin sit amet faucibus id, placerat at metus. Donec vitae eros vitae tortor hendrerit finibus. Interdum et malesuada fames ac ante ipsum primis in faucibus. Quisque vitae purus dolor. Duis suscipit ac nulla et finibus. Phasellus ac sem sed dui dictum gravida. Phasellus eleifend vestibulum facilisis. Integer pharetra nec enim vitae mattis. Duis auctor, lectus quis condimentum bibendum, nunc dolor aliquam massa, id bibendum orci velit quis magna. 
Ut volutpat nulla nunc, sed interdum magna condimentum non. Sed urna metus, scelerisque vitae consectetur a, feugiat quis magna. Donec dignissim ornare nisl, eget tempor risus malesuada quis. 30 | -------------------------------------------------------------------------------- /website/docs/doc2.md: -------------------------------------------------------------------------------- 1 | --- 2 | id: doc2 3 | title: document number 2 4 | --- 5 | 6 | This is a link to [another document.](doc3.md) 7 | This is a link to an [external page.](http://www.example.com) 8 | -------------------------------------------------------------------------------- /website/docs/doc3.md: -------------------------------------------------------------------------------- 1 | --- 2 | id: doc3 3 | title: This is document number 3 4 | --- 5 | 6 | Lorem ipsum dolor sit amet, consectetur adipiscing elit. In ac euismod odio, eu consequat dui. Nullam molestie consectetur risus id imperdiet. Proin sodales ornare turpis, non mollis massa ultricies id. Nam at nibh scelerisque, feugiat ante non, dapibus tortor. Vivamus volutpat diam quis tellus elementum bibendum. Praesent semper gravida velit quis aliquam. Etiam in cursus neque. Nam lectus ligula, malesuada et mauris a, bibendum faucibus mi. Phasellus ut interdum felis. Phasellus in odio pulvinar, porttitor urna eget, fringilla lectus. Aliquam sollicitudin est eros. Mauris consectetur quam vitae mauris interdum hendrerit. Lorem ipsum dolor sit amet, consectetur adipiscing elit. 7 | 8 | Duis et egestas libero, imperdiet faucibus ipsum. Sed posuere eget urna vel feugiat. Vivamus a arcu sagittis, fermentum urna dapibus, congue lectus. Fusce vulputate porttitor nisl, ac cursus elit volutpat vitae. Nullam vitae ipsum egestas, convallis quam non, porta nibh. Morbi gravida erat nec neque bibendum, eu pellentesque velit posuere. Fusce aliquam erat eu massa eleifend tristique. 9 | 10 | Sed consequat sollicitudin ipsum eget tempus. Integer a aliquet velit. 
In justo nibh, pellentesque non suscipit eget, gravida vel lacus. Donec odio ante, malesuada in massa quis, pharetra tristique ligula. Donec eros est, tristique eget finibus quis, semper non nisl. Vivamus et elit nec enim ornare placerat. Sed posuere odio a elit cursus sagittis. 11 | 12 | Phasellus feugiat purus eu tortor ultrices finibus. Ut libero nibh, lobortis et libero nec, dapibus posuere eros. Sed sagittis euismod justo at consectetur. Nulla finibus libero placerat, cursus sapien at, eleifend ligula. Vivamus elit nisl, hendrerit ac nibh eu, ultrices tempus dui. Nam tellus neque, commodo non rhoncus eu, gravida in risus. Nullam id iaculis tortor. 13 | 14 | Nullam at odio in sem varius tempor sit amet vel lorem. Etiam eu hendrerit nisl. Fusce nibh mauris, vulputate sit amet ex vitae, congue rhoncus nisl. Sed eget tellus purus. Nullam tempus commodo erat ut tristique. Cras accumsan massa sit amet justo consequat eleifend. Integer scelerisque vitae tellus id consectetur. 15 | -------------------------------------------------------------------------------- /website/docs/exampledoc4.md: -------------------------------------------------------------------------------- 1 | --- 2 | id: doc4 3 | title: Other Document 4 | --- 5 | 6 | this is another document 7 | -------------------------------------------------------------------------------- /website/docs/exampledoc5.md: -------------------------------------------------------------------------------- 1 | --- 2 | id: doc5 3 | title: Fifth Document 4 | --- 5 | 6 | Another one 7 | -------------------------------------------------------------------------------- /website/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "scripts": { 3 | "examples": "docusaurus-examples", 4 | "start": "docusaurus-start", 5 | "build": "docusaurus-build", 6 | "publish-gh-pages": "docusaurus-publish", 7 | "write-translations": "docusaurus-write-translations", 8 | "version": 
"docusaurus-version", 9 | "delete-version": "docusaurus-delete-version", 10 | "rename-version": "docusaurus-rename-version" 11 | }, 12 | "devDependencies": { 13 | "docusaurus": "^1.14.7", 14 | "docusaurus-delete-version": "^0.1.1" 15 | } 16 | } 17 | -------------------------------------------------------------------------------- /website/pages/en/help.js: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright (c) Facebook, Inc. and its affiliates. 3 | * 4 | * This source code is licensed under the MIT license found in the 5 | * LICENSE file in the root directory of this source tree. 6 | */ 7 | 8 | const React = require('react'); 9 | 10 | const CompLibrary = require('../../core/CompLibrary.js'); 11 | 12 | const Container = CompLibrary.Container; 13 | const GridBlock = CompLibrary.GridBlock; 14 | 15 | function Help(props) { 16 | const {config: siteConfig, language = ''} = props; 17 | const {baseUrl, docsUrl} = siteConfig; 18 | const docsPart = `${docsUrl ? `${docsUrl}/` : ''}`; 19 | const langPart = `${language ? `${language}/` : ''}`; 20 | const docUrl = (doc) => `${baseUrl}${docsPart}${langPart}${doc}`; 21 | 22 | const supportLinks = [ 23 | { 24 | content: `Learn more using the [documentation on this site.](${docUrl( 25 | 'doc1.html', 26 | )})`, 27 | title: 'Browse Docs', 28 | }, 29 | { 30 | content: 'Ask questions about the documentation and project', 31 | title: 'Join the community', 32 | }, 33 | { 34 | content: "Find out what's new with this project", 35 | title: 'Stay up to date', 36 | }, 37 | ]; 38 | 39 | return ( 40 |
41 | 42 |
43 |
44 |

Need help?

45 |
46 |

This project is maintained by a dedicated group of people.

47 | 48 |
49 |
50 |
51 | ); 52 | } 53 | 54 | module.exports = Help; 55 | -------------------------------------------------------------------------------- /website/pages/en/users.js: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright (c) Facebook, Inc. and its affiliates. 3 | * 4 | * This source code is licensed under the MIT license found in the 5 | * LICENSE file in the root directory of this source tree. 6 | */ 7 | 8 | const React = require('react'); 9 | 10 | const CompLibrary = require('../../core/CompLibrary.js'); 11 | 12 | const Container = CompLibrary.Container; 13 | 14 | class Users extends React.Component { 15 | render() { 16 | const {config: siteConfig} = this.props; 17 | if ((siteConfig.users || []).length === 0) { 18 | return null; 19 | } 20 | 21 | const showcase = siteConfig.users.map((user) => ( 22 | 23 | {user.caption} 24 | 25 | )); 26 | 27 | return ( 28 |
29 | 30 |
31 |
32 |

Who is Using This?

33 |

This project is used by many folks

34 |
35 |
{showcase}
36 | {siteConfig.repoUrl && ( 37 | 38 |

Are you using this project?

39 | 42 | Add your company 43 | 44 |
45 | )} 46 |
47 |
48 |
49 | ); 50 | } 51 | } 52 | 53 | module.exports = Users; 54 | -------------------------------------------------------------------------------- /website/pages/en/versions.js: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright (c) Facebook, Inc. and its affiliates. 3 | * 4 | * This source code is licensed under the MIT license found in the 5 | * LICENSE file in the root directory of this source tree. 6 | */ 7 | 8 | const React = require('react'); 9 | 10 | const CompLibrary = require('../../core/CompLibrary'); 11 | 12 | const Container = CompLibrary.Container; 13 | 14 | const CWD = process.cwd(); 15 | 16 | const versions = require(`${CWD}/versions.json`); 17 | 18 | function Versions(props) { 19 | const {config: siteConfig} = props; 20 | const latestVersion = versions[0]; 21 | const repoUrl = `https://github.com/${siteConfig.organizationName}/${siteConfig.projectName}`; 22 | return ( 23 |
24 | 25 |
26 |
27 |

{siteConfig.title} Versions

28 |
29 |

New versions of this project are released every so often.

30 |

Current version (Stable)

31 | 32 | 33 | 34 | 35 | 45 | 48 | 49 | 50 |
{latestVersion} 36 | {/* You are supposed to change this href where appropriate 37 | Example: href="/docs(/:language)/:id" */} 38 | 42 | Documentation 43 | 44 | 46 | Release Notes 47 |
51 |

52 | This is the version that is configured automatically when you first 53 | install this project. 54 |

55 |
56 |
57 |
58 | ); 59 | } 60 | 61 | module.exports = Versions; 62 | -------------------------------------------------------------------------------- /website/sidebars.json: -------------------------------------------------------------------------------- 1 | { 2 | "docs": { 3 | "About IOMesh": [ 4 | "introduction/introduction", 5 | "basic-concepts/basic-concepts" 6 | ], 7 | "Deploy IOMesh": [ 8 | "deploy-iomesh-cluster/prerequisites", 9 | "deploy-iomesh-cluster/install-iomesh", 10 | "deploy-iomesh-cluster/setup-iomesh", 11 | "deploy-iomesh-cluster/activate-license" 12 | ], 13 | "Volume Operations": [ 14 | "volume-operations/create-storageclass", 15 | "volume-operations/create-pv", 16 | "volume-operations/authenticate-pv", 17 | "volume-operations/expand-pv", 18 | "volume-operations/clone-pv" 19 | ], 20 | "VolumeSnapshot Operations":[ 21 | "volumesnapshot-operations/create-snapshotclass", 22 | "volumesnapshot-operations/create-volumesnapshot", 23 | "volumesnapshot-operations/restore-volumesnapshot" 24 | ], 25 | "Deploy Stateful Applications": [ 26 | "stateful-applications/iomesh-for-mysql", 27 | "stateful-applications/iomesh-for-mongodb" 28 | ], 29 | "Cluster Operations": [ 30 | "cluster-operations/scale-out-cluster", 31 | "cluster-operations/scale-down-cluster", 32 | "cluster-operations/upgrade-cluster", 33 | "cluster-operations/uninstall-cluster", 34 | "cluster-operations/manage-license", 35 | "cluster-operations/replace-failed-disk" 36 | ], 37 | "Monitor IOMesh":[ 38 | "monitor-iomesh/install-iomesh-dashboard", 39 | "monitor-iomesh/monitoring-iomesh" 40 | ], 41 | "Advanced Functions": [ 42 | "advanced-functions/manage-multiple-cluster", 43 | "advanced-functions/localpv-manager", 44 | "advanced-functions/external-storage" 45 | ], 46 | "Appendices": [ 47 | "appendices/release-notes", 48 | "appendices/downloads", 49 | "appendices/setup-worker-node", 50 | "appendices/iomesh-metrics", 51 | "appendices/faq" 52 | ] 53 | } 54 | } 55 | 
-------------------------------------------------------------------------------- /website/static/img/IOMesh_logo_on_white.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | -------------------------------------------------------------------------------- /website/static/img/Slack_footer.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | -------------------------------------------------------------------------------- /website/static/img/Twitter_footer.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | -------------------------------------------------------------------------------- /website/static/img/YouTube_footer.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | -------------------------------------------------------------------------------- /website/static/img/favicon.ico: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/iomesh/docs/a5d1600c3adf566f1398e85ceccc9a6ebe0a46e0/website/static/img/favicon.ico -------------------------------------------------------------------------------- /website/static/img/mesh.svg: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /website/static/img/oss_logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/iomesh/docs/a5d1600c3adf566f1398e85ceccc9a6ebe0a46e0/website/static/img/oss_logo.png -------------------------------------------------------------------------------- /website/static/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 
9 | IOMesh 10 | 11 | 12 | If you are not redirected automatically, please follow this 13 | link. 14 | 15 | 16 | -------------------------------------------------------------------------------- /website/static/js/add-tracker.js: -------------------------------------------------------------------------------- 1 | /** Baidu Analytics */ 2 | var _hmt = _hmt || []; 3 | (function () { 4 | var hm = document.createElement("script"); 5 | hm.src = "https://hm.baidu.com/hm.js?cf10aaf0175b83ffccb0276a7a65b614"; 6 | var s = document.getElementsByTagName("script")[0]; 7 | s.parentNode.insertBefore(hm, s); 8 | })(); 9 | 10 | /** Google Analytics */ 11 | (function () { 12 | var googleScript = document.createElement("script"); 13 | googleScript.src = "https://www.googletagmanager.com/gtag/js?id=G-V0NB3342NG"; 14 | googleScript.async = true; 15 | document.head.appendChild(googleScript); 16 | 17 | googleScript.onload = function () { 18 | window.dataLayer = window.dataLayer || []; 19 | function gtag() { 20 | dataLayer.push(arguments); 21 | } 22 | gtag("js", new Date()); 23 | 24 | gtag("config", "G-V0NB3342NG"); 25 | }; 26 | })(); 27 | 28 | /** Google Ad */ 29 | (function (w, d, s, l, i) { 30 | w[l] = w[l] || []; 31 | w[l].push({ "gtm.start": new Date().getTime(), event: "gtm.js" }); 32 | var f = d.getElementsByTagName(s)[0], 33 | j = d.createElement(s), 34 | dl = l != "dataLayer" ? "&l=" + l : ""; 35 | j.async = true; 36 | j.src = "https://www.googletagmanager.com/gtm.js?id=" + i + dl; 37 | f.parentNode.insertBefore(j, f); 38 | })(window, document, "script", "dataLayer", "GTM-MS3N6Z8C"); 39 | -------------------------------------------------------------------------------- /website/versioned_docs/version-0.10.0/about-iomesh/introduction.md: -------------------------------------------------------------------------------- 1 | --- 2 | id: version-0.10.0-introduction 3 | title: Introduction 4 | sidebar_label: Introduction 5 | original_id: introduction 6 | --- 7 | 8 | ## What is IOMesh? 
9 | 10 | IOMesh is a distributed storage system specially designed for Kubernetes workloads, providing reliable persistent storage capabilities for containerized stateful applications such as MySQL, Cassandra, and MongoDB. 11 | 12 | - Thousands of Pods are created and destroyed every minute in Kubernetes clusters. IOMesh is built for this kind of highly dynamic and large-scale workloads in the cloud-native era. It is designed with this in mind from the beginning to provide the performance, reliability, and scalability required by cloud-native applications. 13 | - IOMesh runs natively on Kubernetes and fully utilizes the Kubernetes's capabilities. Therefore, the operation teams can leverage the standard Kubernetes APIs to uniformly manage the applications and IOMesh, which integrates perfectly with existing DevOps processes. 14 | - IOMesh enables users to start at a small scale and expand the storage at will by adding disks or nodes. 15 | 16 | ## Key Features 17 | 18 | ### High Performance 19 | Database is one of the key applications to measure storage performance. IOMesh performs very well in the database performance benchmark tests with stable read and write latency and high QPS and TPS, which means that it can provide stable data services. 20 | ### No Kernel Dependencies 21 | IOMesh runs entirely in user space and can provide reliable service through effective software fault isolation. When a problem occurs, other applications running on the same node can continue to run without causing the entire system to crash. In addition, deploying and maintaining the IOMesh are very easy since there is no need to install any kernel modules and you do not need to worry about the kernel version compatibility. 22 | ### Storage Performance Tiering 23 | IOMesh supports flexible deployment of hybrid disks including NVMe SSD, SATA SSD and HDD. 
This can help users make the most of their storage investment, and control the costs to each block while maximizing the storage performance. 24 | 25 | ## Architecture 26 | 27 | ![iomesh-architecture](https://user-images.githubusercontent.com/78140947/116510446-8499ea00-a8f7-11eb-9b1b-fc61a0fbf4b0.png) 28 | 29 | ## Compatibility List with Kubernetes 30 | 31 | | IOMesh Version | Kubernetes Version | 32 | | -------------- | ------------------ | 33 | | v0.10.x | v1.17 or higher | 34 | -------------------------------------------------------------------------------- /website/versioned_docs/version-0.10.1/about-iomesh/introduction.md: -------------------------------------------------------------------------------- 1 | --- 2 | id: version-0.10.1-introduction 3 | title: Introduction 4 | sidebar_label: Introduction 5 | original_id: introduction 6 | --- 7 | 8 | ## What is IOMesh? 9 | 10 | IOMesh is a distributed storage system specially designed for Kubernetes workloads, providing reliable persistent storage capabilities for containerized stateful applications such as MySQL, Cassandra, and MongoDB. 11 | 12 | - Thousands of Pods are created and destroyed every minute in Kubernetes clusters. IOMesh is built for this kind of highly dynamic and large-scale workloads in the cloud-native era. It is designed with this in mind from the beginning to provide the performance, reliability, and scalability required by cloud-native applications. 13 | - IOMesh runs natively on Kubernetes and fully utilizes the Kubernetes's capabilities. Therefore, the operation teams can leverage the standard Kubernetes APIs to uniformly manage the applications and IOMesh, which integrates perfectly with existing DevOps processes. 14 | - IOMesh enables users to start at a small scale and expand the storage at will by adding disks or nodes. 15 | 16 | ## Key Features 17 | 18 | ### High Performance 19 | Database is one of the key applications to measure storage performance. 
IOMesh performes very well in the database performance benchmark tests with stable read and write latency and high QPS and TPS, which means that it can provide stable data services. 20 | ### No Kernel Dependencies 21 | IOMesh runs entirely in user space and can provide reliable service through effective software fault isolation. When a problem occurs, other applications running on the same node can continue to run without causing the entire system to crash. In addition, deploying and maintaining the IOMesh are very easy since there is no need to install any kernel modules and you do not need to worry about the kernel version compatibility. 22 | ### Storage Performance Tiering 23 | IOMesh supports flexible deployment of hybrid disks including NVMe SSD, SATA SSD and HDD. This can help users make the most of their storage investment, and control the costs to each block while maximizing the storage performance. 24 | 25 | ## Architecture 26 | 27 | ![IOMesh arch](https://user-images.githubusercontent.com/78140947/122766241-e2352c00-d2d3-11eb-9630-bb5b428c3178.png) 28 | 29 | ## Compatibility List with Kubernetes 30 | 31 | | IOMesh Version | Kubernetes Version | 32 | | -------------- | ------------------ | 33 | | v0.10.x | v1.17 or higher | 34 | -------------------------------------------------------------------------------- /website/versioned_docs/version-0.10.1/deploy/prerequisites.md: -------------------------------------------------------------------------------- 1 | --- 2 | id: version-0.10.1-prerequisites 3 | title: Prerequisites 4 | sidebar_label: Prerequisites 5 | original_id: prerequisites 6 | --- 7 | 8 | ## Installation Requirements 9 | 10 | - A Kubernetes 1.17+ cluster with at least 3 worker nodes 11 | - Each worker node needs 12 | - At least one free SSD for IOMesh journal and cache 13 | - At least one free HDD for IOMesh datastore 14 | - A 10GbE NIC or above for IOMesh storage network 15 | - At least 100GB free space on /opt 16 | 17 | ## Setup Worker Node 18 | 
19 | For each Kubernetes worker node that will run IOMesh, do the following steps: 20 | 21 | ### Setup Open-ISCSI 22 | 23 | 1. Install open-iscsi: 24 | 25 | 26 | 27 | 28 | 29 | ```shell 30 | sudo yum install iscsi-initiator-utils -y 31 | ``` 32 | 33 | 34 | 35 | ```shell 36 | sudo apt-get install open-iscsi -y 37 | ``` 38 | 39 | 40 | 41 | 2. Edit `/etc/iscsi/iscsid.conf` by setting `node.startup` to `manual`: 42 | 43 | ```shell 44 | sudo sed -i 's/^node.startup = automatic$/node.startup = manual/' /etc/iscsi/iscsid.conf 45 | ``` 46 | 47 | 3. Disable SELinux: 48 | 49 | ```shell 50 | sudo setenforce 0 51 | sudo sed -i 's/^SELINUX=enforcing$/SELINUX=permissive/' /etc/selinux/config 52 | ``` 53 | 54 | 4. Start `iscsid` service: 55 | 56 | ```shell 57 | sudo systemctl enable --now iscsid 58 | ``` 59 | 60 | ### Setup Local Metadata Store 61 | 62 | IOMesh uses local path `/opt/iomesh` to store metadata. Ensure that there is at least 100G free space at `/opt`. 63 | 64 | ### Setup Data Network 65 | 66 | To avoid contention on network bandwith, it is necessary to setup a seperate network segment for IOMesh cluster. The `dataCIDR` defines the IP block for IOMesh data network. Every node running IOMesh should have an interface whose IP address belongs to the `dataCIDR`. 67 | -------------------------------------------------------------------------------- /website/versioned_docs/version-0.11.0/about-iomesh/introduction.md: -------------------------------------------------------------------------------- 1 | --- 2 | id: version-0.11.0-introduction 3 | title: Introduction 4 | sidebar_label: Introduction 5 | original_id: introduction 6 | --- 7 | 8 | ## What is IOMesh? 9 | 10 | IOMesh is a distributed storage system specially designed for Kubernetes workloads, providing reliable persistent storage capabilities for containerized stateful applications such as MySQL, Cassandra, and MongoDB. 11 | 12 | - Thousands of Pods are created and destroyed every minute in Kubernetes clusters. 
IOMesh is built for this kind of highly dynamic and large-scale workloads in the cloud-native era. It is designed with this in mind from the beginning to provide the performance, reliability, and scalability required by cloud-native applications. 13 | - IOMesh runs natively on Kubernetes and fully utilizes the Kubernetes's capabilities. Therefore, the operation teams can leverage the standard Kubernetes APIs to uniformly manage the applications and IOMesh, which integrates perfectly with existing DevOps processes. 14 | - IOMesh enables users to start at a small scale and expand the storage at will by adding disks or nodes. 15 | 16 | ## Key Features 17 | 18 | ### High Performance 19 | Database is one of the key applications to measure storage performance. IOMesh performes very well in the database performance benchmark tests with low and stable read/write latencies and high QPS/TPS, meaning to provide stable data services. 20 | ### No Kernel Dependencies 21 | IOMesh runs entirely in user space and can provide reliable services through effective software fault isolation. When a problem occurs, other applications running at the same node can continue to run without causing entire system crash. In addition, it is very easy to deploy and maintain IOMesh since you don't need to install any kernel modules and don't need to worry about kernel version compatibility at all. 22 | ### Storage Performance Tiering 23 | IOMesh supports flexible deployment of hybrid disks including NVMe SSD, SATA SSD and HDD. This can help users make the most of their storage investment, minimize the cost of each block while maximizing the storage performance. 
24 | 25 | ## Architecture 26 | 27 | ![IOMesh arch](https://user-images.githubusercontent.com/78140947/122766241-e2352c00-d2d3-11eb-9630-bb5b428c3178.png) 28 | 29 | ## Compatibility List with Kubernetes 30 | 31 | | IOMesh Version | Kubernetes Version | 32 | | -------------- | ------------------ | 33 | | v0.11.x | v1.17~v1.21 | 34 | -------------------------------------------------------------------------------- /website/versioned_docs/version-0.11.0/additional-info/best-practice-in-production.md: -------------------------------------------------------------------------------- 1 | --- 2 | id: version-0.11.0-best-practice-in-production 3 | title: Best Practice in Production 4 | sidebar_label: Best Practice in Production 5 | original_id: best-practice-in-production 6 | --- 7 | 8 | ## Recommended Hardware Configurations in Production Environment 9 | 10 | The recommended hardware configurations are as follows. 11 | 12 | | Hardware | Requirements | No | 13 | | --------------- | ------------------------ | ---- | 14 | | CPU | X86 64 (4 cores or more) | 2 | 15 | | RAM | 64 GB or more | | 16 | | Disk Controller | HBA Mode | 1 | 17 | | SSD | 960 GB or more | 2 | 18 | | HDD | SAS/SATA HDD | 1 | 19 | | Boot Device | 64 GB | 1 | 20 | | Ethernet NIC | 10 GbE | \>=1 | 21 | | Ethernet Switch | 10 GbE | 1 | 22 | 23 | #### 24 | -------------------------------------------------------------------------------- /website/versioned_docs/version-0.11.0/additional-info/performance-testing.md: -------------------------------------------------------------------------------- 1 | --- 2 | id: version-0.11.0-performance-testing 3 | title: Performance Testing 4 | sidebar_label: Performance Testing 5 | original_id: performance-testing 6 | --- 7 | 8 | ## FIO-based Performance Testing 9 | 10 | 1. Create a pod for fio test 11 | 12 | ```shell 13 | kubectl apply -f https://docs.iomesh.com/assets/iomesh-csi-driver/example/fio.yaml 14 | ``` 15 | 16 | 2. 
Wait until fio-pvc bound is finished and fio pod is ready 17 | 18 | ```shell 19 | kubectl get pvc fio-pvc 20 | ``` 21 | 22 | ```output 23 | NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE 24 | fio-pvc Bound pvc-d7916b34-50cd-49bd-86f9-5287db1265cb 30Gi RWO iomesh-csi-driver-default 15s 25 | ``` 26 | 27 | ```shell 28 | kubectl wait --for=condition=Ready pod/fio 29 | ``` 30 | 31 | ```output 32 | pod/fio condition met 33 | ``` 34 | 35 | 3. Run fio tests 36 | 37 | ```shell 38 | kubectl exec -it fio sh 39 | fio --name fio --filename=/mnt/fio --bs=256k --rw=write --ioengine=libaio --direct=1 --iodepth=128 --numjobs=1 --size=$(blockdev --getsize64 /mnt/fio) 40 | fio --name fio --filename=/mnt/fio --bs=4k --rw=randread --ioengine=libaio --direct=1 --iodepth=128 --numjobs=1 --size=$(blockdev --getsize64 /mnt/fio) 41 | ``` 42 | 43 | 4. Clean up 44 | 45 | ```shell 46 | kubectl delete pod fio 47 | kubectl delete pvc fio-pvc 48 | # You need to delete pv when reclaimPolicy is Retain 49 | kubectl delete pvc-d7916b34-50cd-49bd-86f9-5287db1265cb 50 | ``` 51 | -------------------------------------------------------------------------------- /website/versioned_docs/version-0.11.0/deploy/prerequisites.md: -------------------------------------------------------------------------------- 1 | --- 2 | id: version-0.11.0-prerequisites 3 | title: Prerequisites 4 | sidebar_label: Prerequisites 5 | original_id: prerequisites 6 | --- 7 | 8 | ## Installation Requirements 9 | #### Kubernetes Cluster Requirements 10 | A Kubernetes (from v1.17 to v1.21) cluster with at least 3 worker nodes. 11 | 12 | #### Disk Requirements 13 | ##### Cache Disk 14 | * All-flash mode: no configuration is required. 15 | * Hybrid-flash mode: there should be at least one available SSD on each worker node, and the SSD capacity should be larger than 60 GB. 
16 | 17 | ##### Data Disk 18 | * All-flash mode: there should be at least one available SSD on each worker node, and the SSD capacity should be larger than 60 GB. 19 | * Hybrid-flash mode: there should be at least one available HDD on each worker node, and the HDD capacity should be larger than 60 GB. 20 | 21 | #### Network Requirements 22 | Network cards of 10GbE or above are required for the IOMesh storage network. 23 | 24 | #### Reserved System Space 25 | At least 100GB of disk space is required in the /opt directory on each worker node for storing the IOMesh cluster metadata. 26 | 27 | ## Worker Node Setup 28 | Follow the steps below to set up each Kubernetes worker node that runs IOMesh. 29 | 30 | ### Set Up Open-iSCSI 31 | 32 | 1. Install open-iscsi. 33 | 34 | 35 | 36 | 37 | 38 | ```shell 39 | sudo yum install iscsi-initiator-utils -y 40 | ``` 41 | 42 | 43 | 44 | ```shell 45 | sudo apt-get install open-iscsi -y 46 | ``` 47 | 48 | 49 | 50 | 2. Edit `/etc/iscsi/iscsid.conf` by setting `node.startup` to `manual`. 51 | 52 | ```shell 53 | sudo sed -i 's/^node.startup = automatic$/node.startup = manual/' /etc/iscsi/iscsid.conf 54 | ``` 55 | > **_NOTE_: The default value of the MaxRecvDataSegmentLength in /etc/iscsi/iscsi.conf is set at 32,768, and the maximum number of PVs is limited to 80,000 in IOMesh. To create PVs more than 80,000 in IOMesh, it is recommended to set the value of MaxRecvDataSegmentLength to 163,840 or above.** 56 | 57 | 3. Disable SELinux. 58 | 59 | ```shell 60 | sudo setenforce 0 61 | sudo sed -i 's/^SELINUX=enforcing$/SELINUX=permissive/' /etc/selinux/config 62 | ``` 63 | 64 | 4. Ensure `iscsi_tcp` kernel module is loaded. 65 | 66 | ```shell 67 | sudo modprobe iscsi_tcp 68 | sudo bash -c 'echo iscsi_tcp > /etc/modprobe.d/iscsi-tcp.conf' 69 | ``` 70 | 71 | 5. Start `iscsid` service. 
72 | 73 | ```shell 74 | sudo systemctl enable --now iscsid 75 | ``` 76 | ### Set Up Local Metadata Store 77 | 78 | IOMesh stores metadata in the local path `/opt/iomesh`. Ensure that there is at least 100GB of available space at `/opt`. 79 | 80 | ### Set Up Data Network 81 | 82 | To avoid contention on network bandwidth, set up a separate network for the IOMesh Cluster. The `dataCIDR` defines IP block for the IOMesh data network. Every node running IOMesh should have an interface with an IP address belonging to `dataCIDR`. 83 | 84 | -------------------------------------------------------------------------------- /website/versioned_docs/version-0.11.0/deploy/setup-snapshotclass.md: -------------------------------------------------------------------------------- 1 | --- 2 | id: version-0.11.0-setup-snapshotclass 3 | title: Setup SnapshotClass 4 | sidebar_label: Setup SnapshotClass 5 | original_id: setup-snapshotclass 6 | --- 7 | 8 | [Kubernetes VolumeSnapshotClass](https://kubernetes.io/docs/concepts/storage/volume-snapshot-classes/) objects are analogous to StorageClass. It helps to define multiple storage classes and is referenced by Volume Snapshots to associate the snapshot with the required Snapshot Class. Each Volume Snapshot is associated with a single Volume Snapshot Class. 
9 | 10 | A Volume Snapshot Class is created with the following definition: 11 | 12 | ```yaml 13 | apiVersion: snapshot.storage.k8s.io/v1beta1 14 | kind: VolumeSnapshotClass 15 | metadata: 16 | name: iomesh-csi-driver-default 17 | driver: com.iomesh.csi-driver # <-- driver.name in iomesh-values.yaml 18 | deletionPolicy: Retain 19 | ``` 20 | -------------------------------------------------------------------------------- /website/versioned_docs/version-0.11.0/deploy/setup-storageclass.md: -------------------------------------------------------------------------------- 1 | --- 2 | id: version-0.11.0-setup-storageclass 3 | title: Setup StorageClass 4 | sidebar_label: Setup StorageClass 5 | original_id: setup-storageclass 6 | --- 7 | 8 | The parameters of IOMesh storage class are: 9 | 10 | | Parameter | Value | Default | Description | 11 | | ------------------------- | ----------------------------- | ------- | ---------------------------------- | 12 | | csi.storage.k8s.io/fstype | "xfs", "ext2", "ext3", "ext4" | "ext4" | volume File system type | 13 | | replicaFactor | "2", "3" | "2" | replica factor | 14 | | thinProvision | "true", "false" | "true" | thin provision or thick provision. | 15 | 16 | After IOMesh CSI driver was installed, a default StorageClass `iomesh-csi-driver` would be created. 
You may also create a new StorageClass with customized parameters: 17 | 18 | 19 | ```yaml 20 | kind: StorageClass 21 | apiVersion: storage.k8s.io/v1 22 | metadata: 23 | name: iomesh-csi-driver-default 24 | provisioner: com.iomesh.csi-driver # <-- driver.name in iomesh-values.yaml 25 | reclaimPolicy: Retain 26 | allowVolumeExpansion: true 27 | parameters: 28 | # "ext4" / "ext3" / "ext2" / "xfs" 29 | csi.storage.k8s.io/fstype: "ext4" 30 | # "2" / "3" 31 | replicaFactor: "2" 32 | # "true" / "false" 33 | thinProvision: "true" 34 | volumeBindingMode: Immediate 35 | ``` 36 | 37 | > _About the `reclaimPolicy`_ 38 | > 39 | > The `reclaimPolicy` attribute of `StorageClass` can have two values of `Retain` and `Delete`, and the default is `Delete`. When a `PV` is created through `StorageClass`, its `persistentVolumeReclaimPolicy` attribute will inherit the `reclaimpolicy` attribute from `StorageClass`. You can also modify the value of `persistentVolumeReclaimPolicy` manually. 40 | > 41 | > The value of `reclaimPolicy` in the example is `Retain`, which means that, if you delete a `PVC`, the `PV` under the `PVC` will not be deleted, but will enter the `Released` state. Please note that, if you delete the `PV`, the corresponding IOMesh volume will not be deleted, instead, you need to change the value of `persistentVolumeReclaimPolicy` of the `PV` to `Delete` and then delete the `PV`. Or before creating a `PV`, you can set the value of `reclaimpolicy` of `StorageClass` to `Delete` so that all the resources will be released in cascade. 
42 | -------------------------------------------------------------------------------- /website/versioned_docs/version-0.11.0/iomesh-operations/cluster-operations.md: -------------------------------------------------------------------------------- 1 | --- 2 | id: version-0.11.0-cluster-operations 3 | title: Cluster Operations 4 | sidebar_label: Cluster Operations 5 | original_id: cluster-operations 6 | --- 7 | 8 | IOMesh Cluster can be scaled out and upgraded without interrupting the online services. 9 | 10 | ## Scale IOMesh Storage Cluster 11 | 12 | ### Meta Server 13 | 14 | #### Scale out 15 | 16 | Edit `meta/replicaCount` in `iomesh-values.yaml`. It is recommanded to have 3~5 Meta Servers in a production environment. 17 | 18 | Example: 19 | ```yaml 20 | meta: 21 | replicaCount: 3 22 | ``` 23 | 24 | You may also want to adjust `meta/podPolicy` for higher resilience: 25 | 26 | ```yaml 27 | meta: 28 | podPolicy: 29 | affinity: 30 | nodeAffinity: 31 | requiredDuringSchedulingIgnoredDuringExecution: 32 | nodeSelectorTerms: 33 | - matchExpressions: 34 | - key: kubernetes.io/e2e-az-name 35 | operator: In 36 | values: 37 | - az1 38 | - az2 39 | ``` 40 | 41 | Then apply the change: 42 | 43 | > **_NOTE_: replace `iomesh` with your release name.** 44 | 45 | ```bash 46 | helm upgrade --namespace iomesh-system iomesh iomesh/iomesh --values iomesh-values.yaml 47 | ``` 48 | 49 | ### Chunk Server 50 | 51 | #### Scale out 52 | 53 | Edit `chunk/replicaCount` in `iomesh-values.yaml`. 54 | 55 | ```yaml 56 | chunk: 57 | replicaCount: 5 # <- increase this number to scale Chunk Server 58 | ``` 59 | 60 | Then apply the change: 61 | 62 | > **_NOTE_: replace `iomesh` with your release name.** 63 | 64 | ```bash 65 | helm upgrade --namespace iomesh-system iomesh iomesh/iomesh --values iomesh-values.yaml 66 | ``` 67 | 68 | ## Upgrade IOMesh storage cluster 69 | 70 | Follow the following steps to upgrade IOMesh once a new version is released. 
71 | 72 | > **_NOTE_: If you only have 1 replica of meta server or chunk server, the upgrade process will never start.** 73 | 74 | 1. Export the default config `iomesh-values.yaml` from Chart 75 | 76 | > **_NOTE_: If you already exported the config, you can skip this step.** 77 | 78 | ```bash 79 | helm show values iomesh/iomesh > iomesh-values.yaml 80 | ``` 81 | 82 | 2. Edit `iomesh-values.yaml` 83 | 84 | ```yaml 85 | # The version of the IOMeshCluster. You get a new release from: http://iomesh.com/docs/release/releases 86 | version: v5.0.0-rc5 87 | ``` 88 | 89 | 3. Upgrade the IOMesh Cluster 90 | 91 | > **_NOTE_: `iomesh` is the release name, you may modify it.** 92 | 93 | ```bash 94 | helm upgrade --namespace iomesh-system iomesh iomesh/iomesh --values iomesh-values.yaml 95 | ``` 96 | 97 | 4. Wait until the new chunk server pods are ready. 98 | 99 | ```bash 100 | watch kubectl get pod --namespace iomesh-system 101 | ``` 102 | 103 | ## Uninstall IOMesh storage cluster 104 | 105 | > **_Attention_: All data will be lost after you uninstall an IOMesh storage cluster, including PVCs created with IOMesh StorageClass.** 106 | 107 | Run the following command to uninstall an IOMesh cluster. 108 | 109 | > **_NOTE_: You may replace `iomesh` with your own name.** 110 | 111 | ```bash 112 | helm uninstall --namespace iomesh-system iomesh 113 | ``` 114 | 115 | [1]: http://www.iomesh.com/docs/installation/setup-iomesh-storage#mount-device 116 | -------------------------------------------------------------------------------- /website/versioned_docs/version-0.11.0/iomesh-operations/monitoring.md: -------------------------------------------------------------------------------- 1 | --- 2 | id: version-0.11.0-monitoring 3 | title: Monitoring 4 | sidebar_label: Monitoring 5 | original_id: monitoring 6 | --- 7 | 8 | IOMesh cluster can be monitored by using Prometheus and Grafana. 
9 | 10 | ## Integrating with Prometheus 11 | 12 | If Prometheus was installed by [Prometheus Operator][1] in the same Kubernetes cluster with IOMesh, simply modify `iomesh-values.yaml` with: 13 | 14 | ```yaml 15 | serviceMonitor: 16 | create: true 17 | ``` 18 | 19 | Then upgrade the existing IOMesh Cluster: 20 | 21 | > **_NOTE_: You may replace `iomesh` with your release name.** 22 | 23 | ```bash 24 | helm upgrade --namespace iomesh-system iomesh iomesh/iomesh --values iomesh-values.yaml 25 | ``` 26 | 27 | An exporter will be created and the metric data will be collected by Prometheus automatically. 28 | 29 | It is also possible to configure Prometheus manually by importing [iomesh-prometheus-kubernetes-sd-example.yaml][4]. 30 | 31 | ## Integrating with Grafana 32 | 33 | Download and import [iomesh-dashboard.json][3] to any existing Grafana. 34 | 35 | [1]: https://github.com/prometheus-operator/prometheus-operator 36 | [2]: https://grafana.com/grafana/download 37 | [3]: https://raw.githubusercontent.com/iomesh/docs/master/docs/assets/iomesh-operation/ioemsh-dashobard.json 38 | [4]: https://raw.githubusercontent.com/iomesh/docs/master/docs/assets/iomesh-operation/iomesh-prometheus-kubernetes-sd-example.yaml 39 | -------------------------------------------------------------------------------- /website/versioned_docs/version-0.11.0/stateful-applications/iomesh-for-mysql.md: -------------------------------------------------------------------------------- 1 | --- 2 | id: version-0.11.0-iomesh-for-mysql 3 | title: IOMesh for MySQL 4 | sidebar_label: IOMesh for MySQL 5 | original_id: iomesh-for-mysql 6 | --- 7 | 8 | ## Setup k8s Cluster Storage 9 | 10 | 1.
Create a file named `iomesh-mysql-sc.yaml` with the following content: 11 | 12 | ```yaml 13 | kind: StorageClass 14 | apiVersion: storage.k8s.io/v1 15 | metadata: 16 | name: iomesh-mysql-sc 17 | provisioner: com.iomesh.csi-driver # driver.name in values.yaml when install IOMesh cluster 18 | reclaimPolicy: Retain 19 | allowVolumeExpansion: true 20 | parameters: 21 | csi.storage.k8s.io/fstype: "ext4" 22 | replicaFactor: "2" 23 | thinProvision: "true" 24 | ``` 25 | 26 | 2. Apply the yaml config: 27 | 28 | ```bash 29 | kubectl apply -f iomesh-mysql-sc.yaml 30 | ``` 31 | 32 | ## Deploy MySQL 33 | 34 | 1. Create a file named `mysql-deployment.yaml`. It describes a Deployment that runs MySQL and creates a PVC that consumes the IOMesh storage. 35 | 36 | ```yaml 37 | apiVersion: v1 38 | kind: PersistentVolumeClaim 39 | metadata: 40 | name: iomesh-mysql-pvc 41 | spec: 42 | storageClassName: iomesh-mysql-sc 43 | accessModes: 44 | - ReadWriteOnce 45 | resources: 46 | requests: 47 | storage: 10Gi 48 | --- 49 | apiVersion: v1 50 | kind: Service 51 | metadata: 52 | name: mysql 53 | spec: 54 | ports: 55 | - port: 3306 56 | selector: 57 | app: mysql 58 | clusterIP: None 59 | --- 60 | apiVersion: apps/v1 61 | kind: Deployment 62 | metadata: 63 | name: mysql 64 | spec: 65 | selector: 66 | matchLabels: 67 | app: mysql 68 | strategy: 69 | type: Recreate 70 | template: 71 | metadata: 72 | labels: 73 | app: mysql 74 | spec: 75 | containers: 76 | - image: mysql:5.6 77 | name: mysql 78 | env: 79 | # Use secret in real usage 80 | - name: MYSQL_ROOT_PASSWORD 81 | value: password 82 | ports: 83 | - containerPort: 3306 84 | name: mysql 85 | volumeMounts: 86 | - name: mysql-persistent-storage 87 | mountPath: /var/lib/mysql 88 | volumes: 89 | - name: mysql-persistent-storage 90 | persistentVolumeClaim: 91 | claimName: iomesh-mysql-pvc # pvc from iomesh created above 92 | ``` 93 | 94 | 2.
Apply the yaml config: 95 | 96 | ```bash 97 | kubectl apply -f mysql-deployment.yaml 98 | ``` 99 | 100 | ## Operate MySQL Data 101 | 102 | Users can use the features provided by IOMesh storage to perform such operations as expansion/snapshot/rollback/clone of the Persistent Volumes where MySQL data are located, see the reference for details [application-operations](https://docs.iomesh.com/volume-operations/snapshot-restore-and-clone) -------------------------------------------------------------------------------- /website/versioned_docs/version-0.11.0/volume-operations/create-volume.md: -------------------------------------------------------------------------------- 1 | --- 2 | id: version-0.11.0-create-volume 3 | title: Create Volume 4 | sidebar_label: Create Volume 5 | original_id: create-volume 6 | --- 7 | 8 | A volume can be created by using the following YAML file. Users should ensure that the corresponding `StorageClass` already exists. 9 | 10 | ```yaml 11 | apiVersion: v1 12 | kind: PersistentVolumeClaim 13 | metadata: 14 | name: iomesh-example-pvc 15 | spec: 16 | storageClassName: iomesh-example-sc 17 | accessModes: 18 | - ReadWriteOnce 19 | resources: 20 | requests: 21 | storage: 10Gi 22 | ``` 23 | -------------------------------------------------------------------------------- /website/versioned_docs/version-0.11.0/volume-operations/expand-volume.md: -------------------------------------------------------------------------------- 1 | --- 2 | id: version-0.11.0-expand-volume 3 | title: Expand Volume 4 | sidebar_label: Expand Volume 5 | original_id: expand-volume 6 | --- 7 | 8 | IOMesh volumes can be expanded after creation, no matter whether they are being used or not. 
9 | 10 | In the following example, assume that there is a PVC named `example-pvc` and its capacity is `10Gi`: 11 | 12 | ```yaml 13 | apiVersion: v1 14 | kind: PersistentVolumeClaim 15 | metadata: 16 | name: example-pvc 17 | spec: 18 | storageClassName: iomesh-csi-driver-default 19 | accessModes: 20 | - ReadWriteOnce 21 | resources: 22 | requests: 23 | storage: 10Gi # original capacity 24 | ``` 25 | 26 | Apply the YAML file: 27 | 28 | ```bash 29 | kubectl get pvc example-pvc 30 | ``` 31 | 32 | ```output 33 | NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE 34 | example-pvc Bound pvc-b2fc8425-9dbc-4204-8240-41cb4a7fa8ca 10Gi RWO iomesh-csi-driver-default 11m 35 | ``` 36 | 37 | To expand the capacity of this PVC to `20Gi`, simply modify the PVC declaration: 38 | 39 | ```yaml 40 | apiVersion: v1 41 | kind: PersistentVolumeClaim 42 | metadata: 43 | name: example-pvc 44 | spec: 45 | storageClassName: iomesh-csi-driver-default 46 | accessModes: 47 | - ReadWriteOnce 48 | resources: 49 | requests: 50 | storage: 20Gi # now expand capacity from 10 Gi to 20Gi 51 | ``` 52 | 53 | Apply the new YAML file: 54 | 55 | ```bash 56 | kubectl apply -f example-pvc.yaml 57 | ``` 58 | 59 | Then check the result: 60 | 61 | ```bash 62 | kubectl get pvc example-pvc 63 | ``` 64 | 65 | ```output 66 | NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE 67 | example-pvc Bound pvc-b2fc8425-9dbc-4204-8240-41cb4a7fa8ca 20Gi RWO iomesh-csi-driver-default 11m 68 | ``` 69 | 70 | ```bash 71 | kubectl get pv pvc-b2fc8425-9dbc-4204-8240-41cb4a7fa8ca 72 | ``` 73 | 74 | ```output 75 | NAME CAPACITY RECLAIM POLICY STATUS CLAIM STORAGECLASS 76 | pvc-b2fc8425-9dbc-4204-8240-41cb4a7fa8ca 20Gi Retain Bound default/example-pvc iomesh-csi-driver-default 77 | ``` 78 | -------------------------------------------------------------------------------- /website/versioned_docs/version-0.11.0/volume-operations/snapshot-restore-and-clone.md: 
-------------------------------------------------------------------------------- 1 | --- 2 | id: version-0.11.0-snapshot-restore-and-clone 3 | title: Snapshot, Restore and Clone 4 | sidebar_label: Snapshot, Restore and Clone 5 | original_id: snapshot-restore-and-clone 6 | --- 7 | 8 | ## Snapshot 9 | 10 | Users can use IOMesh to create a snapshot for an existing persistent volume (PV). 11 | 12 | A VolumeSnapshot object defines a request for taking a snapshot of the PVC. 13 | 14 | For example: 15 | 16 | ```yaml 17 | apiVersion: snapshot.storage.k8s.io/v1beta1 18 | kind: VolumeSnapshot 19 | metadata: 20 | name: example-snapshot 21 | spec: 22 | volumeSnapshotClassName: iomesh-csi-driver-default 23 | source: 24 | persistentVolumeClaimName: mongodb-data-pvc # PVC name that want to take snapshot 25 | ``` 26 | 27 | Apply the YAML file: 28 | 29 | ```text 30 | kubectl apply -f example-snapshot.yaml 31 | ``` 32 | 33 | After the VolumeSnapshot object is created, a corresponding VolumeSnapshotContent object will be created by IOMesh. 34 | 35 | ```bash 36 | kubectl get Volumesnapshots example-snapshot 37 | ``` 38 | 39 | ```output 40 | NAME SOURCEPVC RESTORESIZE SNAPSHOTCONTENT CREATIONTIME 41 | example-snapshot mongodb-data-pvc 6Gi snapcontent-fb64d696-725b-4f1b-9847-c95e25b68b13 10h 42 | ``` 43 | 44 | ## Restore 45 | 46 | Users can restore a volume snapshot by creating a PVC in which the `dataSource` field references to a snapshot. 
47 | 48 | For example: 49 | 50 | ```yaml 51 | apiVersion: v1 52 | kind: PersistentVolumeClaim 53 | metadata: 54 | name: example-restore 55 | spec: 56 | storageClassName: iomesh-csi-driver-default 57 | dataSource: 58 | name: example-snapshot 59 | kind: VolumeSnapshot 60 | apiGroup: snapshot.storage.k8s.io 61 | accessModes: 62 | - ReadWriteOnce 63 | resources: 64 | requests: 65 | storage: 6Gi 66 | ``` 67 | 68 | Apply the YAML file: 69 | 70 | ```bash 71 | kubectl apply -f example-restore.yaml 72 | ``` 73 | 74 | ## Clone 75 | 76 | Users can clone a persistent volume (PV) by creating a PVC while adding a dataSource linked to an existing PVC in the same namespace. 77 | 78 | For example: 79 | 80 | ```yaml 81 | apiVersion: v1 82 | kind: PersistentVolumeClaim 83 | metadata: 84 | name: cloned-pvc 85 | spec: 86 | storageClassName: iomesh-csi-driver-default 87 | dataSource: 88 | name: existing-pvc # an existing PVC in the same namespace 89 | kind: PersistentVolumeClaim 90 | accessModes: 91 | - ReadWriteOnce 92 | resources: 93 | requests: 94 | storage: 5Gi 95 | volumeMode: Block 96 | ``` 97 | 98 | Apply the YAML file: 99 | 100 | ```bash 101 | kubectl apply -f example-clone.yaml 102 | ``` 103 | 104 | After applying the YAML file, a clone of `existing-pvc` will be created. 105 | 106 | There are some limitations on clone operation: 107 | 108 | 1. A cloned PVC must exist in the same namespace as the original PVC. 109 | 2. Both PVCs must have the same StorageClass and VolumeMode setting. 110 | -------------------------------------------------------------------------------- /website/versioned_docs/version-0.9.5/about-iomesh/introduction.md: -------------------------------------------------------------------------------- 1 | --- 2 | id: version-0.9.5-introduction 3 | title: Introduction 4 | sidebar_label: Introduction 5 | original_id: introduction 6 | --- 7 | 8 | ## What is IOMesh? 
9 | 10 | IOMesh is a distributed storage system specially designed for Kubernetes workloads, providing reliable persistent storage capabilities for containerized stateful applications such as MySQL, Cassandra, and MongoDB. 11 | 12 | - Thousands of Pods are created and destroyed every minute in Kubernetes clusters. IOMesh is built for this kind of highly dynamic and large-scale workloads in the cloud-native era. It is designed with this in mind from the beginning to provide the performance, reliability, and scalability required by cloud-native applications. 13 | - IOMesh runs natively on Kubernetes and fully utilizes Kubernetes' capabilities. Therefore, the operation teams can leverage the standard Kubernetes APIs to uniformly manage the applications and IOMesh, which integrates perfectly with existing DevOps processes. 14 | - IOMesh enables users to start at a small scale and expand the storage at will by adding disks or nodes. 15 | 16 | ## Key Features 17 | 18 | ### High Performance 19 | Database is one of the key applications to measure storage performance. IOMesh performs very well in the database performance benchmark tests with stable read and write latency and high QPS and TPS, which means that it can provide stable data services. 20 | ### No Kernel Dependencies 21 | IOMesh runs entirely in user space and can provide reliable service through effective software fault isolation. When a problem occurs, other applications running on the same node can continue to run without causing the entire system to crash. In addition, deploying and maintaining IOMesh is very easy since there is no need to install any kernel modules and you do not need to worry about kernel version compatibility. 22 | ### Storage Performance Tiering 23 | IOMesh supports flexible deployment of hybrid disks including NVMe SSD, SATA SSD and HDD.
This can help users make the most of their storage investment, and control the costs to each block while maximizing the storage performance. 24 | 25 | ## Architecture 26 | 27 | ![img](https://lh3.googleusercontent.com/4Yssin2b7eH5xylvgJ5Do0khj8Dlfv_cG8F-sHrJ7ztah5ixKleRvL_uX_b8maQ1w72lPoallwviBzvCMVgQUFrV6y2yFWNmXk4wQNAMNfaLMMeRQ9cIWznvF-gZeOeP4SnGUOsF) 28 | 29 | ## Compatibility List with Kubernetes 30 | 31 | | IOMesh Version | Kubernetes Version | 32 | | -------------- | ------------------ | 33 | | v0.9.x | v1.17 or higher | 34 | -------------------------------------------------------------------------------- /website/versioned_docs/version-0.9.5/additional-info/best-practice-in-production.md: -------------------------------------------------------------------------------- 1 | --- 2 | id: version-0.9.5-best-practice-in-production 3 | title: Best Practice in Production 4 | sidebar_label: Best Practice in Production 5 | original_id: best-practice-in-production 6 | --- 7 | 8 | ## Recommended Hardware Configuration for Production Environment 9 | 10 | The recommended hardware configuration for production is as follows. 
11 | 12 | | Hardware | Requirements | No | 13 | | --------------- | ------------------------ | ---- | 14 | | CPU | X86 64 (4 cores or more) | 2 | 15 | | RAM | 64 GB or more | | 16 | | Disk Controller | HBA Mode | 1 | 17 | | SSD | 960 GB or more | 2 | 18 | | HDD | SAS/SATA HDD | 1 | 19 | | Boot Device | 64 GB | 1 | 20 | | Ethernet NIC | 10 GbE | \>=1 | 21 | | Ethernet Switch | 10 GbE | 1 | 22 | 23 | #### 24 | -------------------------------------------------------------------------------- /website/versioned_docs/version-0.9.5/additional-info/deployment-architectures.md: -------------------------------------------------------------------------------- 1 | --- 2 | id: version-0.9.5-deploy-architectures 3 | title: Deployment Architectures 4 | sidebar_label: Deployment Architectures 5 | original_id: deploy-architectures 6 | --- 7 | 8 | ## Hyperconverged Deployment 9 | -------------------------------------------------------------------------------- /website/versioned_docs/version-0.9.5/additional-info/performance-testing.md: -------------------------------------------------------------------------------- 1 | --- 2 | id: version-0.9.5-performance-testing 3 | title: Performance Testing 4 | sidebar_label: Performance Testing 5 | original_id: performance-testing 6 | --- 7 | 8 | ## FIO-based Performance Testing 9 | 10 | 1. Create a pod for fio test 11 | 12 | ```shell 13 | kubectl apply -f http://www.iomesh.com/docs/assets/iomesh-csi-driver/example/fio.yaml 14 | ``` 15 | 16 | 2. Wait until fio-pvc bound is finished and fio pod is ready 17 | 18 | ```shell 19 | kubectl get pvc fio-pvc 20 | ``` 21 | 22 | ```output 23 | NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE 24 | fio-pvc Bound pvc-d7916b34-50cd-49bd-86f9-5287db1265cb 30Gi RWO iomesh-csi-driver-default 15s 25 | ``` 26 | 27 | ```shell 28 | kubectl wait --for=condition=Ready pod/fio 29 | ``` 30 | 31 | ```output 32 | pod/fio condition met 33 | ``` 34 | 35 | 3. 
Run fio tests 36 | 37 | ```shell 38 | kubectl exec -it fio sh 39 | fio --name fio --filename=/mnt/fio --bs=256k --rw=write --ioengine=libaio --direct=1 --iodepth=128 --numjobs=1 --size=$(blockdev --getsize64 /mnt/fio) 40 | fio --name fio --filename=/mnt/fio --bs=4k --rw=randread --ioengine=libaio --direct=1 --iodepth=128 --numjobs=1 --size=$(blockdev --getsize64 /mnt/fio) 41 | ``` 42 | 43 | 4. Clean up 44 | 45 | ```shell 46 | kubectl delete pod fio 47 | kubectl delete pvc fio-pvc 48 | # You need to delete pv when reclaimPolicy is Retain 49 | kubectl delete pvc-d7916b34-50cd-49bd-86f9-5287db1265cb 50 | ``` 51 | -------------------------------------------------------------------------------- /website/versioned_docs/version-0.9.5/deploy/prerequisites.md: -------------------------------------------------------------------------------- 1 | --- 2 | id: version-0.9.5-prerequisites 3 | title: Prerequisites 4 | sidebar_label: Prerequisites 5 | original_id: prerequisites 6 | --- 7 | 8 | ## Installation Requirements 9 | 10 | - A Kubernetes 1.17+ cluster with at least 3 worker nodes 11 | - Each worker node needs 12 | - At least one idle SSD for IOMesh journal and cache 13 | - At least one idle HDD for IOMesh datastore 14 | - A 10GbE (or higher) network interface for IOMesh data network connectivity 15 | - 100G disk space for hostpath-provisioner 16 | 17 | ## Setup Worker Node 18 | 19 | For each Kubernetes worker node that will run IOMesh, do the following steps: 20 | 21 | ### Setup Open-ISCSI 22 | 23 | 1. Install open-iscsi: 24 | 25 | 26 | 27 | 28 | 29 | ```shell 30 | sudo yum install iscsi-initiator-utils -y 31 | ``` 32 | 33 | 34 | 35 | ```shell 36 | sudo apt-get install open-iscsi -y 37 | ``` 38 | 39 | 40 | 41 | 2. Edit `/etc/iscsi/iscsid.conf` by setting `node.startup` to `manual`: 42 | 43 | ```shell 44 | sudo sed -i 's/^node.startup = automatic$/node.startup = manual/' /etc/iscsi/iscsid.conf 45 | ``` 46 | 47 | 3. 
Disable SELinux: 48 | 49 | ```shell 50 | sudo setenforce 0 51 | sudo sed -i 's/^SELINUX=enforcing$/SELINUX=permissive/' /etc/selinux/config 52 | ``` 53 | 54 | 4. Start `iscsid` service: 55 | 56 | ```shell 57 | sudo systemctl enable --now iscsid 58 | ``` 59 | 60 | ### Setup Local Metadata Store 61 | 62 | IOMesh uses local path `/opt/iomesh` to store metadata. Ensure that there is at least 100G free space at `/opt`. 63 | 64 | ### Setup Data Network 65 | 66 | To avoid contention on network bandwidth, it is necessary to set up a separate network segment for the IOMesh cluster. The `dataCIDR` defines the IP block for IOMesh data network. Every node running IOMesh should have an interface whose IP address belongs to the `dataCIDR`. 67 | -------------------------------------------------------------------------------- /website/versioned_docs/version-0.9.5/deploy/setup-snapshotclass.md: -------------------------------------------------------------------------------- 1 | --- 2 | id: version-0.9.5-setup-snapshotclass 3 | title: Setup SnapshotClass 4 | sidebar_label: Setup SnapshotClass 5 | original_id: setup-snapshotclass 6 | --- 7 | 8 | [Kubernetes VolumeSnapshotClass](https://kubernetes.io/docs/concepts/storage/volume-snapshot-classes/) objects are analogous to StorageClasses. They help define multiple classes of storage and are referenced by Volume Snapshots to associate the snapshot with the required Snapshot Class. Each Volume Snapshot is associated with a single Volume Snapshot Class.
9 | 10 | A Volume Snapshot Class is created with this definition: 11 | 12 | ```yaml 13 | apiVersion: snapshot.storage.k8s.io/v1beta1 14 | kind: VolumeSnapshotClass 15 | metadata: 16 | name: iomesh-csi-driver-default 17 | driver: com.iomesh.csi-driver # <-- driver.name in iomesh-values.yaml 18 | deletionPolicy: Retain 19 | ``` 20 | -------------------------------------------------------------------------------- /website/versioned_docs/version-0.9.5/deploy/setup-storageclass.md: -------------------------------------------------------------------------------- 1 | --- 2 | id: version-0.9.5-setup-storageclass 3 | title: Setup StorageClass 4 | sidebar_label: Setup StorageClass 5 | original_id: setup-storageclass 6 | --- 7 | 8 | The IOMesh storage class parameters are: 9 | 10 | | Parameters | Values | Default | Description | 11 | | ------------------------- | ----------------------------- | ------- | ---------------------------------- | 12 | | csi.storage.k8s.io/fstype | "xfs", "ext2", "ext3", "ext4" | "ext4" | volume File system type | 13 | | replicaFactor | "1", "2", "3" | "2" | replica factor | 14 | | thinProvision | "true", "false" | "true" | thin provision or thick provision. | 15 | 16 | After IOMesh CSI driver was installed, a default StorageClass `iomesh-csi-driver` would be created. 
You may also create a new StorageClass with customized parameters: 17 | 18 | 19 | ```yaml 20 | kind: StorageClass 21 | apiVersion: storage.k8s.io/v1 22 | metadata: 23 | name: my-iomesh-csi-driver-default 24 | provisioner: com.iomesh.csi-driver # <-- driver.name in iomesh-values.yaml 25 | reclaimPolicy: Retain 26 | allowVolumeExpansion: true 27 | parameters: 28 | # "ext4" / "ext3" / "ext2" / "xfs" 29 | csi.storage.k8s.io/fstype: "ext4" 30 | # "1" / "2" / "3" 31 | replicaFactor: "2" 32 | # "true" / "false" 33 | thinProvision: "true" 34 | volumeBindingMode: Immediate 35 | ``` 36 | -------------------------------------------------------------------------------- /website/versioned_docs/version-0.9.5/iomesh-operations/cluster-operations.md: -------------------------------------------------------------------------------- 1 | --- 2 | id: version-0.9.5-cluster-operations 3 | title: Cluster Operations 4 | sidebar_label: Cluster Operations 5 | original_id: cluster-operations 6 | --- 7 | 8 | IOMesh Cluster can be scaled and upgraded without interrupting the online services. 9 | 10 | ## Scale IOMesh Storage Cluster 11 | 12 | ### Meta Server 13 | 14 | #### Scale out 15 | 16 | Edit `meta/replicaCount` in `iomesh-values.yaml`. It is recommanded to have 3~5 Meta Servers in a production environment. 
17 | 18 | Example: 19 | ```yaml 20 | meta: 21 | replicaCount: 3 22 | ``` 23 | 24 | You may also want to adjust `meta/podPolicy` for higher resilience: 25 | 26 | ```yaml 27 | meta: 28 | podPolicy: 29 | affinity: 30 | nodeAffinity: 31 | requiredDuringSchedulingIgnoredDuringExecution: 32 | nodeSelectorTerms: 33 | - matchExpressions: 34 | - key: kubernetes.io/e2e-az-name 35 | operator: In 36 | values: 37 | - az1 38 | - az2 39 | ``` 40 | 41 | Then apply the change: 42 | 43 | > **_NOTE_: replace `my-iomesh` with your release name.** 44 | 45 | ```bash 46 | helm upgrade --namespace iomesh-system my-iomesh iomesh/iomesh --values iomesh-values.yaml 47 | ``` 48 | 49 | ### Chunk Server 50 | 51 | #### Scale out 52 | 53 | Edit `chunk/replicaCount` in `iomesh-values.yaml`. 54 | 55 | ```yaml 56 | chunk: 57 | replicaCount: 5 # <- increase this number to scale Chunk Server 58 | ``` 59 | 60 | Then apply the change: 61 | 62 | > **_NOTE_: replace `my-iomesh` with your release name.** 63 | 64 | ```bash 65 | helm upgrade --namespace iomesh-system my-iomesh iomesh/iomesh --values iomesh-values.yaml 66 | ``` 67 | 68 | ## Upgrade IOMesh storage cluster 69 | 70 | Follow the steps below to upgrade the IOMesh once a new version is released. 71 | 72 | > **_NOTE_: If you only have 1 replica of meta server or chunk server, the upgrade process will never start.** 73 | 74 | 1. Export default config `iomesh-values.yaml` from Chart 75 | 76 | > **_NOTE_: If you already exported the config, you can skip this step.** 77 | 78 | ```bash 79 | helm show values iomesh/iomesh > iomesh-values.yaml 80 | ``` 81 | 82 | 2. Edit `iomesh-values.yaml` 83 | 84 | ```yaml 85 | # The version of the IOMeshCluster. You get new release from: http://iomesh.com/docs/release/releases 86 | version: v5.0.0-rc5 87 | ``` 88 | 89 | 3. 
Upgrade the IOMesh Cluster 90 | 91 | > **_NOTE_: `my-iomesh` is the release name; you may modify it.** 92 | 93 | ```bash 94 | helm upgrade --namespace iomesh-system my-iomesh iomesh/iomesh --values iomesh-values.yaml 95 | ``` 96 | 97 | 4. Wait until the new chunk server pods are ready. 98 | 99 | ```bash 100 | watch kubectl get pod --namespace iomesh-system 101 | ``` 102 | 103 | ## Uninstall IOMesh storage cluster 104 | 105 | > **_/!\ Attention_: All data will be lost after you uninstall an IOMesh storage cluster, including PVCs created with IOMesh StorageClass.** 106 | 107 | Run the command below to uninstall an IOMesh cluster. 108 | 109 | > **_NOTE_: You may replace `my-iomesh` with your own name.** 110 | 111 | ```bash 112 | helm uninstall --namespace iomesh-system my-iomesh 113 | ``` 114 | 115 | [1]: http://www.iomesh.com/docs/installation/setup-iomesh-storage#mount-device 116 | -------------------------------------------------------------------------------- /website/versioned_docs/version-0.9.5/iomesh-operations/failover.md: -------------------------------------------------------------------------------- 1 | --- 2 | id: version-0.9.5-failover 3 | title: Failover 4 | sidebar_label: Failover 5 | original_id: failover 6 | --- 7 | 8 | ## Node Failure 9 | TBD 10 | 11 | 12 | ## Disk Failure 13 | TBD 14 | -------------------------------------------------------------------------------- /website/versioned_docs/version-0.9.5/iomesh-operations/monitoring.md: -------------------------------------------------------------------------------- 1 | --- 2 | id: version-0.9.5-monitoring 3 | title: Monitoring 4 | sidebar_label: Monitoring 5 | original_id: monitoring 6 | --- 7 | 8 | IOMesh cluster can be monitored by Prometheus and Grafana.
9 | 10 | ## Integrating with Prometheus 11 | 12 | If Prometheus was installed by [Prometheus Operator][1] at the same Kubernetes cluster with IOMesh, just modify `iomesh-values.yaml` with: 13 | 14 | ```yaml 15 | serviceMonitor: 16 | create: true 17 | ``` 18 | 19 | Then upgrade the existing IOMesh Cluster: 20 | 21 | > **_NOTE_: You may replace `my-iomesh` with your release name.** 22 | 23 | ```bash 24 | helm upgrade --namespace iomesh-system my-iomesh iomesh/iomesh --values iomesh-values.yaml 25 | ``` 26 | 27 | The exporter will be created and metric data would be collected by Prometheus automatically. 28 | 29 | It is also possible to configure Prometheus mannually by importing [iomesh-prometheus-kubernetes-sd-example.yaml][4]. 30 | 31 | ## Integrating with Grafana 32 | 33 | Download and import [iomesh-dashboard.json][3] to any existing Grafana. 34 | 35 | [1]: https://github.com/prometheus-operator/prometheus-operator 36 | [2]: https://grafana.com/grafana/download 37 | [3]: http://www.iomesh.com/docs/assets/iomesh-operation/iomesh-dashboard.json 38 | [4]: http://www.iomesh.com/docs/assets/iomesh-operation/iomesh-prometheus-kubernetes-sd-example.yaml 39 | -------------------------------------------------------------------------------- /website/versioned_docs/version-0.9.5/stateful-applications/iomesh-for-mongodb.md: -------------------------------------------------------------------------------- 1 | --- 2 | id: version-0.9.5-iomesh-for-mongodb 3 | title: IOMesh for MongoDB 4 | sidebar_label: IOMesh for MongoDB 5 | original_id: iomesh-for-mongodb 6 | --- 7 | 8 | ## Setup k8s Cluster Storage 9 | 10 | 1. 
Create a file named `iomesh-mongodb-sc.yaml` with the following content: 11 | 12 | ```text 13 | iomesh-mongodb-sc.yaml 14 | ``` 15 | 16 | ```output 17 | kind: StorageClass 18 | apiVersion: storage.k8s.io/v1 19 | metadata: 20 | name: iomesh-mongodb-sc 21 | provisioner: com.iomesh.csi-driver # driver.name in values.yaml when install IOMesh 22 | reclaimPolicy: Retain 23 | allowVolumeExpansion: true 24 | parameters: 25 | csi.storage.k8s.io/fstype: "ext4" 26 | replicaFactor: "2" 27 | thinProvision: "true" 28 | ``` 29 | 30 | 2. Apply the yaml config: 31 | 32 | ```bash 33 | kubectl apply -f iomesh-mongodb-sc.yaml 34 | ``` 35 | 36 | ## Deploy MongoDB 37 | 38 | ### Create a headless Service for MongoDB 39 | 40 | 1. Create a Service used for DNS lookups between MongoDB Pods and clients within your cluster 41 | 42 | ```text 43 | mongodb-service.yaml 44 | ``` 45 | 46 | ```output 47 | apiVersion: v1 48 | kind: Service 49 | metadata: 50 | name: mongo 51 | labels: 52 | name: mongo 53 | spec: 54 | ports: 55 | - port: 27017 56 | targetPort: 27017 57 | clusterIP: None 58 | selector: 59 | role: mongo 60 | ``` 61 | 62 | 2. Apply the yaml config: 63 | 64 | ```bash 65 | kubectl apply -f mongodb-service.yaml 66 | ``` 67 | 68 | ### Create MongoDB cluster use pv provided for IOMesh Storage 69 | 70 | 1. 
Use StatefulSet to create a MongoDB cluster 71 | 72 | ```text 73 | mongodb-statefulset.yaml 74 | ``` 75 | 76 | ```output 77 | apiVersion: apps/v1beta1 78 | kind: StatefulSet 79 | metadata: 80 | name: mongo 81 | spec: 82 | selector: 83 | matchLabels: 84 | role: mongo 85 | environment: test 86 | serviceName: "mongo" 87 | replicas: 3 88 | template: 89 | metadata: 90 | labels: 91 | role: mongo 92 | environment: test 93 | spec: 94 | terminationGracePeriodSeconds: 10 95 | containers: 96 | - name: mongo 97 | image: mongo 98 | command: 99 | - mongod 100 | - "--replSet" 101 | - rs0 102 | - "--smallfiles" 103 | - "--noprealloc" 104 | ports: 105 | - containerPort: 27017 106 | volumeMounts: 107 | - name: mongo-persistent-storage 108 | mountPath: /data/db 109 | - name: mongo-sidecar 110 | image: cvallance/mongo-k8s-sidecar 111 | env: 112 | - name: MONGO_SIDECAR_POD_LABELS 113 | value: "role=mongo,environment=test" 114 | volumeClaimTemplates: 115 | - metadata: 116 | name: mongodb-data 117 | spec: 118 | accessModes: [ "ReadWriteOnce" ] 119 | storageClassName: iomesh-mongodb-sc # storageClass created above 120 | resources: 121 | requests: 122 | storage: 10Gi 123 | ``` 124 | 125 | 2. Apply the yaml config: 126 | 127 | ```bash 128 | kubectl apply -f mongodb-statefulset.yaml 129 | ``` 130 | 131 | IOMesh Storage will create Persistent Volumes for each MongoDB pod. These volumes use the ext4 file system with a replica factor of 2 and are thin provisioned.
132 | 133 | ## Operate MongoDB Data 134 | 135 | Users can use the features provided by IOMesh storage to perform operations such as expansion/snapshot/rollback/clone of the Persistent Volumes where MongoDB data is located; see [application-operations](http://iomesh.com/docs/storage-usage/application-operations) for details. 136 | -------------------------------------------------------------------------------- /website/versioned_docs/version-0.9.5/stateful-applications/iomesh-for-mysql.md: -------------------------------------------------------------------------------- 1 | --- 2 | id: version-0.9.5-iomesh-for-mysql 3 | title: IOMesh for MySQL 4 | sidebar_label: IOMesh for MySQL 5 | original_id: iomesh-for-mysql 6 | --- 7 | 8 | ## Setup k8s Cluster Storage 9 | 10 | 1. Create a file named `iomesh-mysql-sc.yaml` with the following content: 11 | 12 | ```text 13 | iomesh-mysql-sc.yaml 14 | ``` 15 | 16 | ```output 17 | kind: StorageClass 18 | apiVersion: storage.k8s.io/v1 19 | metadata: 20 | name: iomesh-mysql-sc 21 | provisioner: com.iomesh.csi-driver # driver.name in values.yaml when install IOMesh cluster 22 | reclaimPolicy: Retain 23 | allowVolumeExpansion: true 24 | parameters: 25 | csi.storage.k8s.io/fstype: "ext4" 26 | replicaFactor: "2" 27 | thinProvision: "true" 28 | ``` 29 | 30 | 2. Apply the yaml config: 31 | 32 | ```bash 33 | kubectl apply -f iomesh-mysql-sc.yaml 34 | ``` 35 | 36 | ## Deploy MySQL 37 | 38 | 1. Create a file named `mysql-deployment.yaml`. It describes a Deployment that runs MySQL and creates a PVC that consumes the IOMesh storage.
39 | 40 | ```text 41 | mysql-deployment.yaml 42 | ``` 43 | 44 | ```output 45 | apiVersion: v1 46 | kind: PersistentVolumeClaim 47 | metadata: 48 | name: iomesh-mysql-pvc 49 | spec: 50 | storageClassName: iomesh-mysql-sc 51 | accessModes: 52 | - ReadWriteOnce 53 | resources: 54 | requests: 55 | storage: 10Gi 56 | --- 57 | apiVersion: v1 58 | kind: Service 59 | metadata: 60 | name: mysql 61 | spec: 62 | ports: 63 | - port: 3306 64 | selector: 65 | app: mysql 66 | clusterIP: None 67 | --- 68 | apiVersion: apps/v1 69 | kind: Deployment 70 | metadata: 71 | name: mysql 72 | spec: 73 | selector: 74 | matchLabels: 75 | app: mysql 76 | strategy: 77 | type: Recreate 78 | template: 79 | metadata: 80 | labels: 81 | app: mysql 82 | spec: 83 | containers: 84 | - image: mysql:5.6 85 | name: mysql 86 | env: 87 | # Use secret in real usage 88 | - name: MYSQL_ROOT_PASSWORD 89 | value: password 90 | ports: 91 | - containerPort: 3306 92 | name: mysql 93 | volumeMounts: 94 | - name: mysql-persistent-storage 95 | mountPath: /var/lib/mysql 96 | volumes: 97 | - name: mysql-persistent-storage 98 | persistentVolumeClaim: 99 | claimName: iomesh-mysql-pvc # pvc from iomesh created above 100 | ``` 101 | 102 | 2. 
Apply the yaml config: 103 | 104 | ```bash 105 | kubectl apply -f mysql-deployment.yaml 106 | ``` 107 | 108 | ## Operate MySQL Data 109 | 110 | User can use the feature provided by IOMesh storage to perform operations such as expansion/snapshot/rollback/clone of the pv where MySQL data is located, see reference for details [application-operations](http://iomesh.com/docs/storage-usage/application-operations) 111 | -------------------------------------------------------------------------------- /website/versioned_docs/version-0.9.5/volume-operations/create-volume.md: -------------------------------------------------------------------------------- 1 | --- 2 | id: version-0.9.5-create-volume 3 | title: Create Volume 4 | sidebar_label: Create Volume 5 | original_id: create-volume 6 | --- 7 | 8 | A volume can be created by the following YAML. User should ensure that the corresponding StorageClass already exists. 9 | 10 | ```yaml 11 | apiVersion: v1 12 | kind: PersistentVolumeClaim 13 | metadata: 14 | name: iomesh-example-pvc 15 | spec: 16 | storageClassName: iomesh-example-sc 17 | accessModes: 18 | - ReadWriteOnce 19 | resources: 20 | requests: 21 | storage: 10Gi 22 | ``` 23 | -------------------------------------------------------------------------------- /website/versioned_docs/version-0.9.5/volume-operations/expand-volume.md: -------------------------------------------------------------------------------- 1 | --- 2 | id: version-0.9.5-expand-volume 3 | title: Expand Volume 4 | sidebar_label: Expand Volume 5 | original_id: expand-volume 6 | --- 7 | 8 | IOMesh volumes are allowed to be expanded after creation, no matter whether it is being used or not. 9 | 10 | Here is an example. 
Assume that there is a PVC named example-pvc whose capacity is 10Gi:
version-0.9.5-snapshot-restore-and-clone 3 | title: Snapshot, Restore And Clone 4 | sidebar_label: Snapshot, Restore And Clone 5 | original_id: snapshot-restore-and-clone 6 | --- 7 | 8 | ## Snapshot 9 | 10 | IOMesh provides the ability to create a snapshot of an existing persistent volume. 11 | 12 | A VolumeSnapshot object defines a request of taking a snapshot of the PVC. 13 | 14 | For example: 15 | 16 | ```text 17 | example-snapshot.yaml 18 | ``` 19 | 20 | ```output 21 | apiVersion: snapshot.storage.k8s.io/v1beta1 22 | kind: VolumeSnapshot 23 | metadata: 24 | name: example-snapshot 25 | spec: 26 | volumeSnapshotClassName: iomesh-csi-driver-default 27 | source: 28 | persistentVolumeClaimName: mongodb-data-pvc # PVC name that want to take snapshot 29 | ``` 30 | 31 | Apply the YAML file: 32 | 33 | ```text 34 | kubectl apply -f example-snapshot.yaml 35 | ``` 36 | 37 | After VolumeSnapshot object created, a corresponding VolumeSnapshotContent will be created by IOMesh. 38 | 39 | ```bash 40 | kubectl get Volumesnapshots example-snapshot 41 | ``` 42 | 43 | ```output 44 | NAME SOURCEPVC RESTORESIZE SNAPSHOTCONTENT CREATIONTIME 45 | example-snapshot mongodb-data-pvc 6Gi snapcontent-fb64d696-725b-4f1b-9847-c95e25b68b13 10h 46 | ``` 47 | 48 | ## Restore 49 | 50 | User can restore volume snapshots by creating a PVC which `dataSource` field reference to a snapshot. 
51 | 52 | For example: 53 | 54 | ```text 55 | restore.yaml 56 | ``` 57 | 58 | ```output 59 | apiVersion: v1 60 | kind: PersistentVolumeClaim 61 | metadata: 62 | name: example-restore 63 | spec: 64 | storageClassName: iomesh-csi-driver-default 65 | dataSource: 66 | name: example-snapshot 67 | kind: VolumeSnapshot 68 | apiGroup: snapshot.storage.k8s.io 69 | accessModes: 70 | - ReadWriteOnce 71 | resources: 72 | requests: 73 | storage: 6Gi 74 | ``` 75 | 76 | Apply the YAML file: 77 | 78 | ```bash 79 | kubectl apply -f example-restore.yaml 80 | ``` 81 | 82 | ## Clone 83 | 84 | Users can clone a volume by create a PVC while adding a dataSource that linked to an existing PVC in the same namespace. 85 | 86 | For example: 87 | 88 | ```yaml 89 | apiVersion: v1 90 | kind: PersistentVolumeClaim 91 | metadata: 92 | name: cloned-pvc 93 | spec: 94 | storageClassName: iomesh-csi-driver-default 95 | dataSource: 96 | name: existing-pvc # an existing PVC in the same namespace 97 | kind: PersistentVolumeClaim 98 | accessModes: 99 | - ReadWriteOnce 100 | resources: 101 | requests: 102 | storage: 5Gi 103 | volumeMode: Block 104 | ``` 105 | 106 | After applying it, a clone of `existing-pvc` will be created. 107 | 108 | There are some limitations on clone operation: 109 | 110 | 1. A cloned PVC must exist at the same namespace with the original PVC with same StorageClass. 111 | 2. The new and source PVC must have the same VolumeMode setting. 112 | -------------------------------------------------------------------------------- /website/versioned_docs/version-0.9.7/about-iomesh/introduction.md: -------------------------------------------------------------------------------- 1 | --- 2 | id: version-0.9.7-introduction 3 | title: Introduction 4 | sidebar_label: Introduction 5 | original_id: introduction 6 | --- 7 | 8 | ## What is IOMesh? 
9 | 10 | IOMesh is a distributed storage system specially designed for Kubernetes workloads, providing reliable persistent storage capabilities for containerized stateful applications such as MySQL, Cassandra, and MongoDB. 11 | 12 | - Thousands of Pods are created and destroyed every minute in Kubernetes clusters. IOMesh is built for this kind of highly dynamic and large-scale workloads in the cloud-native era. It is designed with this in mind from the beginning to provide the performance, reliability, and scalability required by cloud-native applications. 13 | - IOMesh runs natively on Kubernetes and fully utilizes the Kubernetes's capabilities. Therefore, the operation teams can leverage the standard Kubernetes APIs to uniformly manage the applications and IOMesh, which integrates perfectly with existing DevOps processes. 14 | - IOMesh enables users to start at a small scale and expand the storage at will by adding disks or nodes. 15 | 16 | ## Key Features 17 | 18 | ### High Performance 19 | Database is one of the key applications to measure storage performance. IOMesh performes very well in the database performance benchmark tests with stable read and write latency and high QPS and TPS, which means that it can provide stable data services. 20 | ### No Kernel Dependencies 21 | IOMesh runs entirely in user space and can provide reliable service through effective software fault isolation. When a problem occurs, other applications running on the same node can continue to run without causing the entire system to crash. In addition, deploying and maintaining the IOMesh are very easy since there is no need to install any kernel modules and you do not need to worry about the kernel version compatibility. 22 | ### Storage Performance Tiering 23 | IOMesh supports flexible deployment of hybrid disks including NVMe SSD, SATA SSD and HDD. 
This helps users make the most of their storage investment, controlling the cost of each storage tier while maximizing storage performance.
Clean up 44 | 45 | ```shell 46 | kubectl delete pod fio 47 | kubectl delete pvc fio-pvc 48 | # You need to delete pv when reclaimPolicy is Retain 49 | kubectl delete pvc-d7916b34-50cd-49bd-86f9-5287db1265cb 50 | ``` 51 | -------------------------------------------------------------------------------- /website/versioned_docs/version-0.9.7/iomesh-operations/monitoring.md: -------------------------------------------------------------------------------- 1 | --- 2 | id: version-0.9.7-monitoring 3 | title: Monitoring 4 | sidebar_label: Monitoring 5 | original_id: monitoring 6 | --- 7 | 8 | IOMesh cluster can be monitored by Prometheus and Grafana. 9 | 10 | ## Integrating with Prometheus 11 | 12 | If Prometheus was installed by [Prometheus Operator][1] at the same Kubernetes cluster with IOMesh, just modify `iomesh-values.yaml` with: 13 | 14 | ```yaml 15 | serviceMonitor: 16 | create: true 17 | ``` 18 | 19 | Then upgrade the existing IOMesh Cluster: 20 | 21 | > **_NOTE_: You may replace `my-iomesh` with your release name.** 22 | 23 | ```bash 24 | helm upgrade --namespace iomesh-system my-iomesh iomesh/iomesh --values iomesh-values.yaml 25 | ``` 26 | 27 | The exporter will be created and metric data would be collected by Prometheus automatically. 28 | 29 | It is also possible to configure Prometheus mannually by importing [iomesh-prometheus-kubernetes-sd-example.yaml][4]. 30 | 31 | ## Integrating with Grafana 32 | 33 | Download and import [iomesh-dashboard.json][3] to any existing Grafana. 
34 | 35 | [1]: https://github.com/prometheus-operator/prometheus-operator 36 | [2]: https://grafana.com/grafana/download 37 | [3]: https://raw.githubusercontent.com/iomesh/docs/master/docs/assets/iomesh-operation/ioemsh-dashobard.json 38 | [4]: https://raw.githubusercontent.com/iomesh/docs/master/docs/assets/iomesh-operation/iomesh-prometheus-kubernetes-sd-example.yaml 39 | -------------------------------------------------------------------------------- /website/versioned_docs/version-0.9.7/stateful-applications/iomesh-for-mongodb.md: -------------------------------------------------------------------------------- 1 | --- 2 | id: version-0.9.7-iomesh-for-mongodb 3 | title: IOMesh for MongoDB 4 | sidebar_label: IOMesh for MongoDB 5 | original_id: iomesh-for-mongodb 6 | --- 7 | 8 | ## Setup k8s Cluster Storage 9 | 10 | 1. Create a file named `iomesh-mongodb-sc.yaml` with the following content: 11 | 12 | ```text 13 | iomesh-mongodb-sc.yaml 14 | ``` 15 | 16 | ```output 17 | kind: StorageClass 18 | apiVersion: storage.k8s.io/v1 19 | metadata: 20 | name: iomesh-mongodb-sc 21 | provisioner: com.iomesh.csi-driver # driver.name in values.yaml when install IOMesh 22 | reclaimPolicy: Retain 23 | allowVolumeExpansion: true 24 | parameters: 25 | csi.storage.k8s.io/fstype: "ext4" 26 | replicaFactor: "2" 27 | thinProvision: "true" 28 | ``` 29 | 30 | 2. Apply the yaml config: 31 | 32 | ```bash 33 | kubectl apply -f iomesh-mongodb-sc.yaml 34 | ``` 35 | 36 | ## Deploy MongoDB 37 | 38 | ### Create a headless Service for MongoDB 39 | 40 | 1. Create a Service used for DNS lookups between MongoDB Pods and clients within your cluster 41 | 42 | ```text 43 | mongodb-service.yaml 44 | ``` 45 | 46 | ```output 47 | apiVersion: v1 48 | kind: Service 49 | metadata: 50 | name: mongo 51 | labels: 52 | name: mongo 53 | spec: 54 | ports: 55 | - port: 27017 56 | targetPort: 27017 57 | clusterIP: None 58 | selector: 59 | role: mongo 60 | ``` 61 | 62 | 2. 
Apply the yaml config: 63 | 64 | ```bash 65 | kubectl apply -f mongodb-service.yaml 66 | ``` 67 | 68 | ### Create MongoDB cluster use pv provided for IOMesh Storage 69 | 70 | 1. Use StatefulSet to create a MongoDB cluster 71 | 72 | ```text 73 | mongodb-statefulset.yaml 74 | ``` 75 | 76 | ```output 77 | apiVersion: apps/v1beta1 78 | kind: StatefulSet 79 | metadata: 80 | name: mongo 81 | spec: 82 | selector: 83 | matchLabels: 84 | role: mongo 85 | environment: test 86 | serviceName: "mongo" 87 | replicas: 3 88 | template: 89 | metadata: 90 | labels: 91 | role: mongo 92 | environment: test 93 | spec: 94 | terminationGracePeriodSeconds: 10 95 | containers: 96 | - name: mongo 97 | image: mongo 98 | command: 99 | - mongod 100 | - "--replSet" 101 | - rs0 102 | - "--smallfiles" 103 | - "--noprealloc" 104 | ports: 105 | - containerPort: 27017 106 | volumeMounts: 107 | - name: mongo-persistent-storage 108 | mountPath: /data/db 109 | - name: mongo-sidecar 110 | image: cvallance/mongo-k8s-sidecar 111 | env: 112 | - name: MONGO_SIDECAR_POD_LABELS 113 | value: "role=mongo,environment=test" 114 | volumeClaimTemplates: 115 | - metadata: 116 | name: mongodb-data 117 | spec: 118 | accessModes: [ "ReadWriteOnce" ] 119 | storageClassName: iomesh-mongodb-sc # storageClass created above 120 | resources: 121 | requests: 122 | storage: 10Gi 123 | ``` 124 | 125 | 2. Apply the yaml config: 126 | 127 | ```bash 128 | kubectl apply -f mongodb-statefulset.yaml 129 | ``` 130 | 131 | IOMesh Storage will create Persistent Volumes for each cassandra pod. These volumes use the ext4 file system with a replica factor of 2 and are thin provisioned. 
132 | 133 | ## Operate MongoDB Data 134 | 135 | User can use the feature provided by IOMesh storage to perform operations such as expansion/snapshot/rollback/clone of the Persistent Volumes where MongoDB data is located, see reference for details [application-operations](https://docs.iomesh.com/volume-operations/snapshot-restore-and-clone) 136 | -------------------------------------------------------------------------------- /website/versioned_docs/version-0.9.7/stateful-applications/iomesh-for-mysql.md: -------------------------------------------------------------------------------- 1 | --- 2 | id: version-0.9.7-iomesh-for-mysql 3 | title: IOMesh for MySQL 4 | sidebar_label: IOMesh for MySQL 5 | original_id: iomesh-for-mysql 6 | --- 7 | 8 | ## Setup k8s Cluster Storage 9 | 10 | 1. Create a file named `iomesh-mysql-sc.yaml` with the following content: 11 | 12 | ```text 13 | iomesh-mysql-sc.yaml 14 | ``` 15 | 16 | ```output 17 | kind: StorageClass 18 | apiVersion: storage.k8s.io/v1 19 | metadata: 20 | name: iomesh-mysql-sc 21 | provisioner: com.iomesh.csi-driver # driver.name in values.yaml when install IOMesh cluster 22 | reclaimPolicy: Retain 23 | allowVolumeExpansion: true 24 | parameters: 25 | csi.storage.k8s.io/fstype: "ext4" 26 | replicaFactor: "2" 27 | thinProvision: "true" 28 | ``` 29 | 30 | 2. Apply the yaml config: 31 | 32 | ```bash 33 | kubectl apply -f iomesh-mysql-sc.yaml 34 | ``` 35 | 36 | ## Deploy MySQL 37 | 38 | 1. Create a file named `mysql-deployment.yaml`. It describes a Deployment that runs MySQL and creates a PVC that consumes the IOMesh storage. 
39 | 40 | ```text 41 | mysql-deployment.yaml 42 | ``` 43 | 44 | ```output 45 | apiVersion: v1 46 | kind: PersistentVolumeClaim 47 | metadata: 48 | name: iomesh-mysql-pvc 49 | spec: 50 | storageClassName: iomesh-mysql-sc 51 | accessModes: 52 | - ReadWriteOnce 53 | resources: 54 | requests: 55 | storage: 10Gi 56 | --- 57 | apiVersion: v1 58 | kind: Service 59 | metadata: 60 | name: mysql 61 | spec: 62 | ports: 63 | - port: 3306 64 | selector: 65 | app: mysql 66 | clusterIP: None 67 | --- 68 | apiVersion: apps/v1 69 | kind: Deployment 70 | metadata: 71 | name: mysql 72 | spec: 73 | selector: 74 | matchLabels: 75 | app: mysql 76 | strategy: 77 | type: Recreate 78 | template: 79 | metadata: 80 | labels: 81 | app: mysql 82 | spec: 83 | containers: 84 | - image: mysql:5.6 85 | name: mysql 86 | env: 87 | # Use secret in real usage 88 | - name: MYSQL_ROOT_PASSWORD 89 | value: password 90 | ports: 91 | - containerPort: 3306 92 | name: mysql 93 | volumeMounts: 94 | - name: mysql-persistent-storage 95 | mountPath: /var/lib/mysql 96 | volumes: 97 | - name: mysql-persistent-storage 98 | persistentVolumeClaim: 99 | claimName: iomesh-mysql-pvc # pvc from iomesh created above 100 | ``` 101 | 102 | 2. 
Apply the yaml config: 103 | 104 | ```bash 105 | kubectl apply -f mysql-deployment.yaml 106 | ``` 107 | 108 | ## Operate MySQL Data 109 | 110 | User can use the feature provided by IOMesh storage to perform operations such as expansion/snapshot/rollback/clone of the pv where MySQL data is located, see reference for details [application-operations](https://docs.iomesh.com/volume-operations/snapshot-restore-and-clone) 111 | -------------------------------------------------------------------------------- /website/versioned_docs/version-0.9.7/volume-operations/create-volume.md: -------------------------------------------------------------------------------- 1 | --- 2 | id: version-0.9.7-create-volume 3 | title: Create Volume 4 | sidebar_label: Create Volume 5 | original_id: create-volume 6 | --- 7 | 8 | A volume can be created by the following YAML. User should ensure that the corresponding StorageClass already exists. 9 | 10 | ```yaml 11 | apiVersion: v1 12 | kind: PersistentVolumeClaim 13 | metadata: 14 | name: iomesh-example-pvc 15 | spec: 16 | storageClassName: iomesh-example-sc 17 | accessModes: 18 | - ReadWriteOnce 19 | resources: 20 | requests: 21 | storage: 10Gi 22 | ``` 23 | -------------------------------------------------------------------------------- /website/versioned_docs/version-v0.11.1/about-iomesh/introduction.md: -------------------------------------------------------------------------------- 1 | --- 2 | id: version-v0.11.1-introduction 3 | title: Introduction 4 | sidebar_label: Introduction 5 | original_id: introduction 6 | --- 7 | 8 | ## What is IOMesh? 9 | 10 | IOMesh is a distributed storage system specially designed for Kubernetes workloads, providing reliable persistent storage capabilities for containerized stateful applications such as MySQL, Cassandra, and MongoDB. 11 | 12 | - Thousands of Pods are created and destroyed every minute in Kubernetes clusters. 
IOMesh is built for this kind of highly dynamic and large-scale workloads in the cloud-native era. It is designed with this in mind from the beginning to provide the performance, reliability, and scalability required by cloud-native applications. 13 | - IOMesh runs natively on Kubernetes and fully utilizes the Kubernetes's capabilities. Therefore, the operation teams can leverage the standard Kubernetes APIs to uniformly manage the applications and IOMesh, which integrates perfectly with existing DevOps processes. 14 | - IOMesh enables users to start at a small scale and expand the storage at will by adding disks or nodes. 15 | 16 | ## Key Features 17 | 18 | ### High Performance 19 | Database is one of the key applications to measure storage performance. IOMesh performes very well in the database performance benchmark tests with low and stable read/write latencies and high QPS/TPS, meaning to provide stable data services. 20 | ### No Kernel Dependencies 21 | IOMesh runs entirely in user space and can provide reliable services through effective software fault isolation. When a problem occurs, other applications running at the same node can continue to run without causing entire system crash. In addition, it is very easy to deploy and maintain IOMesh since you don't need to install any kernel modules and don't need to worry about kernel version compatibility at all. 22 | ### Storage Performance Tiering 23 | IOMesh supports flexible deployment of hybrid disks including NVMe SSD, SATA SSD and HDD. This can help users make the most of their storage investment, minimize the cost of each block while maximizing the storage performance. 
24 | 25 | ## Architecture 26 | 27 | ![IOMesh arch](https://user-images.githubusercontent.com/78140947/122766241-e2352c00-d2d3-11eb-9630-bb5b428c3178.png) 28 | -------------------------------------------------------------------------------- /website/versioned_docs/version-v0.11.1/deploy/prerequisites.md: -------------------------------------------------------------------------------- 1 | --- 2 | id: version-v0.11.1-prerequisites 3 | title: Prerequisites 4 | sidebar_label: Prerequisites 5 | original_id: prerequisites 6 | --- 7 | 8 | ## Installation Requirements 9 | #### Kubernetes Cluster Requirements 10 | A Kubernetes (from v1.17 to v1.24) cluster with at least 3 worker nodes. 11 | 12 | #### CPU and Memory Requirements 13 | ##### CPU 14 | At least 6 cores on each worker node. 15 | 16 | ##### Memory 17 | At least 12GB RAM on each worker node. 18 | 19 | #### Disk Requirements 20 | ##### Cache Disk 21 | * All-flash mode: no configuration is required. 22 | * Hybrid-flash mode: there should be at least one available SSD on each worker node, and the SSD capacity should be larger than 60 GB. 23 | 24 | ##### Data Disk 25 | * All-flash mode: there should be at least one available SSD on each worker node, and the SSD capacity should be larger than 60 GB. 26 | * Hybrid-flash mode: there should be at least one available HDD on each worker node, and the HDD capacity should be larger than 60 GB. 27 | 28 | #### Network Requirements 29 | Network cards of 10GbE or above are required for the IOMesh storage network. 30 | 31 | #### Reserved System Space 32 | At least 100GB of disk space is required in the /opt directory on each worker node for storing the IOMesh cluster metadata. 33 | 34 | ## Worker Node Setup 35 | Follow the steps below to set up each Kubernetes worker node that runs IOMesh. 36 | 37 | ### Set Up Open-iSCSI 38 | 39 | 1. Install open-iscsi. 
40 | 41 | 42 | 43 | 44 | 45 | ```shell 46 | sudo yum install iscsi-initiator-utils -y 47 | ``` 48 | 49 | 50 | 51 | ```shell 52 | sudo apt-get install open-iscsi -y 53 | ``` 54 | 55 | 56 | 57 | 2. Edit `/etc/iscsi/iscsid.conf` by setting `node.startup` to `manual`. 58 | 59 | ```shell 60 | sudo sed -i 's/^node.startup = automatic$/node.startup = manual/' /etc/iscsi/iscsid.conf 61 | ``` 62 | > **_NOTE_: The default value of the MaxRecvDataSegmentLength in /etc/iscsi/iscsi.conf is set at 32,768, and the maximum number of PVs is limited to 80,000 in IOMesh. To create PVs more than 80,000 in IOMesh, it is recommended to set the value of MaxRecvDataSegmentLength to 163,840 or above.** 63 | 64 | 3. Disable SELinux. 65 | 66 | ```shell 67 | sudo setenforce 0 68 | sudo sed -i 's/^SELINUX=enforcing$/SELINUX=permissive/' /etc/selinux/config 69 | ``` 70 | 71 | 4. Ensure `iscsi_tcp` kernel module is loaded. 72 | 73 | ```shell 74 | sudo modprobe iscsi_tcp 75 | sudo bash -c 'echo iscsi_tcp > /etc/modprobe.d/iscsi-tcp.conf' 76 | ``` 77 | 78 | 5. Start `iscsid` service. 79 | 80 | ```shell 81 | sudo systemctl enable --now iscsid 82 | ``` 83 | ### Set Up Local Metadata Store 84 | 85 | IOMesh stores metadata in the local path `/opt/iomesh`. Ensure that there is at least 100Gb of available space at `/opt`. 86 | 87 | ### Set Up Data Network 88 | 89 | To avoid contention on network bandwidth, set up a separate network for the IOMesh Cluster. The `dataCIDR` defines IP block for the IOMesh data network. Every node running IOMesh should have an interface with an IP address belonging to `dataCIDR`. 
90 | 91 | -------------------------------------------------------------------------------- /website/versioned_docs/version-v1.0.0/appendices/downloads.md: -------------------------------------------------------------------------------- 1 | --- 2 | id: version-v1.0.0-downloads 3 | title: Downloads 4 | sidebar_label: Downloads 5 | original_id: downloads 6 | --- 7 | 8 | 9 | IOMesh Offline Installation Package 10 | 11 | - Intel x86_64: 12 | - Download Link: 13 | 14 | 15 | - MD5: 16 | ``` 17 | 9e6dc20525e19ea67cf0f6bdfd3863d2 18 | ``` 19 | - Kunpeng AArch64: 20 | - Download Link: 21 | 22 | 23 | - MD5: 24 | ``` 25 | eaf369d8bfe307286f906aab64a1772f 26 | ``` 27 | 28 | - Hygon x86_64: 29 | - Download Link: 30 | 31 | 32 | - MD5: 33 | ``` 34 | 5a6dc308624c288cba4c286de4e4c724 35 | ``` 36 | 37 | IOMesh Cluster Dashboard File 38 | - Download Link: 39 | 40 | 41 | 42 | - MD5: 43 | ``` 44 | e063db897db783365ad476b8582c1534 45 | ``` 46 | 47 | 48 | -------------------------------------------------------------------------------- /website/versioned_docs/version-v1.0.0/appendices/faq.md: -------------------------------------------------------------------------------- 1 | --- 2 | id: version-v1.0.0-faq 3 | title: FAQ 4 | sidebar_label: FAQ 5 | original_id: faq 6 | --- 7 | 8 | Q: Failed to pull docker image during IOMesh installation, and the error log reads: `Too Many Requests - Server message: toomanyrequests: You have reached your pull rate limit.` 9 | 10 | A: Log in to your Docker account on the worker node that experienced the above issue, or alternatively, update your account. This issue typically occurs during online installation as opposed to offline installation. 
11 | -------------------------------------------------------------------------------- /website/versioned_docs/version-v1.0.0/cluster-operations/uninstall-cluster.md: -------------------------------------------------------------------------------- 1 | --- 2 | id: version-v1.0.0-uninstall-cluster 3 | title: Uninstall Cluster 4 | sidebar_label: Uninstall Cluster 5 | original_id: uninstall-cluster 6 | --- 7 | 8 | >_ATTENTION_: After uninstalling the IOMesh cluster, all data will be lost including all PVCs in the IOMesh cluster. 9 | 10 | To uninstall the IOMesh cluster, run the following command: 11 | 12 | ```shell 13 | helm uninstall --namespace iomesh-system iomesh 14 | ``` 15 | 16 | If there are IOMesh resources left after uninstalling IOMesh due to network or other issues, run the following command to clear all IOMesh resources. 17 | ```shell 18 | curl -sSL https://iomesh.run/uninstall_iomesh.sh | sh - 19 | ``` 20 | 21 | 22 | -------------------------------------------------------------------------------- /website/versioned_docs/version-v1.0.0/deploy-iomesh-cluster/activate-license.md: -------------------------------------------------------------------------------- 1 | --- 2 | id: version-v1.0.0-activate-license 3 | title: Activate License 4 | sidebar_label: Activate License 5 | original_id: activate-license 6 | --- 7 | 8 | IOMesh currently offers two editions: Community and Enterprise. They differ in the maximum number of worker nodes and level of business support provided. You can find more information on the IOMesh official website at https://www.iomesh.com/spec. 9 | 10 | IOMesh comes with a trial license when it is installed and deployed. However, it is recommended that you update the trial license to a subscription or perpetual license, depending on your IOMesh edition and how long you plan to use it. 11 | 12 | **Prerequisites** 13 | - **Community Edition**: Apply for the new license code at https://www.iomesh.com/license. 
14 | - **Enterprise Edition**: Get the license code either of a subscription or perpetual license from SmartX sales. 15 | 16 | **Procedure** 17 | 18 | 1. Create a file `license-code.txt` and save the license code in it. 19 | 20 | 2. Create a Kubernetes Secret. 21 | 22 | ```bash 23 | kubectl create secret generic iomesh-authorization-code -n iomesh-system --from-file=authorizationCode=./license-code.txt 24 | ``` 25 | 3. Add the field `spec.licenseSecretName` or update it if it exists. Fill in the value `iomesh-authorization-code`, which is the Kubernetes Secret name created above. 26 | 27 | ```bash 28 | kubectl edit iomesh -n iomesh-system 29 | ``` 30 | 31 | ```output 32 | spec: 33 | licenseSecretName: iomesh-authorization-code 34 | ``` 35 | 36 | 4. Confirm the update, and whether the update is successful will be shown in the output. 37 | 38 | ```bash 39 | kubectl describe iomesh -n iomesh-system # Whether the update is successful will be displayed in the events. 40 | ``` 41 | If the update fails, verify if you have entered the correct license code. If it still does not work, reset the field `spec.licenseSecretName`. 42 | 43 | 5. Verify that the license expiration date and other details are as expected. 44 | 45 | ```bash 46 | kubectl get iomesh -n iomesh-system -o=jsonpath='{.items[0].status.license}' 47 | ``` 48 | 49 | 50 | -------------------------------------------------------------------------------- /website/versioned_docs/version-v1.0.0/deploy-iomesh-cluster/prerequisites.md: -------------------------------------------------------------------------------- 1 | --- 2 | id: version-v1.0.0-prerequisites 3 | title: Prerequisites 4 | sidebar_label: Prerequisites 5 | original_id: prerequisites 6 | --- 7 | 8 | Before installing and deploying IOMesh, verify the following requirements. 9 | 10 | > _NOTE:_ Expanding an IOMesh cluster to multiple clusters is not currently supported. You should decide at the beginning whether to deploy one or multiple clusters. 
For multi-cluster deployment and operations, refer to [Multiple Cluster Management](../advanced-functions/manage-multiple-cluster.md). 11 | 12 | ## Cluster Requirements 13 | 14 | - A Kubernetes or OpenShift cluster with minimum 3 worker nodes. 15 | - The Kubernetes version should be 1.17-1.25 or OpenShift version should be 3.11-4.10. 16 | 17 | ## Hardware Requirements 18 | 19 | Ensure that each worker node has the following hardware configurations, and note that IOMesh Community and Enterprise editions have the same hardware requirements. 20 | 21 | **CPU** 22 | 23 | - The CPU architecture should be Intel x86_64, Kunpeng AArch64, or Hygon x86_64. 24 | - At least 8 cores for each worker node. 25 | 26 | **Memory** 27 | 28 | - At least 16 GB on each worker node. 29 | 30 | **Storage Controller** 31 | 32 | - SAS HBA or RAID cards that support passthrough mode (JBOD). 33 | 34 | **OS Disk** 35 | 36 | - An SSD with at least 100 GB of free space in the `/opt` directory for storing IOMesh metadata. 37 | 38 | **Data & Cache Disk** 39 | 40 | Depends on whether the storage architecture is tiered storage or non-tiered storage. 41 | 42 | |Architecture|Description| 43 | |---|---| 44 | |Tiered Storage| Faster storage media for cache and slower storage media for capacity. For example, use faster NVMe SSDs as cache disks and slower SATA SSDs or HDDs as data disks.| 45 | |Non-Tiered Storage|Cache disks are not required. All disks except the physical disk containing the system partition are used as data disks.| 46 | 47 | In IOMesh 1.0, hybrid mode is only supported for tiered storage and all-flash mode for non-tiered storage. 48 | 49 | |Deployment Mode|Disk Requirements| 50 | |---|---| 51 | |Hybrid Mode|
  • Cache Disk: At least 1 SATA SSD, SAS SSD or NVMe SSD, and the capacity must be greater than 60 GB.
  • Data Disk: At least 1 SATA HDD or SAS HDD.
  • The total SSD capacity should be 10% to 20% of total HDD capacity.
| 52 | |All-Flash Mode|At least 1 SSD with a capacity greater than 60 GB.| 53 | 54 | **NIC** 55 | 56 | - Each worker node should have at least one 10/25 GbE NIC. 57 | 58 | ## Network Requirements 59 | 60 | To prevent network bandwidth contention, create a dedicated storage network for IOMesh or leverage an existing network. 61 | 62 | - Plan a CIDR for IOMesh storage network. The IP of each worker node running IOMesh should be within that CIDR. 63 | - The ping latency of the IOMesh storage network should be below 1 ms. 64 | - All worker nodes must be connected to the L2 layer network. 65 | 66 | 67 | -------------------------------------------------------------------------------- /website/versioned_docs/version-v1.0.0/deploy-iomesh-cluster/setup-worker-node.md: -------------------------------------------------------------------------------- 1 | --- 2 | id: version-v1.0.0-setup-worker-node 3 | title: Set Up Worker Node 4 | sidebar_label: Set Up Worker Node 5 | original_id: setup-worker-node 6 | --- 7 | 8 | Once you have confirmed that all requirements in [Prerequisites](../deploy-iomesh-cluster/prerequisites) are met, set up `open-iscsi` for each worker node on which IOMesh will be installed and running. 9 | 10 | It is important to note that if you intend for IOMesh to provide storage to other nodes within this Kubernetes cluster, you will also need to set up `open-iscsi` for those nodes. 11 | 12 | 1. On the node console, run the following command to install `open-iscsi`. 13 | 14 | 15 | 16 | 17 | 18 | ```shell 19 | sudo yum install iscsi-initiator-utils -y 20 | ``` 21 | 22 | 23 | 24 | ```shell 25 | sudo apt-get install open-iscsi -y 26 | ``` 27 | 28 | 29 | 30 | 2. Edit the file `/etc/iscsi/iscsid.conf`. Set the value of the field `node.startup` to `manual`. 
31 | 32 | ```shell 33 | sudo sed -i 's/^node.startup = automatic$/node.startup = manual/' /etc/iscsi/iscsid.conf 34 | ``` 35 | > _Note:_ 36 | > The value of `MaxRecvDataSegmentLength` in `/etc/iscsi/iscsi.conf` is set at 32,768 by default, and the maximum number of PVs is limited to 80,000 in IOMesh. To create PVs more than 80,000 in IOMesh, it is recommended to set the value of `MaxRecvDataSegmentLength` to 163,840 or above. 37 | 38 | 3. Disable SELinux. 39 | 40 | ```shell 41 | sudo setenforce 0 42 | sudo sed -i 's/^SELINUX=enforcing$/SELINUX=permissive/' /etc/selinux/config 43 | ``` 44 | 45 | 4. Load `iscsi_tcp` kernel module. 46 | 47 | ```shell 48 | sudo modprobe iscsi_tcp 49 | sudo bash -c 'echo iscsi_tcp > /etc/modprobe.d/iscsi-tcp.conf' 50 | ``` 51 | 52 | 5. Start `iscsid` service. 53 | 54 | ```shell 55 | sudo systemctl enable --now iscsid 56 | ``` 57 | 58 | 59 | 60 | -------------------------------------------------------------------------------- /website/versioned_docs/version-v1.0.0/introduction/introduction.md: -------------------------------------------------------------------------------- 1 | --- 2 | id: version-v1.0.0-introduction 3 | title: Introduction 4 | sidebar_label: Introduction 5 | original_id: introduction 6 | --- 7 | 8 | ## What is IOMesh? 9 | 10 | IOMesh is a Kubernetes-native storage system that manages storage resources within a Kubernetes cluster, automates operations and maintenance, and provides persistent storage, data protection and migration capabilities for data applications such as MySQL, Cassandra, MongoDB and middleware running on Kubernetes. 11 | 12 | ## Key Features 13 | 14 | **Kubernetes Native** 15 | 16 | IOMesh is fully built on the capabilities of Kubernetes and implements storage as code through declarative APIs, allowing for managing infrastructure and deployment environments through code to better support DevOps. 
17 | 18 | **High Performance** 19 | 20 | IOMesh enables I/O-intensive databases and applications to run efficiently in the container environment. Leveraging the high-performance I/O link, IOMesh achieves high IOPS while maintaining low latency to ensure stable operation of data applications. 21 | 22 | **No Kernel Dependencies** 23 | 24 | IOMesh runs in user space rather than kernel space, isolated from other applications. This means if IOMesh fails, other applications on the same node can continue delivering services as usual without affecting the entire system. Since it is kernel independent, there is no need to install kernel modules or worry about compatibility issues. 25 | 26 | **Tiered Storage** 27 | 28 | IOMesh facilitates cost-effective, hybrid deployment of SSDs & HDDs, maximizing storage performance and capacity for different media while reducing storage costs from the outset. 29 | 30 | **Data Protection & Security** 31 | 32 | A system with multiple levels of data protection makes sure that data is always secure and available. IOMesh does this by placing multiple replicas on different nodes, allowing PV-level snapshots for easy recovery in case of trouble, while also isolating abnormal disks to minimize impact on system performance and reduce operational burden. Authentication is also provided for specific PVs to ensure secure access. 33 | 34 | **Fully Integrated into Kubernetes Ecosystem** 35 | 36 | IOMesh flexibly provides storage for stateful applications via CSI even when they are migrated. It also works seamlessly with the Kubernetes toolchain, easily deploying IOMesh using Helm Chart and integrating with Prometheus and Grafana to provide standardized, visualized monitoring and alerting service. 
37 | 38 | ## Architecture 39 | ![IOMesh arch](https://user-images.githubusercontent.com/78140947/122766241-e2352c00-d2d3-11eb-9630-bb5b428c3178.png) 40 | -------------------------------------------------------------------------------- /website/versioned_docs/version-v1.0.0/volume-operations/clone-pv.md: -------------------------------------------------------------------------------- 1 | --- 2 | id: version-v1.0.0-clone-pv 3 | title: Clone PV 4 | sidebar_label: Clone PV 5 | original_id: clone-pv 6 | --- 7 | 8 | To clone a PV, you should create a new PVC and specify an existing PVC in the field `dataSource` so that you can clone a volume based on it. 9 | 10 | **Precautions** 11 | - The target PVC must be in the same namespace as the source PVC. 12 | - The target PVC must have the same StorageClass and volume mode as the source PVC. 13 | - The capacity of the target PVC must match the capacity of the source PVC. 14 | 15 | **Prerequisite** 16 | 17 | Verify that there is already a PVC available for cloning. 18 | 19 | **Procedure** 20 | 1. Create a YAML config `clone.yaml`. Specify the source PVC in the field `name`. 21 | 22 | ```yaml 23 | # Source: clone.yaml 24 | apiVersion: v1 25 | kind: PersistentVolumeClaim 26 | metadata: 27 | name: cloned-pvc 28 | spec: 29 | storageClassName: iomesh-csi-driver # The StorageClass must be the same as that of the source PVC. 30 | dataSource: 31 | name: iomesh-example-pvc # Specify the source PVC that should be from the same namespace as the target PVC. 32 | kind: PersistentVolumeClaim 33 | accessModes: 34 | - ReadWriteOnce 35 | resources: 36 | requests: 37 | storage: 10Gi # The capacity value must be the same as that of the source PVC. 38 | volumeMode: Filesystem # The volume mode must be the same as that of the source PVC. 39 | ``` 40 | 41 | 2. Apply the YAML config. Once done, a clone of `existing-pvc` will be created. 42 | 43 | ```bash 44 | kubectl apply -f clone.yaml 45 | ``` 46 | 47 | 3. Check the new PVC. 
48 | 49 | ``` 50 | kubectl get pvc cloned-pvc 51 | ``` 52 | If successful, you should see output below: 53 | ```output 54 | NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE 55 | cloned-pvc Bound pvc-161b8c15-3b9f-4742-95db-dcd69c9a2931 10Gi RWO iomesh-csi-driver 12s 56 | ``` 57 | 4. Get the cloned PV. 58 | ```shell 59 | kubectl get pv pvc-161b8c15-3b9f-4742-95db-dcd69c9a2931 # The PV name you get in Step 3. 60 | ``` 61 | 62 | If successful, you should see output below: 63 | ```output 64 | NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE 65 | pvc-161b8c15-3b9f-4742-95db-dcd69c9a2931 10Gi RWO Delete Bound default/cloned-pvc iomesh-csi-driver 122m 66 | ``` -------------------------------------------------------------------------------- /website/versioned_docs/version-v1.0.0/volume-operations/create-pv.md: -------------------------------------------------------------------------------- 1 | --- 2 | id: version-v1.0.0-create-pv 3 | title: Create PV 4 | sidebar_label: Create PV 5 | original_id: create-pv 6 | --- 7 | 8 | To create a PV, you need to first create a PVC. Once done, IOMesh will detect the creation of the PVC and automatically generate a new PV based on its specs, binding them together. Then the pair of PV and PVC will be ready for use. 9 | 10 | > _NOTE:_ IOMesh supports access modes `ReadWriteOnce`,`ReadWriteMany`,and `ReadOnlyMany`, but `ReadWriteMany` and `ReadOnlyMany` are only for PVs with `volumemode` as Block. 11 | 12 | **Prerequisite** 13 | 14 | Ensure that there is already a StorageClass available for use. 15 | 16 | **Procedure** 17 | 1. Create a YAML config `pvc.yaml`. Configure the fields `accessModes`, `storage`, and `volumeMode`. 18 | 19 | ```yaml 20 | # Source: pvc.yaml 21 | apiVersion: v1 22 | kind: PersistentVolumeClaim 23 | metadata: 24 | name: iomesh-example-pvc 25 | spec: 26 | storageClassName: iomesh-csi-driver 27 | accessModes: 28 | - ReadWriteOnce # Specify the access mode. 
29 | resources: 30 | requests: 31 | storage: 10Gi # Specify the storage value. 32 | volumeMode: Filesystem # Specify the volume mode. 33 | ``` 34 | 35 | For details, refer to [Kubernetes Persistent Volumes](https://kubernetes.io/docs/concepts/storage/persistent-volumes/). 36 | 37 | 2. Apply the YAML config to create the PVC. Once done, the corresponding PV will be created. 38 | 39 | ``` 40 | kubectl apply -f pvc.yaml 41 | ``` 42 | 43 | 3. Verify that the PVC was created. 44 | 45 | ``` 46 | kubectl get pvc iomesh-example-pvc 47 | ``` 48 | If successful, you should see output like this: 49 | ```output 50 | NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE 51 | iomesh-example-pvc Bound pvc-34230f3f-47dc-46e8-8c42-38c073c40598 10Gi RWO iomesh-csi-driver 21h 52 | ``` 53 | 54 | 4. View the PV bound to this PVC. You can find the PV name from the PVC output. 55 | 56 | ``` 57 | kubectl get pv pvc-34230f3f-47dc-46e8-8c42-38c073c40598 58 | ``` 59 | If successful, you should see output like this: 60 | ```output 61 | NAME CAPACITY RECLAIM POLICY STATUS CLAIM STORAGECLASS 62 | pvc-34230f3f-47dc-46e8-8c42-38c073c40598 10Gi Delete Bound default/iomesh-example-pvc iomesh-csi-driver 63 | ``` 64 | 65 | -------------------------------------------------------------------------------- /website/versioned_docs/version-v1.0.0/volume-operations/create-storageclass.md: -------------------------------------------------------------------------------- 1 | --- 2 | id: version-v1.0.0-create-storageclass 3 | title: Create StorageClass 4 | sidebar_label: Create StorageClass 5 | original_id: create-storageclass 6 | --- 7 | 8 | IOMesh provides a default StorageClass `iomesh-csi-driver` that provides PVs for pods through dynamic volume provisioning. Its parameters adopt the default values in the table below and cannot be modified. If you want a StorageClass with custom parameters, refer to the following. 
9 | 10 | | Field |Description|Default (`iomesh-csi-driver`)| 11 | |---|---|---| 12 | |`provisioner`| The provisioner that determines what volume plugin is used for provisioning PVs. |`com.iomesh.csi-driver`| 13 | |`reclaimPolicy`|

Determines whether the PV is retained when the PVC is deleted.

`Delete`: When PVC is deleted, PV and the corresponding IOMesh volume will be deleted.

`Retain`: When PVC is deleted, PV and the corresponding IOMesh volume will be retained.|`Delete`| 14 | |`allowVolumeExpansion`|Shows if volume expansion support is enabled.| `true`| 15 | |`csi.storage.k8s.io/fstype`|

The filesystem type, including

`xfs`, `ext2`, `ext3`, `ext4`|`ext4`| 16 | |`replicaFactor` | The number of replicas for PVs, either `2` or `3`|`2`| 17 | | `thinProvision` |

Shows the provisioning type.

`true` for thin provisioning.

`false` for thick provisioning.

|`true`| 18 | 19 | 20 | **Procedure** 21 | 22 | 1. Create a YAML config `sc.yaml` and configure the parameters as needed. 23 | 24 | ```yaml 25 | # Source: sc.yaml 26 | kind: StorageClass 27 | apiVersion: storage.k8s.io/v1 28 | metadata: 29 | name: iomesh-example-sc 30 | provisioner: com.iomesh.csi-driver 31 | reclaimPolicy: Delete # Specify the reclaim policy. 32 | allowVolumeExpansion: true 33 | parameters: 34 | # Specify the filesystem type, including "ext4", "ext3", "ext2", and "xfs". 35 | csi.storage.k8s.io/fstype: "ext4" 36 | # Specify the replication factor, either "2" or "3". 37 | replicaFactor: "2" 38 | # Specify the provisioning type. 39 | thinProvision: "true" 40 | volumeBindingMode: Immediate 41 | ``` 42 | 43 | 2. Apply the YAML config to create the StorageClass. 44 | 45 | ``` 46 | kubectl apply -f sc.yaml 47 | ``` 48 | 49 | 3. View the newly created StorageClass. 50 | 51 | ``` 52 | kubectl get storageclass iomesh-example-sc 53 | ``` 54 | After running the command, you should see an example like: 55 | ```output 56 | NAME PROVISIONER RECLAIMPOLICY VOLUMEBINDINGMODE ALLOWVOLUMEEXPANSION AGE 57 | iomesh-example-sc com.iomesh.csi-driver Delete Immediate true 24h 58 | ``` 59 | -------------------------------------------------------------------------------- /website/versioned_docs/version-v1.0.0/volume-operations/expand-pv.md: -------------------------------------------------------------------------------- 1 | --- 2 | id: version-v1.0.0-expand-pv 3 | title: Expand PV 4 | sidebar_label: Expand PV 5 | original_id: expand-pv 6 | --- 7 | 8 | To expand the capacity of a PV, you only need to modify the field `storage` in its corresponding PVC. 9 | 10 | **Prerequisite** 11 | 12 | The StorageClass must have `allowVolumeExpansion` set to true. The default StorageClass `iomesh-csi-driver` already does this. If a StorageClass is created and configured with custom parameters, verify that its `allowVolumeExpansion` is set to `true`. 
13 | 14 | **Procedure** 15 | 16 | The following example assumes a YAML config `pvc.yaml` that points to a PVC `iomesh-example-pvc` with a capacity of `10Gi`. 17 | ```yaml 18 | # Source: pvc.yaml 19 | apiVersion: v1 20 | kind: PersistentVolumeClaim 21 | metadata: 22 | name: iomesh-example-pvc 23 | spec: 24 | storageClassName: iomesh-csi-driver 25 | accessModes: 26 | - ReadWriteOnce 27 | resources: 28 | requests: 29 | storage: 10Gi # The original capacity of the PVC. 30 | ``` 31 | 32 | 1. Get the PVC. 33 | 34 | ```bash 35 | kubectl get pvc iomesh-example-pvc 36 | ``` 37 | 38 | If successful, you should see output below: 39 | 40 | ```output 41 | NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE 42 | iomesh-example-pvc Bound pvc-b2fc8425-9dbc-4204-8240-41cb4a7fa8ca 10Gi RWO iomesh-csi-driver 11m 43 | ``` 44 | 45 | 2. Access `pvc.yaml`. Then set the field `storage` to a new value. 46 | ```yaml 47 | apiVersion: v1 48 | kind: PersistentVolumeClaim 49 | metadata: 50 | name: iomesh-example-pvc 51 | spec: 52 | storageClassName: iomesh-csi-driver 53 | accessModes: 54 | - ReadWriteOnce 55 | resources: 56 | requests: 57 | storage: 20Gi # The new value must be greater than the original one. 58 | ``` 59 | 60 | 3. Apply the modification. 61 | 62 | ```bash 63 | kubectl apply -f pvc.yaml 64 | ``` 65 | 66 | 4. View the PVC and its corresponding PV. 67 | 68 | > **_NOTE_:** The PV capacity will be changed to the new value, but the capacity value in the PVC will remain the same until it is actually used by the pod. 69 | 70 | ```bash 71 | kubectl get pvc iomesh-example-pvc 72 | ``` 73 | 74 | If successful, you should see output below. 75 | 76 | ```output 77 | NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE 78 | iomesh-example-pvc Bound pvc-b2fc8425-9dbc-4204-8240-41cb4a7fa8ca 10Gi RWO iomesh-csi-driver 11m 79 | ``` 80 | 81 | 5. Verify that the PV capacity was expanded. You can find the PV name from the PVC output. 
82 | 83 | ```bash 84 | kubectl get pv pvc-b2fc8425-9dbc-4204-8240-41cb4a7fa8ca # The PV name you get in Step 4. 85 | ``` 86 | 87 | If successful, you should see output below: 88 | ```output 89 | NAME CAPACITY RECLAIM POLICY STATUS CLAIM STORAGECLASS 90 | pvc-b2fc8425-9dbc-4204-8240-41cb4a7fa8ca 20Gi Delete Bound default/iomesh-example-pvc iomesh-csi-driver 91 | ``` -------------------------------------------------------------------------------- /website/versioned_docs/version-v1.0.0/volumesnapshot-operations/create-snapshotclass.md: -------------------------------------------------------------------------------- 1 | --- 2 | id: version-v1.0.0-create-snapshotclass 3 | title: Create VolumeSnapshotClass 4 | sidebar_label: Create VolumeSnapshotClass 5 | original_id: create-snapshotclass 6 | --- 7 | 8 | A VolumeSnapshot is a snapshot of an existing PV on the storage system, and each VolumeSnapshot is bound to a SnapshotClass that describes the class of snapshots when provisioning a VolumeSnapshot. 9 | 10 | To create a VolumeSnaphotClass, refer to the following: 11 | 12 | |Field|Description|Value| 13 | |---|---|---| 14 | |`driver`|The driver that determines what CSI volume plugin is used for provisioning VolumeSnapshots.|`com.iomesh.csi-driver`| 15 | |[`deletionPolicy`](https://kubernetes.io/docs/concepts/storage/volume-snapshot-classes/#deletionpolicy)|Allows you to configure what happens to the VolumeSnapshotContent when the VolumeSnapshot object is to be deleted.| `Delete`| 16 | 17 | **Procedure** 18 | 19 | 1. Create a YAML config `snc.yaml` and configure the fields `driver` and `deletionPolicy`. 20 | 21 | ```yaml 22 | # Source: snc.yaml 23 | apiVersion: snapshot.storage.k8s.io/v1 24 | kind: VolumeSnapshotClass 25 | metadata: 26 | name: iomesh-csi-driver 27 | driver: com.iomesh.csi-driver # The driver in iomesh.yaml. 28 | deletionPolicy: Delete # "Delete" is recommended. 29 | ``` 30 | 31 | 2. Apply the YAML config to create the VolumeSnapshotClass. 
32 | 33 | ``` 34 | kubectl apply -f snc.yaml 35 | ``` 36 | 37 | 3. Get the VolumeSnapshotClass. 38 | 39 | ``` 40 | kubectl get volumesnapshotclass iomesh-csi-driver 41 | ``` 42 | 43 | If successful, you should see output like this: 44 | ```output 45 | NAME DRIVER DELETIONPOLICY AGE 46 | iomesh-csi-driver com.iomesh.csi-driver Delete 24s 47 | ``` 48 | 49 | -------------------------------------------------------------------------------- /website/versioned_docs/version-v1.0.0/volumesnapshot-operations/create-volumesnapshot.md: -------------------------------------------------------------------------------- 1 | --- 2 | id: version-v1.0.0-create-volumesnapshot 3 | title: Create VolumeSnapshot 4 | sidebar_label: Create VolumeSnapshot 5 | original_id: create-volumesnapshot 6 | --- 7 | 8 | A VolumeSnapshot is a request for snapshot of a volume and similar to a PVC, while a VolumeSnapshotContent is the snapshot taken from a volume provisioned in the cluster. 9 | 10 | **Prerequisite** 11 | 12 | Ensure that there is already a SnapshotClass. 13 | 14 | **Procedure** 15 | 16 | 1. Create a YAML config `snapshot.yaml`. Specify the SnapshotClass and PVC. 17 | 18 | ```yaml 19 | # Source: snapshot.yaml 20 | apiVersion: snapshot.storage.k8s.io/v1 21 | kind: VolumeSnapshot 22 | metadata: 23 | name: example-snapshot 24 | spec: 25 | volumeSnapshotClassName: iomesh-csi-driver # Specify a SnapshotClass such as `iomesh-csi-driver`. 26 | source: 27 | persistentVolumeClaimName: mongodb-data-pvc # Specify the PVC for which you want to take a snapshot such as `mongodb-data-pvc`. 28 | ``` 29 | 2. Apply the YAML config to create a VolumeSnapshot. 30 | 31 | ```bash 32 | kubectl apply -f snapshot.yaml 33 | ``` 34 | 35 | 3. When the VolumeSnapshot is created, the corresponding VolumeSnapshotContent will be created by IOMesh. Run the following command to verify that they were both created. 
36 | 37 | ```bash 38 | kubectl get Volumesnapshots example-snapshot 39 | ``` 40 | 41 | If successful, you should see output like this: 42 | 43 | ```output 44 | NAME SOURCEPVC RESTORESIZE SNAPSHOTCONTENT CREATIONTIME 45 | example-snapshot mongodb-data-pvc 6Gi snapcontent-fb64d696-725b-4f1b-9847-c95e25b68b13 10h 46 | ``` 47 | -------------------------------------------------------------------------------- /website/versioned_docs/version-v1.0.0/volumesnapshot-operations/restore-volumesnapshot.md: -------------------------------------------------------------------------------- 1 | --- 2 | id: version-v1.0.0-restore-volumesnapshot 3 | title: Restore VolumeSnapshot 4 | sidebar_label: Restore VolumeSnapshot 5 | original_id: restore-volumesnapshot 6 | --- 7 | 8 | Restoring a VolumeSnapshot means creating a PVC while specifying the `dataSource` field referencing to the target snapshot. 9 | 10 | **Precaution** 11 | - The new PVC must have the same access mode as the VolumeSnapshot. 12 | - The new PVC must have the same storage value as the VolumeSnapshot. 13 | 14 | **Procedure** 15 | 16 | 1. Create a YAML config `restore.yaml`. Specify the field `dataSource.name`. 17 | 18 | ```yaml 19 | # Source: restore.yaml 20 | apiVersion: v1 21 | kind: PersistentVolumeClaim 22 | metadata: 23 | name: example-restore 24 | spec: 25 | storageClassName: iomesh-csi-driver 26 | dataSource: 27 | name: example-snapshot # Specify the VolumeSnapshot. 28 | kind: VolumeSnapshot 29 | apiGroup: snapshot.storage.k8s.io 30 | accessModes: 31 | - ReadWriteOnce # Must be same as the access mode in the VolumeSnapshot. 32 | resources: 33 | requests: 34 | storage: 6Gi # Must be same as the storage value in the VolumeSnapshot. 35 | ``` 36 | 37 | 2. Apply the YAML config to create the PVC. 38 | 39 | ```bash 40 | kubectl apply -f restore.yaml 41 | ``` 42 | 3. Check the PVC. A PV will be created and bounded to this PVC. 
43 | 44 | ``` 45 | kubectl get pvc example-restore 46 | ``` 47 | If successful, you should see output like this: 48 | ```output 49 | NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE 50 | example-restore Bound pvc-54230f3f-47dc-46e8-8c42-38c073c40598 6Gi RWO iomesh-csi-driver 21h 51 | ``` 52 | 4. View the PV. You can find the PV name from the PVC output. 53 | ```bash 54 | kubectl get pv pvc-54230f3f-47dc-46e8-8c42-38c073c40598 # The PV name you get in Step 3. 55 | ``` 56 | ```output 57 | NAME CAPACITY RECLAIM POLICY STATUS CLAIM STORAGECLASS 58 | pvc-54230f3f-47dc-46e8-8c42-38c073c40598 6Gi Delete Bound example-restore iomesh-csi-driver 59 | ``` -------------------------------------------------------------------------------- /website/versioned_docs/version-v1.0.1/appendices/downloads.md: -------------------------------------------------------------------------------- 1 | --- 2 | id: version-v1.0.1-downloads 3 | title: Downloads 4 | sidebar_label: Downloads 5 | original_id: downloads 6 | --- 7 | 8 | 9 | IOMesh Offline Installation Package 10 | 11 | - Intel x86_64: 12 | - Download Link: 13 | 14 | 15 | - MD5: 16 | ``` 17 | 0c5e40ecb6780b8533be49e918afc767 18 | ``` 19 | - Kunpeng AArch64: 20 | - Download Link: 21 | 22 | 23 | - MD5: 24 | ``` 25 | 5a841691fd568d1af9eed53d7a0f5a72 26 | ``` 27 | 28 | - Hygon x86_64: 29 | - Download Link: 30 | 31 | 32 | - MD5: 33 | ``` 34 | d4774a94a81bfc0b5b10684190126bd7 35 | ``` 36 | 37 | IOMesh Cluster Dashboard File 38 | - Download Link: 39 | 40 | 41 | 42 | - MD5: 43 | ``` 44 | e063db897db783365ad476b8582c1534 45 | ``` 46 | 47 | 48 | -------------------------------------------------------------------------------- /website/versioned_docs/version-v1.0.1/appendices/setup-worker-node.md: -------------------------------------------------------------------------------- 1 | --- 2 | id: version-v1.0.1-setup-worker-node 3 | title: Set Up Worker Node 4 | sidebar_label: Set Up Worker Node 5 | original_id: setup-worker-node 6 | --- 7 | 8 | 
Before setting up `open-iscsi` for the worker nodes, ensure all requirements in [Prerequisites](../deploy-iomesh-cluster/prerequisites) are met. 9 | 10 | 1. On the node console, run the following command to install `open-iscsi`. 11 | 12 | 13 | 14 | 15 | 16 | ```shell 17 | sudo yum install iscsi-initiator-utils -y 18 | ``` 19 | 20 | 21 | 22 | ```shell 23 | sudo apt-get install open-iscsi -y 24 | ``` 25 | 26 | 27 | 28 | 2. Edit the file `/etc/iscsi/iscsid.conf`. Set the value of the field `node.startup` to `manual`. 29 | 30 | ```shell 31 | sudo sed -i 's/^node.startup = automatic$/node.startup = manual/' /etc/iscsi/iscsid.conf 32 | ``` 33 | > _Note:_ 34 | > The value of `MaxRecvDataSegmentLength` in `/etc/iscsi/iscsi.conf` is set at 32,768 by default, and the maximum number of PVs is limited to 80,000 in IOMesh. To create PVs more than 80,000 in IOMesh, it is recommended to set the value of `MaxRecvDataSegmentLength` to 163,840 or above. 35 | 36 | 3. Disable SELinux. 37 | 38 | ```shell 39 | sudo setenforce 0 40 | sudo sed -i 's/^SELINUX=enforcing$/SELINUX=permissive/' /etc/selinux/config 41 | ``` 42 | 43 | 4. Load `iscsi_tcp` kernel module. 44 | 45 | ```shell 46 | sudo modprobe iscsi_tcp 47 | sudo bash -c 'echo iscsi_tcp > /etc/modprobe.d/iscsi-tcp.conf' 48 | ``` 49 | 50 | 5. Start `iscsid` service. 51 | 52 | ```shell 53 | sudo systemctl enable --now iscsid 54 | ``` 55 | 56 | 57 | 58 | -------------------------------------------------------------------------------- /website/versioned_docs/version-v1.0.1/deploy-iomesh-cluster/prerequisites.md: -------------------------------------------------------------------------------- 1 | --- 2 | id: version-v1.0.1-prerequisites 3 | title: Prerequisites 4 | sidebar_label: Prerequisites 5 | original_id: prerequisites 6 | --- 7 | 8 | Before installing and deploying IOMesh, verify the following requirements. 9 | 10 | > _NOTE:_ Expanding an IOMesh cluster to multiple clusters is not currently supported. 
You should decide at the beginning whether to deploy one or multiple clusters. For multi-cluster deployment and operations, refer to [Multiple Cluster Management](../advanced-functions/manage-multiple-cluster.md). 11 | 12 | ## Cluster Requirements 13 | 14 | - A Kubernetes or OpenShift cluster with minimum three worker nodes. 15 | - The Kubernetes version should be 1.17-1.25 or OpenShift version should be 3.11-4.10. 16 | 17 | ## Hardware Requirements 18 | 19 | Ensure that each worker node has the following hardware configurations, and note that IOMesh Community and Enterprise editions have the same hardware requirements. 20 | 21 | **CPU** 22 | 23 | - The CPU architecture should be Intel x86_64, Kunpeng AArch64, or Hygon x86_64. 24 | - At least eight cores for each worker node. 25 | 26 | **Memory** 27 | 28 | - At least 16 GB on each worker node. 29 | 30 | **Storage Controller** 31 | 32 | - SAS HBA or RAID cards that support passthrough mode (JBOD). 33 | 34 | **OS Disk** 35 | 36 | - An SSD with at least 100 GB of free space in the `/opt` directory for storing IOMesh metadata. 37 | 38 | **Data & Cache Disk** 39 | 40 | Depends on whether the storage architecture is tiered storage or non-tiered storage. 41 | 42 | |Architecture|Description| 43 | |---|---| 44 | |Tiered Storage| Faster storage media for cache and slower storage media for capacity. For example, use faster NVMe SSDs as cache disks and slower SATA SSDs or HDDs as data disks.| 45 | |Non-Tiered Storage|Cache disks are not required. All disks except the physical disk containing the system partition are used as data disks.| 46 | 47 | In this release, hybrid mode is only supported for tiered storage and all-flash mode for non-tiered storage. 48 | 49 | |Deployment Mode|Disk Requirements| 50 | |---|---| 51 | |Hybrid Mode|
  • Cache Disk: At least one SATA SSD, SAS SSD or NVMe SSD, and the capacity must be greater than 60 GB.
  • Data Disk: At least one SATA HDD or SAS HDD.
  • The total SSD capacity should be 10% to 20% of total HDD capacity.
| 52 | |All-Flash Mode|At least one SSD with a capacity greater than 60 GB.| 53 | 54 | **NIC** 55 | 56 | - Each worker node should have at least one 10/25 GbE NIC. 57 | 58 | ## Network Requirements 59 | 60 | To prevent network bandwidth contention, create a dedicated storage network for IOMesh or leverage an existing network. 61 | 62 | - Plan a CIDR for IOMesh storage network. The IP of each worker node running IOMesh should be within that CIDR. 63 | - The ping latency of the IOMesh storage network should be below 1 millisecond. 64 | - All worker nodes must be connected to the L2 layer network. 65 | 66 | 67 | -------------------------------------------------------------------------------- /website/versioned_docs/version-v1.0.2/cluster-operations/scale-down-cluster.md: -------------------------------------------------------------------------------- 1 | --- 2 | id: version-v1.0.2-scale-down-cluster 3 | title: Scale Down Cluster 4 | sidebar_label: Scale Down Cluster 5 | original_id: scale-down-cluster 6 | --- 7 | 8 | You can scale down the IOMesh cluster by removing chunk pods in the Kubernetes worker nodes. 9 | 10 | **Precautions** 11 | - You can only delete chunk pods. Deleting meta pods is not supported. 12 | - You can only remove one chunk pod at a time. 13 | - Each chunk pod is created sequentially and given a unique number by `StatefulSet`, and you should remove them in reverse creation order. For example, if there are 5 chunk pods `iomesh-chunk-0`, `iomesh-chunk-1`, `iomesh-chunk-2`, `iomesh-chunk-3`, `iomesh-chunk-4`, deletion should start with `iomesh-chunk-4`. 14 | 15 | 16 | **Procedure** 17 | 18 | The following example reduces the number of chunk pods by removing `iomesh-chunk-2` on the node `k8s-worker-2`. 19 | 20 | 1. Run the `ip a` command on the `k8s-worker-2` node to obtain the unique IP within the data CIDR. Assume the IP is `192.168.29.23`. 21 | 22 | 2. Run the following command. 
Locate the `status.summary.chunkSummary.chunks` field and find the ID of `chunks` whose IP is `192.168.29.23`. 23 | ```shell 24 | kubectl get iomesh iomesh -n iomesh-system -o yaml 25 | ``` 26 | ```yaml 27 | chunks: 28 | - id: 2 # The chunk ID. 29 | ip: 192.168.29.23 30 | ``` 31 | 32 | 3. Get the meta leader pod name. 33 | ```shell 34 | kubectl get pod -n iomesh-system -l=iomesh.com/meta-leader -o=jsonpath='{.items[0].metadata.name}' 35 | ``` 36 | ```output 37 | iomesh-meta-0 38 | ``` 39 | 4. Access the meta leader pod. 40 | ```shell 41 | kubectl exec -it iomesh-meta-0 -n iomesh-system -c iomesh-meta bash 42 | ``` 43 | 44 | 5. Perform `chunk unregister`. Replace with the chunk ID obtained from Step 2. 45 | 46 | Depending on the size of the data in the chunk, executing this command can take from a few minutes to several hours. 47 | ``` 48 | /opt/iomesh/iomeshctl chunk unregister 49 | ``` 50 | 51 | 6. Find `chunk` in `iomesh.yaml`, the default configuration file exported during IOMesh installation, and then edit `replicaCount`. 52 | ```yaml 53 | chunk: 54 | replicaCount: 3 # Reduce the value to 2. 55 | ``` 56 | 57 | 7. Apply the modification. 58 | 59 | ```shell 60 | helm upgrade --namespace iomesh-system iomesh iomesh/iomesh --values iomesh.yaml 61 | ``` 62 | 63 | 8. Verify that the number of chunk pods is reduced. 64 | 65 | ```shell 66 | kubectl get pod -n iomesh-system | grep chunk 67 | ``` 68 | If successful, you should see output like this: 69 | ```output 70 | iomesh-chunk-0 3/3 Running 0 5h5m 71 | iomesh-chunk-1 3/3 Running 0 5h5m 72 | ``` 73 | 74 | 9. Run the following command. Then locate the `status.summary.chunkSummary.chunks` field to verify that the chunk was removed. 
75 | ```shell 76 | kubectl get iomesh iomesh -n iomesh-system -o yaml 77 | ``` -------------------------------------------------------------------------------- /website/versioned_sidebars/version-0.11.0-sidebars.json: -------------------------------------------------------------------------------- 1 | { 2 | "version-0.11.0-docs": { 3 | "About IOMesh": [ 4 | "version-0.11.0-about-iomesh/introduction" 5 | ], 6 | "Deploy": [ 7 | "version-0.11.0-deploy/prerequisites", 8 | "version-0.11.0-deploy/install-iomesh", 9 | "version-0.11.0-deploy/setup-iomesh", 10 | "version-0.11.0-deploy/setup-storageclass", 11 | "version-0.11.0-deploy/setup-snapshotclass" 12 | ], 13 | "Volume Operations": [ 14 | "version-0.11.0-volume-operations/create-volume", 15 | "version-0.11.0-volume-operations/expand-volume", 16 | "version-0.11.0-volume-operations/snapshot-restore-and-clone" 17 | ], 18 | "IOMesh Operations": [ 19 | "version-0.11.0-iomesh-operations/monitoring", 20 | "version-0.11.0-iomesh-operations/cluster-operations" 21 | ], 22 | "Stateful Applications": [ 23 | "version-0.11.0-stateful-applications/iomesh-for-mysql", 24 | "version-0.11.0-stateful-applications/iomesh-for-cassandra", 25 | "version-0.11.0-stateful-applications/iomesh-for-mongodb" 26 | ], 27 | "Advanced Usage": [ 28 | "version-0.11.0-advanced-usage/external-iscsi" 29 | ], 30 | "Additional Info": [ 31 | "version-0.11.0-additional-info/performance-testing", 32 | "version-0.11.0-additional-info/release-notes" 33 | ], 34 | "References": [ 35 | "version-0.11.0-references/metrics" 36 | ] 37 | } 38 | } 39 | -------------------------------------------------------------------------------- /website/versioned_sidebars/version-0.9.5-sidebars.json: -------------------------------------------------------------------------------- 1 | { 2 | "version-0.9.5-docs": { 3 | "About IOMesh": [ 4 | "version-0.9.5-about-iomesh/introduction" 5 | ], 6 | "Deploy": [ 7 | "version-0.9.5-deploy/prerequisites", 8 | "version-0.9.5-deploy/install-iomesh", 
9 | "version-0.9.5-deploy/setup-iomesh", 10 | "version-0.9.5-deploy/setup-storageclass", 11 | "version-0.9.5-deploy/setup-snapshotclass" 12 | ], 13 | "Volume Operations": [ 14 | "version-0.9.5-volume-operations/create-volume", 15 | "version-0.9.5-volume-operations/expand-volume", 16 | "version-0.9.5-volume-operations/snapshot-restore-and-clone" 17 | ], 18 | "IOMesh Operations": [ 19 | "version-0.9.5-iomesh-operations/monitoring", 20 | "version-0.9.5-iomesh-operations/cluster-operations" 21 | ], 22 | "Stateful Applications": [ 23 | "version-0.9.5-stateful-applications/iomesh-for-mysql", 24 | "version-0.9.5-stateful-applications/iomesh-for-cassandra", 25 | "version-0.9.5-stateful-applications/iomesh-for-mongodb" 26 | ], 27 | "Additional Info": [ 28 | "version-0.9.5-additional-info/performance-testing" 29 | ] 30 | } 31 | } 32 | -------------------------------------------------------------------------------- /website/versioned_sidebars/version-v1.0.0-sidebars.json: -------------------------------------------------------------------------------- 1 | { 2 | "version-v1.0.0-docs": { 3 | "About IOMesh": [ 4 | "version-v1.0.0-introduction/introduction", 5 | "version-v1.0.0-basic-concepts/basic-concepts" 6 | ], 7 | "Deploy IOMesh": [ 8 | "version-v1.0.0-deploy-iomesh-cluster/prerequisites", 9 | "version-v1.0.0-deploy-iomesh-cluster/setup-worker-node", 10 | "version-v1.0.0-deploy-iomesh-cluster/install-iomesh", 11 | "version-v1.0.0-deploy-iomesh-cluster/setup-iomesh", 12 | "version-v1.0.0-deploy-iomesh-cluster/activate-license" 13 | ], 14 | "Volume Operations": [ 15 | "version-v1.0.0-volume-operations/create-storageclass", 16 | "version-v1.0.0-volume-operations/create-pv", 17 | "version-v1.0.0-volume-operations/encrypt-pv", 18 | "version-v1.0.0-volume-operations/expand-pv", 19 | "version-v1.0.0-volume-operations/clone-pv" 20 | ], 21 | "VolumeSnapshot Operations": [ 22 | "version-v1.0.0-volumesnapshot-operations/create-snapshotclass", 23 | 
"version-v1.0.0-volumesnapshot-operations/create-volumesnapshot", 24 | "version-v1.0.0-volumesnapshot-operations/restore-volumesnapshot" 25 | ], 26 | "Deploy Stateful Applications": [ 27 | "version-v1.0.0-stateful-applications/iomesh-for-mysql", 28 | "version-v1.0.0-stateful-applications/iomesh-for-mongodb" 29 | ], 30 | "Cluster Operations": [ 31 | "version-v1.0.0-cluster-operations/scale-cluster", 32 | "version-v1.0.0-cluster-operations/upgrade-cluster", 33 | "version-v1.0.0-cluster-operations/uninstall-cluster", 34 | "version-v1.0.0-cluster-operations/manage-license", 35 | "version-v1.0.0-cluster-operations/replace-failed-disk" 36 | ], 37 | "Monitor IOMesh": [ 38 | "version-v1.0.0-monitor-iomesh/install-iomesh-dashboard", 39 | "version-v1.0.0-monitor-iomesh/monitoring-iomesh" 40 | ], 41 | "Advanced Functions": [ 42 | "version-v1.0.0-advanced-functions/manage-multiple-cluster", 43 | "version-v1.0.0-advanced-functions/localpv-manager", 44 | "version-v1.0.0-advanced-functions/external-storage" 45 | ], 46 | "Appendices": [ 47 | "version-v1.0.0-appendices/release-notes", 48 | "version-v1.0.0-appendices/downloads", 49 | "version-v1.0.0-appendices/iomesh-metrics", 50 | "version-v1.0.0-appendices/faq" 51 | ] 52 | } 53 | } 54 | -------------------------------------------------------------------------------- /website/versioned_sidebars/version-v1.0.1-sidebars.json: -------------------------------------------------------------------------------- 1 | { 2 | "version-v1.0.1-docs": { 3 | "About IOMesh": [ 4 | "version-v1.0.1-introduction/introduction", 5 | "version-v1.0.1-basic-concepts/basic-concepts" 6 | ], 7 | "Deploy IOMesh": [ 8 | "version-v1.0.1-deploy-iomesh-cluster/prerequisites", 9 | "version-v1.0.1-deploy-iomesh-cluster/install-iomesh", 10 | "version-v1.0.1-deploy-iomesh-cluster/setup-iomesh", 11 | "version-v1.0.1-deploy-iomesh-cluster/activate-license" 12 | ], 13 | "Volume Operations": [ 14 | "version-v1.0.1-volume-operations/create-storageclass", 15 | 
"version-v1.0.1-volume-operations/create-pv", 16 | "version-v1.0.1-volume-operations/authenticate-pv", 17 | "version-v1.0.1-volume-operations/expand-pv", 18 | "version-v1.0.1-volume-operations/clone-pv" 19 | ], 20 | "VolumeSnapshot Operations": [ 21 | "version-v1.0.1-volumesnapshot-operations/create-snapshotclass", 22 | "version-v1.0.1-volumesnapshot-operations/create-volumesnapshot", 23 | "version-v1.0.1-volumesnapshot-operations/restore-volumesnapshot" 24 | ], 25 | "Deploy Stateful Applications": [ 26 | "version-v1.0.1-stateful-applications/iomesh-for-mysql", 27 | "version-v1.0.1-stateful-applications/iomesh-for-mongodb" 28 | ], 29 | "Cluster Operations": [ 30 | "version-v1.0.1-cluster-operations/scale-cluster", 31 | "version-v1.0.1-cluster-operations/upgrade-cluster", 32 | "version-v1.0.1-cluster-operations/uninstall-cluster", 33 | "version-v1.0.1-cluster-operations/manage-license", 34 | "version-v1.0.1-cluster-operations/replace-failed-disk" 35 | ], 36 | "Monitor IOMesh": [ 37 | "version-v1.0.1-monitor-iomesh/install-iomesh-dashboard", 38 | "version-v1.0.1-monitor-iomesh/monitoring-iomesh" 39 | ], 40 | "Advanced Functions": [ 41 | "version-v1.0.1-advanced-functions/manage-multiple-cluster", 42 | "version-v1.0.1-advanced-functions/localpv-manager", 43 | "version-v1.0.1-advanced-functions/external-storage" 44 | ], 45 | "Appendices": [ 46 | "version-v1.0.1-appendices/release-notes", 47 | "version-v1.0.1-appendices/downloads", 48 | "version-v1.0.1-appendices/setup-worker-node", 49 | "version-v1.0.1-appendices/iomesh-metrics", 50 | "version-v1.0.1-appendices/faq" 51 | ] 52 | } 53 | } 54 | -------------------------------------------------------------------------------- /website/versioned_sidebars/version-v1.0.2-sidebars.json: -------------------------------------------------------------------------------- 1 | { 2 | "version-v1.0.2-docs": { 3 | "About IOMesh": [ 4 | "version-v1.0.2-introduction/introduction", 5 | "version-v1.0.2-basic-concepts/basic-concepts" 6 | ], 7 
| "Deploy IOMesh": [ 8 | "version-v1.0.2-deploy-iomesh-cluster/prerequisites", 9 | "version-v1.0.2-deploy-iomesh-cluster/install-iomesh", 10 | "version-v1.0.2-deploy-iomesh-cluster/setup-iomesh", 11 | "version-v1.0.2-deploy-iomesh-cluster/activate-license" 12 | ], 13 | "Volume Operations": [ 14 | "version-v1.0.2-volume-operations/create-storageclass", 15 | "version-v1.0.2-volume-operations/create-pv", 16 | "version-v1.0.2-volume-operations/authenticate-pv", 17 | "version-v1.0.2-volume-operations/expand-pv", 18 | "version-v1.0.2-volume-operations/clone-pv" 19 | ], 20 | "VolumeSnapshot Operations": [ 21 | "version-v1.0.2-volumesnapshot-operations/create-snapshotclass", 22 | "version-v1.0.2-volumesnapshot-operations/create-volumesnapshot", 23 | "version-v1.0.2-volumesnapshot-operations/restore-volumesnapshot" 24 | ], 25 | "Deploy Stateful Applications": [ 26 | "version-v1.0.2-stateful-applications/iomesh-for-mysql", 27 | "version-v1.0.2-stateful-applications/iomesh-for-mongodb" 28 | ], 29 | "Cluster Operations": [ 30 | "version-v1.0.2-cluster-operations/scale-out-cluster", 31 | "version-v1.0.2-cluster-operations/scale-down-cluster", 32 | "version-v1.0.2-cluster-operations/upgrade-cluster", 33 | "version-v1.0.2-cluster-operations/uninstall-cluster", 34 | "version-v1.0.2-cluster-operations/manage-license", 35 | "version-v1.0.2-cluster-operations/replace-failed-disk" 36 | ], 37 | "Monitor IOMesh": [ 38 | "version-v1.0.2-monitor-iomesh/install-iomesh-dashboard", 39 | "version-v1.0.2-monitor-iomesh/monitoring-iomesh" 40 | ], 41 | "Advanced Functions": [ 42 | "version-v1.0.2-advanced-functions/manage-multiple-cluster", 43 | "version-v1.0.2-advanced-functions/localpv-manager", 44 | "version-v1.0.2-advanced-functions/external-storage" 45 | ], 46 | "Appendices": [ 47 | "version-v1.0.2-appendices/release-notes", 48 | "version-v1.0.2-appendices/downloads", 49 | "version-v1.0.2-appendices/setup-worker-node", 50 | "version-v1.0.2-appendices/iomesh-metrics", 51 | 
"version-v1.0.2-appendices/faq" 52 | ] 53 | } 54 | } 55 | -------------------------------------------------------------------------------- /website/versions.json: -------------------------------------------------------------------------------- 1 | [ 2 | "v1.0.4", 3 | "v1.0.3", 4 | "v1.0.2", 5 | "v1.0.1", 6 | "v1.0.0", 7 | "v0.11.1", 8 | "0.11.0", 9 | "0.10.1", 10 | "0.10.0", 11 | "0.9.7", 12 | "0.9.5" 13 | ] 14 | -------------------------------------------------------------------------------- /website/website/blog/2016-03-11-blog-post.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: Blog Title 3 | author: Blog Author 4 | authorURL: http://twitter.com/ 5 | authorFBID: 100002976521003 6 | --- 7 | 8 | Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet, consectetur adipiscing elit. Vivamus elementum massa eget nulla aliquet sagittis. Proin odio tortor, vulputate ut odio in, ultrices ultricies augue. Cras ornare ultrices lorem malesuada iaculis. Etiam sit amet libero tempor, pulvinar mauris sed, sollicitudin sapien. 9 | 10 | 11 | 12 | Mauris vestibulum ullamcorper nibh, ut semper purus pulvinar ut. Donec volutpat orci sit amet mauris malesuada, non pulvinar augue aliquam. Vestibulum ultricies at urna ut suscipit. Morbi iaculis, erat at imperdiet semper, ipsum nulla sodales erat, eget tincidunt justo dui quis justo. Pellentesque dictum bibendum diam at aliquet. Sed pulvinar, dolor quis finibus ornare, eros odio facilisis erat, eu rhoncus nunc dui sed ex. Nunc gravida dui massa, sed ornare arcu tincidunt sit amet. Maecenas efficitur sapien neque, a laoreet libero feugiat ut. 13 | 14 | Nulla facilisi. Maecenas sodales nec purus eget posuere. Sed sapien quam, pretium a risus in, porttitor dapibus erat. Sed sit amet fringilla ipsum, eget iaculis augue. 
Integer sollicitudin tortor quis ultricies aliquam. Suspendisse fringilla nunc in tellus cursus, at placerat tellus scelerisque. Sed tempus elit a sollicitudin rhoncus. Nulla facilisi. Morbi nec dolor dolor. Orci varius natoque penatibus et magnis dis parturient montes, nascetur ridiculus mus. Cras et aliquet lectus. Pellentesque sit amet eros nisi. Quisque ac sapien in sapien congue accumsan. Nullam in posuere ante. Vestibulum ante ipsum primis in faucibus orci luctus et ultrices posuere cubilia Curae; Proin lacinia leo a nibh fringilla pharetra. 15 | 16 | Orci varius natoque penatibus et magnis dis parturient montes, nascetur ridiculus mus. Proin venenatis lectus dui, vel ultrices ante bibendum hendrerit. Aenean egestas feugiat dui id hendrerit. Orci varius natoque penatibus et magnis dis parturient montes, nascetur ridiculus mus. Curabitur in tellus laoreet, eleifend nunc id, viverra leo. Proin vulputate non dolor vel vulputate. Curabitur pretium lobortis felis, sit amet finibus lorem suscipit ut. Sed non mollis risus. Duis sagittis, mi in euismod tincidunt, nunc mauris vestibulum urna, at euismod est elit quis erat. Phasellus accumsan vitae neque eu placerat. In elementum arcu nec tellus imperdiet, eget maximus nulla sodales. Curabitur eu sapien eget nisl sodales fermentum. 17 | 18 | Phasellus pulvinar ex id commodo imperdiet. Praesent odio nibh, sollicitudin sit amet faucibus id, placerat at metus. Donec vitae eros vitae tortor hendrerit finibus. Interdum et malesuada fames ac ante ipsum primis in faucibus. Quisque vitae purus dolor. Duis suscipit ac nulla et finibus. Phasellus ac sem sed dui dictum gravida. Phasellus eleifend vestibulum facilisis. Integer pharetra nec enim vitae mattis. Duis auctor, lectus quis condimentum bibendum, nunc dolor aliquam massa, id bibendum orci velit quis magna. Ut volutpat nulla nunc, sed interdum magna condimentum non. Sed urna metus, scelerisque vitae consectetur a, feugiat quis magna. 
Donec dignissim ornare nisl, eget tempor risus malesuada quis. 19 | -------------------------------------------------------------------------------- /website/website/blog/2017-04-10-blog-post-two.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: New Blog Post 3 | author: Blog Author 4 | authorURL: http://twitter.com/ 5 | authorFBID: 100002976521003 6 | --- 7 | 8 | Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet, consectetur adipiscing elit. Vivamus elementum massa eget nulla aliquet sagittis. Proin odio tortor, vulputate ut odio in, ultrices ultricies augue. Cras ornare ultrices lorem malesuada iaculis. Etiam sit amet libero tempor, pulvinar mauris sed, sollicitudin sapien. 9 | 10 | 11 | 12 | Mauris vestibulum ullamcorper nibh, ut semper purus pulvinar ut. Donec volutpat orci sit amet mauris malesuada, non pulvinar augue aliquam. Vestibulum ultricies at urna ut suscipit. Morbi iaculis, erat at imperdiet semper, ipsum nulla sodales erat, eget tincidunt justo dui quis justo. Pellentesque dictum bibendum diam at aliquet. Sed pulvinar, dolor quis finibus ornare, eros odio facilisis erat, eu rhoncus nunc dui sed ex. Nunc gravida dui massa, sed ornare arcu tincidunt sit amet. Maecenas efficitur sapien neque, a laoreet libero feugiat ut. 13 | 14 | Nulla facilisi. Maecenas sodales nec purus eget posuere. Sed sapien quam, pretium a risus in, porttitor dapibus erat. Sed sit amet fringilla ipsum, eget iaculis augue. Integer sollicitudin tortor quis ultricies aliquam. Suspendisse fringilla nunc in tellus cursus, at placerat tellus scelerisque. Sed tempus elit a sollicitudin rhoncus. Nulla facilisi. Morbi nec dolor dolor. Orci varius natoque penatibus et magnis dis parturient montes, nascetur ridiculus mus. Cras et aliquet lectus. Pellentesque sit amet eros nisi. 
Quisque ac sapien in sapien congue accumsan. Nullam in posuere ante. Vestibulum ante ipsum primis in faucibus orci luctus et ultrices posuere cubilia Curae; Proin lacinia leo a nibh fringilla pharetra. 15 | 16 | Orci varius natoque penatibus et magnis dis parturient montes, nascetur ridiculus mus. Proin venenatis lectus dui, vel ultrices ante bibendum hendrerit. Aenean egestas feugiat dui id hendrerit. Orci varius natoque penatibus et magnis dis parturient montes, nascetur ridiculus mus. Curabitur in tellus laoreet, eleifend nunc id, viverra leo. Proin vulputate non dolor vel vulputate. Curabitur pretium lobortis felis, sit amet finibus lorem suscipit ut. Sed non mollis risus. Duis sagittis, mi in euismod tincidunt, nunc mauris vestibulum urna, at euismod est elit quis erat. Phasellus accumsan vitae neque eu placerat. In elementum arcu nec tellus imperdiet, eget maximus nulla sodales. Curabitur eu sapien eget nisl sodales fermentum. 17 | 18 | Phasellus pulvinar ex id commodo imperdiet. Praesent odio nibh, sollicitudin sit amet faucibus id, placerat at metus. Donec vitae eros vitae tortor hendrerit finibus. Interdum et malesuada fames ac ante ipsum primis in faucibus. Quisque vitae purus dolor. Duis suscipit ac nulla et finibus. Phasellus ac sem sed dui dictum gravida. Phasellus eleifend vestibulum facilisis. Integer pharetra nec enim vitae mattis. Duis auctor, lectus quis condimentum bibendum, nunc dolor aliquam massa, id bibendum orci velit quis magna. Ut volutpat nulla nunc, sed interdum magna condimentum non. Sed urna metus, scelerisque vitae consectetur a, feugiat quis magna. Donec dignissim ornare nisl, eget tempor risus malesuada quis. 
19 | -------------------------------------------------------------------------------- /website/website/blog/2017-09-25-testing-rss.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: Adding RSS Support - RSS Truncation Test 3 | author: Eric Nakagawa 4 | authorURL: http://twitter.com/ericnakagawa 5 | authorFBID: 661277173 6 | --- 7 | 8 | 1234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890 9 | 10 | This should be truncated. 11 | 12 | 13 | 14 | This line should never render in XML. 15 | -------------------------------------------------------------------------------- /website/website/blog/2017-09-26-adding-rss.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: Adding RSS Support 3 | author: Eric Nakagawa 4 | authorURL: http://twitter.com/ericnakagawa 5 | authorFBID: 661277173 6 | --- 7 | 8 | This is a test post. 9 | 10 | A whole bunch of other information. 11 | -------------------------------------------------------------------------------- /website/website/blog/2017-10-24-new-version-1.0.0.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: New Version 1.0.0 3 | author: Eric Nakagawa 4 | authorURL: http://twitter.com/ericnakagawa 5 | authorFBID: 661277173 6 | --- 7 | 8 | This blog post will test file name parsing issues when periods are present. 
9 | -------------------------------------------------------------------------------- /website/website/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "scripts": { 3 | "examples": "docusaurus-examples", 4 | "start": "docusaurus-start", 5 | "build": "docusaurus-build", 6 | "publish-gh-pages": "docusaurus-publish", 7 | "write-translations": "docusaurus-write-translations", 8 | "version": "docusaurus-version", 9 | "rename-version": "docusaurus-rename-version" 10 | }, 11 | "devDependencies": { 12 | "docusaurus": "^1.14.7" 13 | } 14 | } 15 | -------------------------------------------------------------------------------- /website/website/pages/en/help.js: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright (c) Facebook, Inc. and its affiliates. 3 | * 4 | * This source code is licensed under the MIT license found in the 5 | * LICENSE file in the root directory of this source tree. 6 | */ 7 | 8 | const React = require('react'); 9 | 10 | const CompLibrary = require('../../core/CompLibrary.js'); 11 | 12 | const Container = CompLibrary.Container; 13 | const GridBlock = CompLibrary.GridBlock; 14 | 15 | function Help(props) { 16 | const {config: siteConfig, language = ''} = props; 17 | const {baseUrl, docsUrl} = siteConfig; 18 | const docsPart = `${docsUrl ? `${docsUrl}/` : ''}`; 19 | const langPart = `${language ? `${language}/` : ''}`; 20 | const docUrl = (doc) => `${baseUrl}${docsPart}${langPart}${doc}`; 21 | 22 | const supportLinks = [ 23 | { 24 | content: `Learn more using the [documentation on this site.](${docUrl( 25 | 'doc1.html', 26 | )})`, 27 | title: 'Browse Docs', 28 | }, 29 | { 30 | content: 'Ask questions about the documentation and project', 31 | title: 'Join the community', 32 | }, 33 | { 34 | content: "Find out what's new with this project", 35 | title: 'Stay up to date', 36 | }, 37 | ]; 38 | 39 | return ( 40 |
41 | 42 |
43 |
44 |

Need help?

45 |
46 |

This project is maintained by a dedicated group of people.

47 | 48 |
49 |
50 |
51 | ); 52 | } 53 | 54 | module.exports = Help; 55 | -------------------------------------------------------------------------------- /website/website/pages/en/users.js: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright (c) Facebook, Inc. and its affiliates. 3 | * 4 | * This source code is licensed under the MIT license found in the 5 | * LICENSE file in the root directory of this source tree. 6 | */ 7 | 8 | const React = require('react'); 9 | 10 | const CompLibrary = require('../../core/CompLibrary.js'); 11 | 12 | const Container = CompLibrary.Container; 13 | 14 | class Users extends React.Component { 15 | render() { 16 | const {config: siteConfig} = this.props; 17 | if ((siteConfig.users || []).length === 0) { 18 | return null; 19 | } 20 | 21 | const showcase = siteConfig.users.map((user) => ( 22 | 23 | {user.caption} 24 | 25 | )); 26 | 27 | return ( 28 |
29 | 30 |
31 |
32 |

Who is Using This?

33 |

This project is used by many folks

34 |
35 |
{showcase}
36 | {siteConfig.repoUrl && ( 37 | 38 |

Are you using this project?

39 | 42 | Add your company 43 | 44 |
45 | )} 46 |
47 |
48 |
49 | ); 50 | } 51 | } 52 | 53 | module.exports = Users; 54 | -------------------------------------------------------------------------------- /website/website/sidebars.json: -------------------------------------------------------------------------------- 1 | { 2 | "docs": { 3 | "Docusaurus": ["doc1"], 4 | "First Category": ["doc2"], 5 | "Second Category": ["doc3"] 6 | }, 7 | "docs-other": { 8 | "First Category": ["doc4", "doc5"] 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /website/website/static/css/custom.css: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright (c) Facebook, Inc. and its affiliates. 3 | * 4 | * This source code is licensed under the MIT license found in the 5 | * LICENSE file in the root directory of this source tree. 6 | */ 7 | 8 | /* your custom css */ 9 | 10 | @media only screen and (min-device-width: 360px) and (max-device-width: 736px) { 11 | } 12 | 13 | @media only screen and (min-width: 1024px) { 14 | } 15 | 16 | @media only screen and (max-width: 1023px) { 17 | } 18 | 19 | @media only screen and (min-width: 1400px) { 20 | } 21 | 22 | @media only screen and (min-width: 1500px) { 23 | } 24 | -------------------------------------------------------------------------------- /website/website/static/img/favicon.ico: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/iomesh/docs/a5d1600c3adf566f1398e85ceccc9a6ebe0a46e0/website/website/static/img/favicon.ico -------------------------------------------------------------------------------- /website/website/static/img/oss_logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/iomesh/docs/a5d1600c3adf566f1398e85ceccc9a6ebe0a46e0/website/website/static/img/oss_logo.png --------------------------------------------------------------------------------