├── LICENSE ├── README.md ├── assignments ├── Assignment1.md ├── Assignment2.md ├── Assignment3.md ├── Assignment4.md └── Assignment5.md ├── lecture_slides ├── .DS_Store ├── keynote │ ├── .DS_Store │ ├── 01_intro.key │ ├── 02_webdev.key │ ├── 03_cloudcomputing.key │ ├── 04_kubernetes.key │ ├── 05_devops.key │ ├── 07_techniques.key │ ├── 08_monitoring.key │ └── 09_productiontesting.key └── pdf │ ├── 01_intro.pdf │ ├── 02_webdev.pdf │ ├── 03_cloudcomputing.pdf │ ├── 04_kubernetes.pdf │ ├── 05_devops.pdf │ ├── 07_techniques.pdf │ ├── 08_monitoring.pdf │ └── 09_productiontesting.pdf └── project_templates ├── .DS_Store ├── clustermanagement ├── .gitlab-ci.yml ├── .gitlab │ └── managed-apps │ │ ├── cert-manager │ │ └── values.yaml │ │ ├── cilium │ │ └── values.yaml │ │ ├── config.yaml │ │ ├── crossplane │ │ └── values.yaml │ │ ├── elastic-stack │ │ └── values.yaml │ │ ├── gitlab-runner │ │ └── values.yaml │ │ ├── ingress │ │ └── values.yaml │ │ ├── jupyterhub │ │ └── values.yaml │ │ ├── prometheus │ │ └── values.yaml │ │ ├── sentry │ │ └── values.yaml │ │ └── vault │ │ └── values.yaml └── README.md ├── scalyshop-backend ├── .gitignore ├── README.md ├── app.js ├── controllers │ ├── orders.js │ └── products.js ├── dummy_data │ ├── product1.json │ ├── product2.json │ ├── product3.json │ ├── product4.json │ └── product5.json ├── models │ ├── order.js │ └── product.js ├── package.json ├── stress_data │ └── stress_test_product.json └── tests │ ├── ScalyShop.postman_collection.json │ └── dropdb.js └── scalyshop-frontend ├── .eslintrc.js ├── .gitignore ├── README.md ├── package.json ├── public ├── favicon.ico └── index.html ├── src ├── .DS_Store ├── Api.js ├── App.vue ├── assets │ ├── addtocart.jpg │ ├── close.png │ ├── edit.png │ └── game.png ├── components │ ├── OrderItem.vue │ └── ProductItem.vue ├── main.js ├── router.js └── views │ ├── Admin.vue │ ├── Basket.vue │ ├── Customer.vue │ ├── History.vue │ └── Home.vue └── vue.config.js /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2022 Internet Computing and Emerging Technologies lab (ICET-lab) 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Material for DAT490 (Architectures for Scale-Out Systems) 2 | ### (colloquially "the DevOps course") 3 | 4 | Here you find material used in teaching DAT490 (Architectures for Scale-Out Systems), an elective master-level course in the [Master Program Software Engineering](https://www.chalmers.se/en/education/programmes/masters-info/Pages/Software-Engineering-and-Technology.aspx) at Chalmers as well as in the [Software Engineering and Management master](https://www.gu.se/en/study-gothenburg/software-engineering-and-management-masters-programme-n2sof) at Gothenburg University. 5 | 6 | The course is designed as a highly practical and pragmatic crash course in the varied techniques that characterize modern Web development. We discuss and experiment with cloud computing, Docker, Kubernetes, DevOps, monitoring, and release engineering, along with general principles such as how to engineer scalable systems. 7 | 8 | Please see [the official course syllabus](https://student.portal.chalmers.se/en/chalmersstudies/programme-information/Pages/SearchProgram.aspx?course_id=32908&parsergrp=2) for more information on the context of the course. 9 | 10 | ## Course Project 11 | A key element of the course is a practical project, which students are expected to develop during the course duration. The goal of the project is to adopt an existing (highly simplified) Web application and to: 12 | 13 | - Dockerize it 14 | - Deploy it using Kubernetes on Google Cloud 15 | - Develop a continuous deployment pipeline using GitLab 16 | - Re-architect and extend it 17 | - Add a Prometheus based monitoring solution 18 | - Set up canary releases, as well as feature toggles 19 | 20 | These tasks are split into 5 weekly assignments. Each task is either marked as mandatory (required because something later builds on it) or optional. All tasks come with (relatively) extensive and specific instructions (see `assignments` folder). All necessary code is contained in `project_templates` (frontend and backend are the example application, cluster_management is used in Assignment 3). 21 | 22 | ### Technical Requirements 23 | The project relies fairly heavily on GitLab (more specifically, a private GitLab installation, *not* the free-tier public version of GitLab) and Google Cloud (trial version is sufficient). All instructions have been tested using the specific version of GitLab available to Chalmers students. Version-specific differences are not unlikely (GitLab is moving fast, and sometimes breaking things). 24 | 25 | *Unfortunately I am not able to help make the project work on other cloud providers or other versions of GitLab.* 26 | 27 | ### Necessary Prior Student Knowledge 28 | The project does not require any specific prior knowledge, but does expect a certain level of technical sophistication of students (i.e., students that find it difficult to quickly grasp the basics of a new programming language or web framework may struggle). Students should also be willing and able to read online documentation. 29 | 30 | We have also found that students with no prior knowledge of system administration and server management find some of the tasks in the project rather daunting. 
31 | 32 | ## Lecture Slides 33 | In addition to the project description, this repository also contains the slides used for classroom lectures (nine lecture units, available in Keynote and PDF format). These slide decks are mostly intended for other instructors - the slides are likely not, and are not intended to be, directly useful for solving the project. 34 | 35 | ## Copyright 36 | All material is shared as-is with the hope that it will be useful for learners or other instructors. However, I cannot give guarantees about the correctness of any of the provided material. Furthermore, I will have only limited time to support you if you are studying this material on your own. 37 | 38 | **Lecture slides** and **assignment descriptions** are provided under a creative commons [CC-BY](https://creativecommons.org/licenses/by/4.0/) license, and are free to use for other instructors. I would appreciate a [message](mailto:philipp.leitner@chalmers.se) if you found this material helpful to plan or execute your own course (or studying on your own). More material, such as example exams, can be potentially be shared upon direct request. 39 | 40 | **Project templates** are available under an [MIT License](https://www.mit.edu/~amini/LICENSE.md). Copyright to the source code is held by Philipp Leitner, Joel Scheuner, and Ranim Khojah. 41 | 42 | --- 43 | 44 | ### About the Author 45 | This course was designed by [Philipp Leitner](http://philippleitner.net), Associate Professor at [Chalmers University of Technology](https://www.chalmers.se/sv/Sidor/default.aspx) in Gothenburg, Sweden. Contact me via [email](mailto:philipp.leitner@chalmers.se) or on [Twitter](https://twitter.com/xLeitix). -------------------------------------------------------------------------------- /assignments/Assignment1.md: -------------------------------------------------------------------------------- 1 | # Assignment 1 2 | 3 | In the first assignment, the goal is mostly to get set up and to familiarize yourself with [ScalyShop](https://git.chalmers.se/courses/dat490/resources/scalyshop), our little case study application for the course. You will clone the app, potentially extend it, and dockerize it. The following weekly assignments will see us use, extend, and modify this initial deployment in various ways. 4 | 5 | **Deadline**: Wednesday Week 3 6 | 7 | **Discussion Session**: Friday Week 3 8 | 9 | **Related Lectures**: Lectures 1 to 3 10 | 11 | **Deliverables:** 12 | * Written report (submitted in Canvas, please use report template in Canvas) 13 | * Updates in GitLab 14 | 15 | ## T1.1 - Cloning the project [basic] 16 | Your first task is simple - clone ScalyShop. Log in to GitLab and locate the "group" we created for you ("dat490-2022-groupid"). A "group" in GitLab is basically a folder containing related projects. Your group should already contain three repositories. Find the repos containing the frontend and backend, clone them, and follow the initial installation instructions. Make sure that you have [Node](https://nodejs.org/en/download/) and [MongoDB](https://www.mongodb.com) installed on your computer. 17 | 18 | Start the application according to the installation instructions, and make sure that the application successfully starts and is operable. If you find problems in the installation instructions, please provide a pull request with suggested updates (or at least create an issue in the project repository). 19 | 20 | **(Very briefly) document your working solution in your report. 
It's sufficient to add one or two screenshots of the working application, and describe problems you had to overcome (if any).** 21 | 22 | ## T1.2 - Write a small code extension (new feature) for ScalyShop [optional, 25 pts] 23 | Now that the application is nice and ready (and you are able to launch it locally), you could start exploring the implementation of ScalyShop by writing a small code change. 24 | 25 | You are free to design and add any new feature you like. To get full credit for this extra task, you should devise a new feature that requires at least small changes in frontend, backend (API), and the database schema. However, it's ok if the feature isn't super-useful or sophisticated (most of ScalyShop is not ;) ), the main goal is for you to explore how Express, Vue.js, and Mongoose work in combination. 26 | 27 | Some resources to help you get started: 28 | * [Javascript reference](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference) 29 | * [Introduction to Express](https://developer.mozilla.org/en-US/docs/Learn/Server-side/Express_Nodejs/Introduction) (used to write the backend) 30 | * [Introduction to Mongoose](https://developer.mozilla.org/en-US/docs/Learn/Server-side/Express_Nodejs/mongoose) (used in the backend to interface with MongoDB) 31 | * [Video tutorial for Vue.js](https://www.vuemastery.com/courses/intro-to-vue-js/vue-instance/) (used to implement the frontend) 32 | * [Building frontend layouts using Bootstrap-Vue](https://bootstrap-vue.org/docs/components/layout) (used in the frontend to make buttons, tables, layouts, etc.) 33 | 34 | **Push your code changes to the master of your GitLab projects. Describe your feature (and its implementation) briefly in your report.** 35 | 36 | ## T1.3 - Dockerize the application [basic] 37 | Much of our future delivery pipeline will assume that our application components are available as Docker containers, so now our next step is to dockerize the application. As we learned in Lecture 3, [Docker](https://www.docker.com) is a container format that is incredibly widely used in cloud computing. As a first step, make sure to install Docker on your computer. 38 | 39 | Then you should build **two or three** working Docker images, one each for frontend and backend, and optionally one for the database (though you can also just start a [MongoDB image directly](https://hub.docker.com/_/mongo)). Build on suitable base images and configure each image correctly (the [Node.js docker image](https://hub.docker.com/_/node) may be a good starting point, but you can choose any base image that works for you). 40 | 41 | Run your images (in the right order - database, backend, frontend) and validate that all images start correctly. If you have configured everything accurately, you should now be able to test the application by navigating to [http://localhost:5046/](http://localhost:5046/) and clicking around in the application, in the same way as when you started your application without Docker. Check for errors in the Docker logs of your images. 42 | 43 | *Tips:* 44 | - A common source of errors are incorrect port numbers, incorrect port mapping, or other issues related to Docker's virtual networking. Refer to the lecture and the online documentation of Docker and make sure that you are exposing and using the correct ports for all services. 45 | - ScalyShop uses environment variables to configure endpoints, ports, database username, database password, etc. 
Many of these options have suitable default values for this first deployment task, but it is possible that you will have to provide custom values for some. In this case, *make sure to set the right environment variable*, do not just change the value in the source code. If you do, this will trouble you (a lot) in later tasks and assignments. 46 | 47 | **Push your Dockerfiles to the master of your GitLab projects. Document your working solution in your report. It's sufficient to describe the final solution in a few sentences.** 48 | 49 | ## T1.4 - Create a composition with docker-compose [basic] 50 | 51 | Now create a *composition* that represents your entire application using [Docker-Compose](https://docs.docker.com/compose/) (you probably installed it already along with Docker). Write a single `docker-compose.yaml` file that starts your entire application, including the database (don't assume a MongoDB database is already running, even though you likely installed MongoDB in the first task). Again validate that your composition works by navigating to [http://localhost:5046/](http://localhost:5046/) and clicking around in the application. 52 | 53 | Note that the CI pipeline we will build in subsequent assignments will *not* use this docker-compose file, but it will remain a tremendously helpful tool to quickly spin up the application for local development and testing. 54 | 55 | *Tips:* 56 | - You can either use the Docker images you wrote in the previous task, or define the services in your composition directly based on standard Node images. 57 | - Don't forget that in a docker-compose composition, services can be addressed through their service name (i.e., the service name becomes the internal DNS name of that service). 58 | - As in the previous task, make sure to use the right port numbers, and use environment variables to set parameters such as the database or backend host names. 59 | 60 | **Push your docker-compose file to the master of the frontend GitLab project. Document your working solution in your report. It's sufficient to describe the final solution in a few sentences.** 61 | 62 | ## T1.5 - Reflect on the architecture of ScalyShop [optional, open pts] 63 | 64 | As a last task, you may reflect in your report on the current architecture and implementation of ScalyShop. Assume that the application is in its infancy (a "minimal viable product", in startup terms), but eventually should serve an international, large customer base. In what ways is the application already prepared for a large deployment, and what will definitely need to be changed? Do you see any design decisions that will hamper a large-scale deployment and that will be difficult to change later on? 65 | 66 | *(focus on the architecture and deployment - it's obvious that ScalyShop would need more functional features to become an actual product)* 67 | 68 | **Describe the outcomes of your analysis in your weekly report. Be specific, and describe alternatives if you can think of any. Architectural diagrams may be very helpful to bring your point across.** 69 | 70 | ## Extra Work [optional, open pts] 71 | 72 | In every assignment, you are free to further explore the topic. Document in the report if you have done extra work that is not related to any of the tasks above. Depending on the scope of the extra work, we may award points towards a better grade for this extra work. 
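*Reference sketch for T1.3 and T1.4:* to give you an idea of the expected shape of the composition, a `docker-compose.yaml` for ScalyShop might look roughly like the sketch below. Treat it purely as a starting point: the three-service layout and the `PORT` variable follow the tasks above, but the build paths, image tag, and the database- and backend-related variable names are placeholder assumptions - check the ScalyShop READMEs for the environment variables the application actually reads.

```yaml
# Illustrative sketch only - variable names other than PORT are placeholders.
version: "3.8"
services:
  database:
    image: mongo:5                    # official MongoDB image from Docker Hub
    volumes:
      - mongo-data:/data/db           # keep data between restarts
  backend:
    build: ./scalyshop-backend        # adjust to wherever your backend Dockerfile lives
    environment:
      PORT: "5001"                    # assumed backend port
      MONGODB_HOST: database          # placeholder name; services resolve each other by service name
    ports:
      - "5001:5001"
    depends_on:
      - database
  frontend:
    build: ./scalyshop-frontend       # adjust to wherever your frontend Dockerfile lives
    environment:
      PORT: "5046"                    # matches http://localhost:5046 used above
      VUE_APP_BACKEND_ENDPOINT: "http://localhost:5001"  # placeholder name; Vue may need this at build time instead
    ports:
      - "5046:5046"
    depends_on:
      - backend
volumes:
  mongo-data:
```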
73 | -------------------------------------------------------------------------------- /assignments/Assignment2.md: -------------------------------------------------------------------------------- 1 | # Assignment 2 2 | 3 | In the second assignment, our focus is on exploring the Google cloud services and Kubernetes, as well as Helm as a "package manager" for Kubernetes. 4 | 5 | **Deadline**: Wednesday Week 4 6 | 7 | **Discussion Session**: Friday Week 4 8 | 9 | **Related Lectures**: Lecture 3 and 4 10 | 11 | **Deliverables:** 12 | * Written report (submitted in Canvas, please use report template in Canvas) 13 | * Updates in GitLab 14 | 15 | ## T2.1 - Create an account at Google cloud [basic] 16 | Your first step is to create an account at the [Google cloud](http://cloud.google.com/). **This will (unfortunately) require a valid credit card.** However, if you create a new account you will get access [to 300$ in credit and access to a free tier](https://cloud.google.com/free/docs/gcp-free-tier/#free-trial) for 90 days, which in combination should be more than enough to ensure that no costs will accrue while working on the course assignments. Further, not every member of the group needs to create an account - you can also create one billing account and individual sub-accounts for the all group members. 17 | 18 | **It is important that you remember that, even though you are using a free tier, you are working with a real, billed, live service. Monitor your charges in the Google dashboard at least every few days, do not start large instances that you don't actually use, make sure to terminate clusters and instances that you are not using any longer, and keep your cloud credentials private. At no point should cloud or instance passwords be committed to Git or made public in some other manner.** 19 | 20 | Your group will need at least one billing account, but every group member should have access to this account (that does not mean that somebody should give out the admin password to their Google account - create subaccounts with appropriate rights). Creating individual billing accounts for multiple or all students can be useful so that everybody can experiment on their own without impacting the overal "budget", but the CI pipeline we will build next week will need to deploy into a *single* Google cluster. 21 | 22 | Please do not hesitate to contact Philipp or Hamdy on Slack or via email if you have any questions or feel insecure about any of this. 23 | 24 | **This task does not require any explicit deliverable. *Do not* document your account credentials in your weekly report ;)** 25 | 26 | ## T2.2 - Getting started with Kubernetes [basic] 27 | Your next task is to get a grasp on the basics of Kubernetes and Helm. To do so, we will, for a brief moment, step away from ScalyShop and utilize an even simpler Node-based example application. 28 | 29 | We will follow [this tutorial](https://web.archive.org/web/20200417012757/https://thecloud.christmas/2019/16). Scan it now, and familiarize yourself with the basic concepts of Kubernetes and Helm as discussed in Lecture 4. 30 | 31 | Install gcloud (the Google cloud command line utilities), Kubectl (the Kubernetes commandline client), and Helm. Make sure that your gcloud client is authenticated against your Google cloud account using `gcloud auth login` and following the subsequent instructions. 32 | 33 | Now follow the tutorial linked above. 
Create a Kubernetes cluster with **two** nodes, which we will continue using in the rest of the project (use the argument `--num-nodes 2` to start only two nodes rather than the default of three, and specify an explicit region `--region europe-north1-a` when running your `gcloud container cluster` command). Successfully deploy the example Node application described in the tutorial. 34 | 35 | If you run into problems, use kubectl to narrow down the problem (`kubectl get pods` lists all pods and their state - CRASHLOOPBACKOFF means that the container failed at startup and Kubernetes tries to restart it; use `kubectl logs ` to look at the log of the failing pod). 36 | 37 | It *may* be useful to create a "development instance" using the [Google Compute Engine](https://cloud.google.com/compute), and use this instance as your client to run gcloud and kubectl commands from. This eliminates some networking, authentication, and Docker issues (and at least gcloud is already pre-installed on Google instances). 38 | 39 | We are not going to be using the "christmas" example application further in the assignment. However, we *will* continue to use the cluster you created as part of the tutorial (so don't delete it just yet). 40 | 41 | **Create a new project in your GitLab group, and commit your Helm chart there. Further, briefly document your working solution in your weekly report. Document with one or two screenshots your working solution, and briefly describe any major problems you run into. Make screenshots of the Google dashboard or appropriate kubectl output that shows that you actually have a working cluster.** 42 | 43 | ## T2.3 - Install MongoDB in your cluster [basic] 44 | One of the core promises of Helm as a "package manager" is that it should simplify installing new dependencies into our cluster. We will now explore this by installing a basic MongoDB database in the cluster we created in the previous task. 45 | 46 | We will use the [Bitnami Helm chart](https://bitnami.com). Simply find the right package [here](https://bitnami.com/stack/mongodb/helm), and follow the installation instructions for the cluster you created (do *not* use the Azure marketplace version - we are not using Microsoft Azure). 47 | 48 | When Helm successfully finishes, it will print a variety of "next steps", including instructions how to get the generated root password and how to log into your database. **Save these instructions**, and try them out. You should be able to log into your database. 49 | 50 | Once you have logged into the database server, create a new database for ScalyShop called "scaly" and create a new user that our application will then use to connect to the new database. Execute the following commands from the database server shell (replace `` with a password of your choice): 51 | 52 | `use scalyDB` 53 | 54 | `db.createUser({user: "scaly", pwd: "", roles: ["readWrite"]})` 55 | 56 | Refer to the [MongoDB documentation](https://docs.mongodb.com/manual/reference/method/db.createUser/) if you need more details about this step. Close the MongoDB shell and validate that you can log in using the user you just created. 57 | 58 | **Briefly document your working solution in your weekly report. Make screenshots of the Google cloud dashboard or the kubectl outputs that show that your MongoDB database has been successfully deployed. 
Document any major problems you had.** 59 | 60 | ## T2.4 - Write Helm charts to deploy ScalyShop [optional, 25pts] 61 | As the final task for the week, you can go back to the tutorial we followed in task T2.2. You will note that writing Helm charts to deploy the frontend and the backend of ScalyShop to our new Kubernetes cluster should really not be *that* different since it's just two more, slightly larger, Node applications. 62 | 63 | This is partially correct, but you *will* need to solve a few more open issues that the simple Christmas application kind of glanced over. One of those is how to dynamically tell the backend what database to use, and the frontend what the API endpoint of the backend is. Generally, environment variables are the common solution to such woes, and ScalyShop is implemented to make use of them (as we have already seen in Assignment 1). 64 | 65 | Write two new Helm charts, one for the frontend and one for the backend. Use them to make a test deployment of our case study app in the cluster, and test that the application still works if it's running on Google hardware. 66 | 67 | You are free to modify the ScalyShop source code if you have to, but make sure that your local test deployment with docker-compose still works (as a general rule, you can make any changes you want, but you are not allowed to re-write the application in a way that would prevent you from local testing - for instance by hardcoding your Kubernetes connection strings in your application). 68 | 69 | Evidently, you do not have to write a Helm chart to deploy MongoDB. Instead, your backend should be using the MongoDB instance we just installed in task T2.3, and it should be using the "scaly" user you just created, not the root account. 70 | 71 | This is an optional task, since the CI pipeline we will build next week will use GitLab's Auto DevOps deployment rather than a custom Helm chart to install ScalyShop in the Google cloud. However, it's an excellent learning opportunity, even if we will end up deploying the application differently next week. 72 | 73 | **Commit your Helm charts to the master of the backend and frontend repositories. Document your working solution in your weekly report. Make screenshots of the Google cloud dashboard or the kubectl outputs that show that ScalyShop has been successfully deployed, and that you can browse the store running on the Google cloud.** 74 | 75 | ## T2.5 - Configure an Autoscaling Deployment [optional, 20pts] 76 | So far, we did not really use Kubernetes for the "cool stuff" - namely writing a deployment that can automatically scale out. Time to fix this! 77 | 78 | Extend one of the Helm packages you wrote (the one for the backend from T2.4 if you solved this task, or alternatively your T2.2 solution) and configure an autoscaling deployment based on CPU thresholds. Decide on reasonable CPU thresholds. 79 | 80 | Test if your autoscaler works by generating load on your backend or Node.js application (what's the best way to do this?). If you have trouble generating enough load to get the autoscaler to start new pods, you may have to change the CPU thresholds from above for experimentation purposes. 81 | 82 | **Commit your updated Helm charts to GitLab. Document your working solution in your weekly report. Make screenshots of the Google cloud dashboard or the kubectl outputs that show that autoscaling is actually happening.** 83 | 84 | ## Extra Work [optional, open pts] 85 | 86 | In every assignment, you are free to further explore the topic.
Document in the report if you have done extra work that is not related to any of the tasks above. Depending on the scope of the extra work, we may award points towards a better grade for this extra work. -------------------------------------------------------------------------------- /assignments/Assignment3.md: -------------------------------------------------------------------------------- 1 | # Assignment 3 2 | 3 | In the third assignment, our goal is to set up a CI pipeline for ScalyShop, which will allow us to quickly and seamlessly release new versions of the application directly into "production" (the Google cloud cluster we created previous week). We will make heavy use of GitLab's [Auto DevOps](https://docs.gitlab.com/ee/topics/autodevops/) feature and learn about GitOps in the process. 4 | 5 | **Deadline**: Wednesday Week 5 6 | 7 | **Discussion Session**: Friday Week 5 8 | 9 | **Related Lectures**: Lecture 5 10 | 11 | **Deliverables:** 12 | * Written report (submitted in Canvas, please use report template in Canvas) 13 | * Updates in GitLab 14 | 15 | ## T3.1 - Connect your Kubernetes Cluster to GitLab [basic] 16 | Now it's time to put the Kubernetes cluster we created in the previous assignment to work, and deploy ScalyShop to it. To this end, we first need to connect your GitLab projects to the cluster. This will be required to set up a seamless CI pipeline, enabling GitLab to release new software versions directly in the cluster. 17 | 18 | Log in to GitLab and again locate the "group" we created for you ("dat490-2022-groupid"). Select the entry "Kubernetes" from the navigation panel on the left. Choose to "Connect cluster with certificate" (green button), and then choose to connect an existing cluster. 19 | 20 | Choose a name for your cluster, leave the environment scope as "*" (changing this would allow you to use separate clusters for staging, pre-production, and production builds - * just means that the cluster will be used for all kinds of deployments). Fill out the remaining fields according to the [detailed instruction provided by GitLab](https://git.chalmers.se/help/user/project/clusters/add_remove_clusters.md#add-existing-cluster). Following the instructions will require you to use the `kubectl` tool you have already gotten to know last week. 21 | 22 | After successful configuration, the cluster will be listed in GitLab, along with some basic stats (number of nodes in the cluster, overall utilization, etc.). Ensure that you have added a "group" level cluster, since all of the different sub-projects (frontend, backend, etc.) will be using the same cluster and you don't want to configure it fresh for each project. 23 | 24 | If you have created multiple Google accounts and clusters last week, you need to decide on one that you want to use going forward (in principle you can also connect multiple clusters to GitLab, but all your components should deploy into the same one to avoid networking issues). 25 | 26 | **Briefly document in your weekly report using a screenshot that you have successfully connected to the cluster. You only need to write more about this in the report if you had to do some unexpected extra work.** 27 | 28 | ## T3.2 - Install a GitLab Runner [basic] 29 | 30 | Our ultimate goal for this week is to build a CI pipeline that not only deploys to the cluster, but also uses resources from the cluster itself for the various build and test steps. Hence, we now need to install (one or multiple) GitLab runners (a little helper tool that can execute GitLab jobs). 
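As a preview of where we are heading (the registration steps are described next), the Helm `values.yaml` for such a runner could look roughly like the following sketch. The URL and registration token are placeholders that you will obtain from GitLab below, and the exact key names can differ between chart versions, so always cross-check with the chart documentation:

```yaml
# values.yaml for the gitlab-runner Helm chart - illustrative sketch only.
gitlabUrl: https://git.chalmers.se/                         # the GitLab URL shown under Settings / CI/CD / Runners
runnerRegistrationToken: "REPLACE_WITH_REGISTRATION_TOKEN"  # the token shown in the same place
concurrent: 4              # allow a few jobs to run in parallel (the default is 1)
rbac:
  create: true             # let the chart create a service account for the runner
runners:
  privileged: true         # needed so the runner can build Docker images for Auto DevOps
```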
31 | 32 | Runners can be registered on different "levels" (for an individual project, a group of projects, or everybody). We want to create a runner for your group. To do so, navigate to your project group on GitLab, and open "Settings / CI/CD". Expand the entry for "Runners". **Disable** shared runners (the default ones provided by Chalmers won't work for us), and note down the two configuration entries shown on the left side of the screen (the GitLab URL and a registration token). We will need those later. 33 | 34 | Now we need to actually install a runner. We will use our old friend Helm to install the GitLab runner in our Kubernetes cluster. Check out the [extensive documentation](https://docs.gitlab.com/runner/install/kubernetes.html) before getting started, and create a `values.yaml` file to configure Helm. You will need to set the GitLab URL and registration token you saved earlier, and you will also need to enable [privileged mode](https://docs.gitlab.com/runner/install/kubernetes.html#running-privileged-containers-for-the-runners). It may also be useful to increase the [number of maximal concurrent builds](https://docs.gitlab.com/runner/install/kubernetes.html#controlling-maximum-runner-concurrency), since the default is just 1. Save your `values.yaml` file and run Helm according to the instructions (it's ok to just install your runner in the default Kubernetes namespace). Your runner should now appear in the GitLab UI under "Available runners". 35 | 36 | **Note:** 37 | Our Google Kubernetes cluster uses Role-Based Access Control (RBAC) to restrict cluster access. At the moment our new runner won't work yet, since Google won't allow it to start new pods. There are (of course) ways to set this up properly, but for simplicity you can fix this by giving all the cluster's "service accounts" superuser privileges. Run the following `kubectl` command to do so: 38 | 39 | `kubectl create clusterrolebinding serviceaccounts-cluster-admin --clusterrole=cluster-admin --group=system:serviceaccounts` 40 | 41 | Refer to [Google's documentation](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#service-account-permissions) to learn how to set this up properly (yes, the suggestion above is the "strongly discouraged" one - feel free to use one of the better options, namely creating a separate service account with the correct rights, but then you will need to adapt your `values.yaml` file to configure this service account with your runner). 42 | 43 | **Briefly document your solution in your weekly report, and add a screenshot that demonstrates that your new runner is registered.** 44 | 45 | ## T3.3 - Set up Auto DevOps for the Backend and Frontend [basic] 46 | 47 | With these setup steps out of the way, we can start looking into setting up the CI pipelines for our two ScalyShop subprojects. 48 | 49 | We will be using GitLab's handy [Auto DevOps feature](https://docs.gitlab.com/ee/topics/autodevops/). Familiarize yourself with the capabilities of Auto DevOps based on the documentation, and then enable it in the backend following the instructions (you can either enable it separately for both projects, or simply make Auto DevOps the default for the entire group). Configure your pipeline so that all builds should be pushed straight to production, and that a new build should be triggered on every new commit to master. 50 | 51 | You can already test your CI/CD pipeline now, even though it won't entirely work yet. Navigate to CI / CD in the backend project, and press "Run Pipeline". 
Observe the (fairly sophisticated) default pipeline Auto DevOps has created for you. It should work until the build reaches the "Production" stage, the stage where the backend would actually be deployed to the cluster. To make this step also succeed, some additional work will be needed (which we will do a little later). 52 | 53 | **Note:** if either your build stage or all stages fail, the problem is most likely in the runner that we created in the previous task. It is important that the runner has sufficient rights to create pods (see the comment above regarding admin access), and that the runner runs in privileged mode. 54 | 55 | **Briefly document your new CI/CD pipelines in your weekly report. Add screenshots of the pipelines, and explore for each step what is actually happening there (summarize briefly in your report). As a reminder, it's ok (for now) if the build fails in the Production stage.** 56 | 57 | ## T3.4 - Customize your pipelines [optional, 10pts] 58 | 59 | So far, we have relied entirely on the "default" pipeline that we get from using Auto DevOps. However, as you have probably observed, not all stages really make sense for ScalyShop (and at least one stage takes a very long time to complete). 60 | 61 | Your task is now to do some small customizations to the build pipelines for both projects. Create a `.gitlab-ci.yml` file in both projects [following the instructions](https://docs.gitlab.com/ee/topics/autodevops/customize.html#customizing-gitlab-ciyml). Disable some of the more unnecessary or annoying default stages. Also disable the default Postgres server that Auto DevOps automatically provisions in the standard configuration (and which we are not using at all). 62 | 63 | Commit your `.gitlab-ci.yml` file and push it to master. If you have set up everything correctly, simply pushing to master should be enough to entice GitLab to run the build pipeline (and it will automatically pick up your new configuration from the file). Look at your build on GitLab and observe how your pipeline has changed from the previous default builds. 64 | 65 | **Briefly document in the report what changes you have made to your pipelines, and why.** 66 | 67 | ## T3.5 - Getting Started with GitOps [basic] 68 | 69 | [GitOps](https://about.gitlab.com/topics/gitops/) is the idea of applying standard software engineering practices to operations and infrastructure automation, and to define cluster configurations in code in a dedicated cluster configuration project (rather than by manually running `kubectl` or Helm commands). Hence, we want to use a dedicated project in GitLab, where we will define our cluster configurations going forward (and let GitLab manage the cluster by running these configuration scripts). 70 | In this task, we are going to use the project `cluster-management` that you can already find in your GitLab group. 71 | 72 | First, configure your Kubernetes cluster to use this project for GitOps. Navigate to your cluster configuration in the GitLab UI, find the "Advanced Settings", and select this project as cluster management project. Save your changes. 73 | 74 | GitLab has recently changed their approach to GitOps. For this course, we are going to stick with the older-but-simpler "Managed Applications" approach (rather than managing the cluster through an agent). Explore the code in the cluster management project following the [instructions](https://docs.gitlab.com/ee/user/clusters/applications.html). 
Configure the project to install two additional applications - the `certManager` application (which will handle SSL certificates) and `ingress` (an Nginx based HTTP proxy and load balancer). Both of these will be required to make our production deployment work. 75 | 76 | Commit your changes to the cluster management project and push them to the master. This should trigger the pipeline, which should install the additional applications in our cluster (this may take a while). You can follow what happens by looking at the build stage in the CI/CD pipeline of the cluster management project. From now on, when we want to install additional applications (or remove existing ones) we can do so simply by re-configuring our cluster configuration project and re-running the build. 77 | 78 | **The cluster management project should have installed two new applications ("workloads") in your cluster. Explore what has happened in the Google Kubernetes GUI or using kubectl. Demonstrate using some screenshots what has happened in your weekly report.** 79 | 80 | ## T3.6 - Fix the production deployment stages [basic] 81 | 82 | Now with the certManager and ingress installed, we are finally ready to fix the production deployment of the backend and frontend, which has been failing so far. Three steps will be required: 83 | 84 | - We need to configure a base URL for our application. 85 | - We need to configure both frontend and backend to expose their services on port 5000, since this is what [Auto DevOps expects by default](https://docs.gitlab.com/ee/topics/autodevops/stages.html#auto-build-using-a-dockerfile). 86 | - We need to find a way to dynamically configure our application correctly (tell the backend what database to use, tell the frontend the API endpoint of the backend, etc.). 87 | 88 | The first is easy enough. Navigate (yet again) to your cluster in the GitLab UI and find the (currently empty) field *base domain*. Normally we would be entering the domain name of our application here. Unfortunately, we don't own domains for this project. However, we can use [nip.io](https://nip.io), a simple service that can simulate domains for IP addresses. Find out the public IP address of your ingress controller (an easy way is to locate the ingress controller in your Google Cloud dashboard, and copy the public IP address from there - it is likely the only workload you have with the type "Load Balancer"). Now use the following as base domain name: `IPADDRESS.nip.io` (where you replace dots in the IP address with dashes, e.g., `34-88-168-112.nip.io`). Save your change. 89 | 90 | As for the second and third problems: as we know from the first assignment, ScalyShop is already implemented in a way to enable dynamically setting all important deployment parameters through environment variables. You will most likely need to use them now. 91 | 92 | Some tips: 93 | 94 | - Both front- and backend support an environment variable `PORT` to configure what port the respective service uses. Set both to 5000. It's ok now that both services use the same port, since they will be running in different Kubernetes pods! 95 | - Solving the second and third problems will likely require you to slightly extend the Dockerfiles you created in the first assignment. You can use `ARG` commands in Docker to define variables that can be passed into a Dockerfile during build, and you can use `ENV` to set environment variables that will also be available at container runtime. 
A short discussion is available [here](https://vsupalov.com/docker-arg-vs-env/). 96 | - You can pass arguments to the GitLab build using the [AUTO_DEVOPS_BUILD_IMAGE_EXTRA_ARGS CI variable](https://docs.gitlab.com/ee/topics/autodevops/customize.html#passing-arguments-to-docker-build). You can either set this variable in the GitLab UI under Settings / CI/CD / Variables, or, if you have already created custom `.gitlab-ci.yml` files in task T3.4, it may be nicer and easier to set the correct variables there. 97 | - In the backend, simply setting the environment variables should do the trick. However, for the frontend, you will need to dynamically generate a correct [`.env` file](https://cli.vuejs.org/guide/mode-and-env.html#environment-variables) in the root of the frontend project (this is how Vue.js expects environment variables to be passed). This can be done quite easily in the Dockerfile of the frontend project. 98 | 99 | This task is completed if you can run the builds of the backend and frontend projects end-to-end. The Production stage in the build pipeline should install the application in your cluster. If everything goes right, your backend should be publicly available at an address that looks something like this: `https://courses-dat490-example-solution-dat490-backend-example.34-88-168-112.nip.io`, and your frontend should be available under an address like `https://courses-dat490-example-solution-dat-490-frontend-example.34-88-168-112.nip.io`. You may have to accept some invalid certificates, though (our certificate manager would need a little more love to actually issue valid certificates). Click around in your frontend and ensure that everything works as expected. Particularly, ensure that the status message on the landing page reads `Your server appears to be live and well.` (if you see a `Connection Refused` there, your frontend is running but not able to connect to the backend). 100 | 101 | **Push all changes that were necessary in your Dockerfiles and other code to your repositories. Briefly document everything you have done in this task in your weekly report, and show screenshots of your frontend running in your cluster.** 102 | 103 | ## T3.7 - Reflect on your CI/CD pipeline [optional, open pts] 104 | 105 | As a last task, you may reflect in your report on what we have built in this week and the last. What's good about the setup we ended up with after task T3.5, and what limitations does our deployment pipeline still have? What extensions or changes would you like to implement if you were building ScalyShop as part of your daily work? Especially try to relate to what we discussed in the lecture, and discuss what concepts and ideas from the lecture our solution already implements. 106 | 107 | **Describe the outcomes of your analysis in your weekly report. Be specific, and refer to the lecture content or Internet resources as appropriate.** 108 | 109 | ## T3.8 - Write a Custom CD Quality Gate [optional, 10 - 20pts, depending on complexity] (new!) 110 | 111 | So far, we have only used the quality gates (steps in our CD pipeline) that come automatically with Auto DevOps. As you have seen, this already gives us access to most of the "typical" activities one wants to conduct when deploying a Web-based system. 112 | 113 | Now, as a learning opportunity, you can try to write your own additional quality gate (what this step entails is up to you). You do this by extending the file `.gitlab-ci.yml` (see the documentation [here](https://docs.gitlab.com/ee/ci/yaml/gitlab_ci_yaml.html)).
This should not completely replace the pipeline you built using Auto DevOps, it should just be an additional step in the Auto DevOps pipeline. 114 | 115 | **Describe the goals of your custom quality gate as well as its implementation. Document it using a screenshot in your weekly report.** 116 | 117 | ## Extra Work [optional, open pts] 118 | 119 | In every assignment, you are free to further explore the topic. Document in the report if you have done extra work that is not related to any of the tasks above. Depending on the scope of the extra work, we may award points towards a better grade for this extra work. -------------------------------------------------------------------------------- /assignments/Assignment4.md: -------------------------------------------------------------------------------- 1 | # Assignment 4 2 | 3 | In the fourth assignment, we will re-architect ScalyShop for more fine-grained scalability. Our focus here will be on micro- and nanoservices. We will extract and deploy a microservice, and add simple new functionality using a [Google Cloud Function](https://cloud.google.com/functions). 4 | 5 | **Deadline**: Wednesday Week 6 6 | 7 | **Discussion Session**: Friday Week 6 8 | 9 | **Related Lectures**: Primarily Lecture 3 and 6 10 | 11 | **Deliverables:** 12 | * Written report (submitted in Canvas, please use report template in Canvas) 13 | * Updates in GitLab 14 | 15 | ## T4.1 - Extracting a Microservice [optional, 25pts] 16 | Go back to the analysis of ScalyShop's architecture from assignment 1, and identify one feature that could be extracted into a microservice (in this assignment we are not going to worry too much if it would actually be _useful_ in practice to split up such a small application). Your next task is to implement this extraction. 17 | 18 | Create a new code repository on GitLab and implement the new service. It's perfectly fine to copy over some of the existing code (in the sense of a "copy migration" as discussed in the lecture). Your new microservice should be a "true" microservice - it should manage its own data (which should be stored in its own database), and should offer access to this data through its REST API (if necessary). However, the database can be a different MongoDB database managed by the same MongoDB instance (you do not need to set up a new database management server in your Kubernetes cluster - but of course you can if you want to). 19 | 20 | You are free to implement your new microservice using Express (just like the original backend), or choose any other technology stack. Using NPM / Express / Javascript may ease code reuse from the original implementation, but if you are already familiar with some other Web technology stack (e.g., Java EJB, or Ruby on Rails) it may be faster to just use what you already know. 21 | 22 | Once your new service is implemented, switch the original implementation (e.g., by commenting out the relevant parts of the original code and replacing it by invocations to your new service). It is suggested that you do not actually remove code in this step, as two alternative implementations (one monolithic, one microservice-based) will be useful in the next assignment sheet. Write a Dockerfile for your new service, and update the `docker-compose.yml` you developed in the first assignment. Test your new service locally, and check if everything appears to be working correctly. 
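To make the expected shape of such a service a bit more concrete, the sketch below shows what a minimal extracted Express microservice could look like. The chosen feature (product reviews), the route paths, and the environment variable names are purely illustrative assumptions - your own service will manage different data and expose a different API.

```javascript
// server.js - minimal sketch of an extracted microservice (hypothetical "product reviews" feature).
const express = require('express');
const mongoose = require('mongoose');

const port = process.env.PORT || 5000;
const dbHost = process.env.REVIEWS_DB_HOST || 'localhost'; // placeholder variable name

// The service owns its data: a separate database on the shared MongoDB instance.
mongoose.connect(`mongodb://${dbHost}:27017/reviewsDB`);

const Review = mongoose.model('Review', new mongoose.Schema({
  productId: String,
  rating: Number,
  comment: String,
}));

const app = express();
app.use(express.json());

// Other services access review data only through this REST API.
app.get('/api/products/:productId/reviews', async (req, res) => {
  const reviews = await Review.find({ productId: req.params.productId });
  res.json(reviews);
});

app.post('/api/products/:productId/reviews', async (req, res) => {
  const review = new Review({ productId: req.params.productId, ...req.body });
  await review.save();
  res.status(201).json(review);
});

app.listen(port, () => console.log(`Reviews service listening on port ${port}`));
```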
23 | 24 | Once you have validated that ScalyShop still operates correctly with your newly cut-out microservice, you should set up a deployment pipeline for your new service (similar to what you did in Assignment 3, but now for your new service repository) and push your new service to GitLab. Validate that your new service correctly gets deployed to your Kubernetes cluster, and then also push your changed front- and backend (so that the version deployed in Kubernetes is now using your new service instead of the original monolithic implementation). Validate that your new deployable architecture still works correctly end-to-end in your Kubernetes cluster. 25 | 26 | **Briefly document in your weekly report what feature you have selected for microservice extraction, and describe your new implementation.** 27 | 28 | ## T4.2 - Integrating a Cloud Function [optional, 25pts] 29 | As a second optional step this week, we want to look at [Google cloud functions](https://cloud.google.com/functions) as a simple way to write, deploy, and use small, directly cloud-hosted nanoservices in Web projects. 30 | 31 | First decide on a (small) additional feature for ScalyShop that you could implement using a FaaS nanoservice. If you can think of something useful that can make good use of the power of functions that's perfect, but if not you can also implement a demonstration that's a bit more artificial. 32 | 33 | Follow the [tutorial](https://codelabs.developers.google.com/codelabs/cloud-starting-cloudfunctions#0) to set up, implement, and deploy your function. You can choose to implement your function in a wide variety of programming languages and select between different trigger types (for our project, you will most likely want to use an HTTP trigger, since that's the most straight-forward to integrate into ScalyShop). If your function is very simple you can implement it directly in the cloud IDE in the Google dashboard. However, even if you choose to do that, please save your function code (e.g., in a new Git repository) and submit it. Of course you can also edit your function code in a file and simply copy it into the Google dashboard once you are happy. 34 | 35 | Once you have successfully deployed your function you'll need to test that it works appropriately. Depending on how complicated your function is and what types of HTTP requests it expects you can either do this very simply using a browser or a command line tool such as `wget` or `curl`, or use a full-blown API testing tool such as [Postman](http://postman.com/). 36 | 37 | Once your function works as expected, you can integrate it into your ScalyShop frontend. You can use Axios, the same library that the frontend also uses to send HTTP requests to the ScalyShop backend, to send HTTP requests to your function. As usual, first test if everything works locally (even though it's not entirely "locally" any longer), and then push to master to have the CI/CD pipeline we set up in Assignment 3 automatically deploy your changes to production. Then validate that your new feature also works when deployed to your Kubernetes cluster. 38 | 39 | **Create a new project in your GitLab group and commit the source code of your function there. Briefly document in your weekly report what feature you have implemented and how. Also briefly elaborate how you have validated that your cloud function works correctly.** 40 | 41 | ## Extra Work [optional, open pts] 42 | 43 | In every assignment, you are free to further explore the topic. 
Document in the report if you have done extra work that is not related to any of the tasks above. Depending on the scope of the extra work, we may award points towards a better grade for this extra work. -------------------------------------------------------------------------------- /assignments/Assignment5.md: -------------------------------------------------------------------------------- 1 | # Assignment 5 2 | 3 | In the fifth and last assignment, we will now integrate a Prometheus based monitoring solution. We will also investigate the feature toggling functionality integrated into GitLab. 4 | 5 | **Deadline**: Wednesday Week 7 6 | 7 | **Discussion Session**: Friday Week 7 8 | 9 | **Related Lectures**: Lectures 8, 9, and 10 10 | 11 | **Deliverables:** 12 | * Written report (submitted in Canvas, please use report template in Canvas) 13 | * Updates in GitLab 14 | 15 | ## T5.1 - Setting up Prometheus [basic] 16 | Your first task in this assignment will be to install yet another tool into your Kubernetes cluster, namely a [Prometheus](https://prometheus.io) monitoring solution. The easiest way to do so is to use our cluster management project from T3.5 in assignment 3. Simply open the configuration in the project, enable Prometheus there, and push your changes. Your cluster management project should now install Prometheus in the cluster. 17 | 18 | After that, we also need to enable a Prometheus integration in the GitLab UI. Navigate to your cluster configuration, select the "Integrations" tab, and enable the checkbox "Enable Prometheus integration". GitLab should now be able to automatically assess the health of the cluster and of the applications deployed to the cluster. Find the "Health" tab of your cluster page in GitLab, and observe that you are now also able to track cluster CPU and memory usage directly in GitLab. 19 | 20 | More importantly, Prometheus will also track performance and error rates of our deployed services. Select one of your services (e.g., the backend) and navigate to Operations / Metrics (it's normal if data for some metrics is not available, e.g., we are not using an AWS ELB so no data will be shown in this entry). It is plausible that the data looks rather "boring" at the moment (many straight lines, and no errors) - after all, our application is not actually used by anybody, so at the moment there will probably be few requests to show here. 21 | 22 | Experiment a bit with your monitoring solution - check what happens if you use the application. The admin view in the frontend has a few buttons that should help you to experiment with your monitoring: 23 | 24 | - The buttons "Trigger 404 Fault" and "Trigger 500 Fault" ask the backend explicitly to send back an error (a 404 Not Found and 500 Internal Server Error, respectively). 25 | - "Trigger Long-Running Request" can be used to batch-add a large amount of data to the database (to increase memory consumption and page load times). The slider below the button can be used to configure how many entries should be added to the database. 26 | - "Remove Test Data" deletes all the test data that the previous button adds. Deleting a lot of data is a rather expensive operation. 27 | 28 | **Experiment with your monitoring solution (for all your deployed services) and document your findings in your report. Show screenshots of your dashboard, and try to interpret what you see. What is expected, what is unexpected?
What happens if you crash your server?** 29 | 30 | ## T5.2 - Setting Up a Custom Dashboard [optional, 10 - 25pts, depending on complexity] 31 | To actually generate the dashboard we used so far, GitLab uses a system called [Grafana](https://grafana.com). Grafana (and GitLab) of course allow us to customize our monitoring dashboard, or define entirely new dashboards. 32 | 33 | Refer to the GitLab [documentation about custom dashboards](https://docs.gitlab.com/ee/operations/metrics/dashboards/). Duplicate the default dashboard as a starting point (there is a button in the UI that allows you to do this). Remove the panels we are not actually using in your custom dashboard, and add at least one or two custom views that show metrics that the default view does not display. 34 | 35 | **Document your custom dashboard in your report - include both the necessary YAML code to define it and some screenshots of the custom dashboard in action.** 36 | 37 | ## T5.3 - Exploring Canary Releases with GitLab [basic] 38 | So far, whenever we pushed a change to our master branch, the change immediately got rolled out to all customers. This is often not the behavior we actually want for real applications. Instead, we often want to be able to "test" a change first on a subset of requests before exposing it to everybody. Let's implement this idea for the frontend service. 39 | 40 | Our existing CD pipeline can be extended quite easily in this manner. Refer to the GitLab [documentation about customizing AutoDevOps](https://docs.gitlab.com/ee/topics/autodevops/customize.html). If you have not done so previously, now is the time when you need to create a `.gitlab-ci.yml` file in the root of the frontend project, as this is the place where we customize our deployment pipeline. 41 | 42 | Configure an incremental rollout with 10 replicas by editing your `.gitlab-ci.yml` file. You can choose either a _timed_ or a _manual_ rollout strategy, and it's up to you how fine-grained the rollout should be (GitLab's default is to create 10%, 25%, 50%, and 100% deployments, which is probably not a bad starting point). Push your changes to master, and observe in the "CI / CD" tab of the GitLab UI how deployments now behave. Also check in "Operations / Environments" if you can see your canaries in action. 43 | 44 | **Briefly document (e.g., using screenshots) your working CD pipeline using canary releases. Show your pipeline and also your active environments during different stages of the rollout. Validate that your application still works. If you want to _really_ see your canary in action, make a small change to the frontend (e.g., change the color of an element), push the change, roll it out to 50% of your pods, and test by reloading (or using different browsers) if you can see both versions of the application at different times.** 45 | 46 | ## T5.4 - Experimenting with Feature Flags in the Backend [optional, 20pts] 47 | Canary releases are useful if we want to test a risky change first on a subset of users. A related concept is feature toggles (also known as feature flags), which are source code level constructs (e.g., new features or UI changes) that can be turned on and off from GitLab (without redeploying the application). In this task we are going to experiment with feature toggling in our backend service.
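To give you a feel for what this looks like in code (the concrete setup steps follow below), here is a minimal sketch of a feature toggle in an Express app. The URL, instance ID, flag name, and response texts are placeholders and assumptions - GitLab shows the real connection values for your project in its UI.

```javascript
// Minimal sketch of a GitLab-backed feature flag using the Unleash client.
const express = require('express');
const { initialize, isEnabled } = require('unleash-client');

initialize({
  url: 'https://git.chalmers.se/api/v4/feature_flags/unleash/<project-id>', // placeholder
  appName: 'production',        // should match the environment name of your deployment
  instanceId: '<instance-id>',  // placeholder, copied from the GitLab UI
});

const app = express();

// Example: switch the server status message at runtime, without redeploying.
app.get('/api/serverstatus', (req, res) => {
  if (isEnabled('new-status-message')) {   // flag created and toggled in GitLab
    res.send('ScalyShop is feeling fantastic today!');
  } else {
    res.send('Your server appears to be live and well.');
  }
});

app.listen(process.env.PORT || 5000);
```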
48 | 49 | GitLab implements feature toggling through support for the [Unleash library](https://github.com/Unleash/unleash), which is available for many different programming languages (including Javascript / Node.js, which we use to build our backend service). Inspect the [Getting Started guide](https://github.com/Unleash/unleash-client-node) for the Node.js Unleash client, and also check out [GitLab's documentation](https://docs.gitlab.com/ee/operations/feature_flags.html) on the topic. 50 | 51 | Now add at least one simple feature toggle to your backend implementation as a demonstration (the easiest demonstration is probably to simply change the message that is being returned by the endpoint `/api/serverstatus` in `app.js`). The right version of the Unleash library is already added to the backend's dependencies in `package.json`, but you will need to initialize the Unleash client correctly in `app.js` (the [documentation](https://docs.gitlab.com/ee/operations/feature_flags.html#get-access-credentials) will tell you how to retrieve the correct configuration to use for your project). Once the Unleash client is set up correctly, you can create one or multiple feature toggles in your code, and enable / disable them at runtime through the GitLab UI (under "Operations / Feature Flags"). 52 | 53 | **Push your code changes to your project's master branch, and validate that your feature toggle(s) actually work. Document briefly what code changes you had to make, and add screenshots that demonstrate your feature toggles at work.** 54 | 55 | ## T5.5 - Doing a Small Resilience Experiment [optional, 15pts] 56 | We can now do a small resilience experiment in the spirit of chaos engineering. Specifically, we want to check whether our frontend (which should be replicated across 10 Kubernetes pods, after the work we did in task T5.3) is resilient to a single pod that stops responding. 57 | 58 | The backend project already has one endpoint that is explicitly designed to crash the server by starting an endless loop (`/api/crash`, see `app.js` lines 78 to 83). Add a button to the frontend (e.g., in the Admin view) that explicitly invokes this endpoint for testing purposes, test it locally, and then push your change to production. Remember that with the canary release changes we did in task T5.3 you now need to manually trigger a rollout to 100% of your pods in the GitLab UI. 59 | 60 | Now formulate a hypothesis about what will happen in production when you press this button. Test your hypothesis (that is, actually press the button) and validate what actually happens in your system. Is your system resilient to this outage? Does your system eventually recover on its own? If so, how long does it take to recover? 61 | 62 | **Document your findings in your report. Specifically reflect on the impact that the outage of a single pod has on your customers, whether your system can recover without manual intervention, and how long it takes to recover.** 63 | 64 | ## T5.6 - Planning an A/B Experiment [optional, open pts] 65 | 66 | As a final optional task for the project, we can now at least think about how we would conduct an A/B test in our setup. Which parts of our setup would you need to use, and how could you set it up? 67 | 68 | Can you do at least a proof-of-concept of an A/B experiment in the system you have implemented? 69 | 70 | **Describe in your report how you could do an A/B test in our setup, and how you would set it up technically. 
Specifically focus on the technical implementation (e.g., which parts of our solution could we re-use, and how - and what new technologies or features we would need). If you have done a proof-of-concept implementation then describe it and also provide some screenshots and reflections.** 71 | 72 | ## Extra Work [optional, open pts] 73 | 74 | In every assignment, you are free to further explore the topic. Document in the report if you have done extra work that is not related to any of the tasks above. Depending on the scope of the extra work, we may award points towards a better grade for this extra work. -------------------------------------------------------------------------------- /lecture_slides/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/icetlab/devopscourse/7ec84224c1bf938407fb0664ae3c126238cf0ce1/lecture_slides/.DS_Store -------------------------------------------------------------------------------- /lecture_slides/keynote/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/icetlab/devopscourse/7ec84224c1bf938407fb0664ae3c126238cf0ce1/lecture_slides/keynote/.DS_Store -------------------------------------------------------------------------------- /lecture_slides/keynote/01_intro.key: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/icetlab/devopscourse/7ec84224c1bf938407fb0664ae3c126238cf0ce1/lecture_slides/keynote/01_intro.key -------------------------------------------------------------------------------- /lecture_slides/keynote/02_webdev.key: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/icetlab/devopscourse/7ec84224c1bf938407fb0664ae3c126238cf0ce1/lecture_slides/keynote/02_webdev.key -------------------------------------------------------------------------------- /lecture_slides/keynote/03_cloudcomputing.key: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/icetlab/devopscourse/7ec84224c1bf938407fb0664ae3c126238cf0ce1/lecture_slides/keynote/03_cloudcomputing.key -------------------------------------------------------------------------------- /lecture_slides/keynote/04_kubernetes.key: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/icetlab/devopscourse/7ec84224c1bf938407fb0664ae3c126238cf0ce1/lecture_slides/keynote/04_kubernetes.key -------------------------------------------------------------------------------- /lecture_slides/keynote/05_devops.key: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/icetlab/devopscourse/7ec84224c1bf938407fb0664ae3c126238cf0ce1/lecture_slides/keynote/05_devops.key -------------------------------------------------------------------------------- /lecture_slides/keynote/07_techniques.key: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/icetlab/devopscourse/7ec84224c1bf938407fb0664ae3c126238cf0ce1/lecture_slides/keynote/07_techniques.key -------------------------------------------------------------------------------- /lecture_slides/keynote/08_monitoring.key: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/icetlab/devopscourse/7ec84224c1bf938407fb0664ae3c126238cf0ce1/lecture_slides/keynote/08_monitoring.key -------------------------------------------------------------------------------- /lecture_slides/keynote/09_productiontesting.key: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/icetlab/devopscourse/7ec84224c1bf938407fb0664ae3c126238cf0ce1/lecture_slides/keynote/09_productiontesting.key -------------------------------------------------------------------------------- /lecture_slides/pdf/01_intro.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/icetlab/devopscourse/7ec84224c1bf938407fb0664ae3c126238cf0ce1/lecture_slides/pdf/01_intro.pdf -------------------------------------------------------------------------------- /lecture_slides/pdf/02_webdev.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/icetlab/devopscourse/7ec84224c1bf938407fb0664ae3c126238cf0ce1/lecture_slides/pdf/02_webdev.pdf -------------------------------------------------------------------------------- /lecture_slides/pdf/03_cloudcomputing.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/icetlab/devopscourse/7ec84224c1bf938407fb0664ae3c126238cf0ce1/lecture_slides/pdf/03_cloudcomputing.pdf -------------------------------------------------------------------------------- /lecture_slides/pdf/04_kubernetes.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/icetlab/devopscourse/7ec84224c1bf938407fb0664ae3c126238cf0ce1/lecture_slides/pdf/04_kubernetes.pdf -------------------------------------------------------------------------------- /lecture_slides/pdf/05_devops.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/icetlab/devopscourse/7ec84224c1bf938407fb0664ae3c126238cf0ce1/lecture_slides/pdf/05_devops.pdf -------------------------------------------------------------------------------- /lecture_slides/pdf/07_techniques.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/icetlab/devopscourse/7ec84224c1bf938407fb0664ae3c126238cf0ce1/lecture_slides/pdf/07_techniques.pdf -------------------------------------------------------------------------------- /lecture_slides/pdf/08_monitoring.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/icetlab/devopscourse/7ec84224c1bf938407fb0664ae3c126238cf0ce1/lecture_slides/pdf/08_monitoring.pdf -------------------------------------------------------------------------------- /lecture_slides/pdf/09_productiontesting.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/icetlab/devopscourse/7ec84224c1bf938407fb0664ae3c126238cf0ce1/lecture_slides/pdf/09_productiontesting.pdf -------------------------------------------------------------------------------- /project_templates/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/icetlab/devopscourse/7ec84224c1bf938407fb0664ae3c126238cf0ce1/project_templates/.DS_Store 
-------------------------------------------------------------------------------- /project_templates/clustermanagement/.gitlab-ci.yml: -------------------------------------------------------------------------------- 1 | include: 2 | - template: Managed-Cluster-Applications.gitlab-ci.yml -------------------------------------------------------------------------------- /project_templates/clustermanagement/.gitlab/managed-apps/cert-manager/values.yaml: -------------------------------------------------------------------------------- 1 | # See https://hub.helm.sh/charts/jetstack/cert-manager for available values 2 | -------------------------------------------------------------------------------- /project_templates/clustermanagement/.gitlab/managed-apps/cilium/values.yaml: -------------------------------------------------------------------------------- 1 | # See https://gitlab.com/gitlab-org/defend/cilium/-/blob/v1.6.6-backport/install/kubernetes/cilium/values.yaml for available values 2 | -------------------------------------------------------------------------------- /project_templates/clustermanagement/.gitlab/managed-apps/config.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | certManager: 3 | installed: false 4 | letsEncryptClusterIssuer: 5 | installed: false 6 | cilium: 7 | installed: false 8 | crossplane: 9 | installed: false 10 | elasticStack: 11 | installed: false 12 | gitlabRunner: 13 | installed: false 14 | ingress: 15 | installed: false 16 | jupyterhub: 17 | installed: false 18 | prometheus: 19 | installed: false 20 | sentry: 21 | installed: false 22 | vault: 23 | installed: false 24 | -------------------------------------------------------------------------------- /project_templates/clustermanagement/.gitlab/managed-apps/crossplane/values.yaml: -------------------------------------------------------------------------------- 1 | # See https://github.com/crossplane/crossplane/blob/master/cluster/charts/crossplane/values.yaml.tmpl for available values 2 | -------------------------------------------------------------------------------- /project_templates/clustermanagement/.gitlab/managed-apps/elastic-stack/values.yaml: -------------------------------------------------------------------------------- 1 | # See https://github.com/helm/charts/blob/master/stable/elastic-stack/values.yaml for available values 2 | -------------------------------------------------------------------------------- /project_templates/clustermanagement/.gitlab/managed-apps/gitlab-runner/values.yaml: -------------------------------------------------------------------------------- 1 | # See https://gitlab.com/gitlab-org/charts/gitlab-runner for available values 2 | -------------------------------------------------------------------------------- /project_templates/clustermanagement/.gitlab/managed-apps/ingress/values.yaml: -------------------------------------------------------------------------------- 1 | # See https://github.com/helm/charts/tree/master/stable/nginx-ingress#configuration for available values 2 | -------------------------------------------------------------------------------- /project_templates/clustermanagement/.gitlab/managed-apps/jupyterhub/values.yaml: -------------------------------------------------------------------------------- 1 | # See https://github.com/jupyterhub/zero-to-jupyterhub-k8s/blob/master/jupyterhub/values.yaml for available values 2 | -------------------------------------------------------------------------------- 
/project_templates/clustermanagement/.gitlab/managed-apps/prometheus/values.yaml: -------------------------------------------------------------------------------- 1 | # See https://github.com/helm/charts/blob/master/stable/prometheus/values.yaml for available values 2 | -------------------------------------------------------------------------------- /project_templates/clustermanagement/.gitlab/managed-apps/sentry/values.yaml: -------------------------------------------------------------------------------- 1 | # See https://github.com/helm/charts/tree/master/stable/sentry#configuration for available options 2 | 3 | # Admin user to create 4 | # user: 5 | # Indicated to create the admin user or not, 6 | # Default is true as the initial installation. 7 | # create: true 8 | # email: "" 9 | # password: "" 10 | 11 | # email: 12 | # from_address: "" 13 | # host: smtp 14 | # port: 25 15 | # use_tls: false 16 | # user: "" 17 | # password: "" 18 | # enable_replies: false 19 | 20 | # ingress: 21 | # enabled: true 22 | # hostname: "" 23 | 24 | # Needs to be here between runs. 25 | # See https://github.com/helm/charts/tree/master/stable/postgresql#upgrade for more info 26 | # postgresql: 27 | # postgresqlPassword: example-postgresql-password 28 | -------------------------------------------------------------------------------- /project_templates/clustermanagement/.gitlab/managed-apps/vault/values.yaml: -------------------------------------------------------------------------------- 1 | # See https://github.com/hashicorp/vault-helm/blob/master/values.yaml for available values 2 | -------------------------------------------------------------------------------- /project_templates/clustermanagement/README.md: -------------------------------------------------------------------------------- 1 | ### Cluster management template project 2 | 3 | Example [cluster management](https://docs.gitlab.com/ee/user/clusters/management_project.html) project. 4 | 5 | This project is based on a GitLab [Project Template](https://docs.gitlab.com/ee/gitlab-basics/create-project.html). 6 | 7 | Improvements can be proposed in the [original project](https://gitlab.com/gitlab-org/project-templates/cluster-management). 8 | -------------------------------------------------------------------------------- /project_templates/scalyshop-backend/.gitignore: -------------------------------------------------------------------------------- 1 | node_modules 2 | -------------------------------------------------------------------------------- /project_templates/scalyshop-backend/README.md: -------------------------------------------------------------------------------- 1 | # ScalyShop Backend 2 | 3 | Scaly Shop is a three-tier Web application built using [Vue.js](https://vuejs.org/) in combination with [BootstrapVue](https://bootstrap-vue.org) on the frontend and [Express](https://expressjs.com) on the backend that provides the basic infrastructure for a JSON API with MongoDB persistency with [Mongoose](https://mongoosejs.com/). 4 | 5 | ## Backend Structure 6 | 7 | | File | Purpose | What you do? | 8 | | ------------- | ------------- | ----- | 9 | | [README.md](./README.md) | Everything about the backend server | **READ ME** carefully! 
| 10 | [app.js](./app.js) | JavaScript entry point for Express application | Import new routes/controllers | 11 | [controllers/orders.js](controllers/orders.js) and [controllers/products.js](controllers/products.js) | Implementation of Express endpoints | Define new route handlers | 12 | [models/order.js](models/order.js) and [models/product.js](models/product.js) | [Mongoose](https://mongoosejs.com/) models | Define data schema | 13 | [tests/ScalyShop.postman_collection.json](tests/ScalyShop.postman_collection.json) | [Postman test scripts](https://learning.postman.com/docs/postman/scripts/test-scripts/) | — | 14 | [stress_data/stress_test_product.json](stress_data/stress_test_product.json) | Product test example for stress tests | — | 15 | [dummy_data/](dummy_data/) | Product test data | — | 16 | [package.json](package.json) | Project meta-information | — | 17 | 18 | ## Requirements 19 | 20 | * [Node.js](https://nodejs.org/en/download/) (v14, as specified in [package.json](./package.json)) => installation instructions for [Linux](https://github.com/nodesource/distributions), use installers for macOS and Windows (don't forget to restart your Bash shell) 21 | * [MongoDB](https://www.mongodb.com/download-center/community?jmp=nav) (v4) must be running locally on port 27017 => installation instructions for [macOS](https://github.com/joe4dev/dit032-setup/blob/master/macOS.md#mongodb), [Windows](https://github.com/joe4dev/dit032-setup/blob/master/Windows.md#mongodb), [Linux](https://github.com/joe4dev/dit032-setup/blob/master/Linux.md#mongodb) 22 | * [Docker](https://www.docker.com) => installation instructions for [macOS](https://docs.docker.com/desktop/mac/install/), [Linux](https://docs.docker.com/engine/install/) and [Windows](https://docs.docker.com/desktop/windows/install/) 23 | 24 | ## Project setup 25 | 26 | Make sure you are in the backend directory: `cd scalyshop-backend` 27 | 28 | Installs all project dependencies specified in [package.json](./package.json). 29 | 30 | ```bash 31 | npm install 32 | ``` 33 | 34 | ## Start the server with auto-restarts for development 35 | 36 | Automatically restarts your server if you save any changes to local files. 37 | 38 | ```bash 39 | npm run dev 40 | ``` 41 | 42 | ## Start the server 43 | ```bash 44 | npm start 45 | ``` 46 | 47 | ## MongoDB Setup 48 | #### Run Mongo Daemon in the background 49 | ```bash 50 | mongod 51 | ``` 52 | #### Connect to Mongo Shell 53 | ```bash 54 | mongo # On Windows 55 | mongosh # On MacOS 56 | ``` 57 | #### Create a New Database with Authentication 58 | ```bash 59 | use scalyDB 60 | db.createUser({user:"<>", pwd:"<>", roles:["readWrite"]}) 61 | ``` 62 | #### Connect to Mongo Shell with authentication 63 | ```bash 64 | mongo -u <> -p <> scalyDB # On Windows 65 | mongosh -u <> -p <> scalyDB # On MacOS 66 | ``` 67 | ## Debugging with VSCode 68 | 69 | Set a breakpoint and click *Debug > Start Debugging* 70 | 71 | > Learn more in the [VSCode Debugging Docs](https://code.visualstudio.com/docs/editor/debugging). 
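## Running the backend against the authenticated database

If you created an authenticated database as described above, you can pass the credentials to the backend through environment variables when starting it. The variable names below are the ones read in [app.js](./app.js); the user, password, and database values are placeholders that must match what you used in `db.createUser`:

```bash
MONGODB_USER=scalyuser MONGODB_PW=scalypassword MONGODB_DB=scalyDB MONGODB_HOST=localhost MONGODB_PORT=27017 npm start
```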
72 | -------------------------------------------------------------------------------- /project_templates/scalyshop-backend/app.js: -------------------------------------------------------------------------------- 1 | var express = require('express'); 2 | var bodyParser = require('body-parser'); 3 | var mongoose = require('mongoose'); 4 | var morgan = require('morgan'); 5 | var path = require('path'); 6 | var cors = require('cors'); 7 | var history = require('connect-history-api-fallback'); 8 | 9 | var productsController = require('./controllers/products'); 10 | var ordersController = require('./controllers/orders'); 11 | 12 | // Variables 13 | var mongoHost = process.env.MONGODB_HOST || 'localhost'; 14 | var mongoDB = process.env.MONGODB_DB || 'scalyDB'; 15 | var mongoPort = process.env.MONGODB_PORT || '27017'; 16 | var mongoUser = process.env.MONGODB_USER || undefined; 17 | var mongoPW = process.env.MONGODB_PW || undefined; 18 | var port = process.env.PORT || 5045; 19 | 20 | // Connect to MongoDB 21 | // Connection string format: mongodb://root:hugo@localhost:27017/scalyDB 22 | // (or alternatively, without authentication: mongodb://localhost:27017/scalyDB) 23 | if(mongoUser) { 24 | var mongoUri = "mongodb://"+mongoUser+":"+mongoPW+"@"+mongoHost+":"+mongoPort+"/"+mongoDB 25 | } else { 26 | var mongoUri = "mongodb://"+mongoHost+":"+mongoPort+"/"+mongoDB 27 | } 28 | 29 | console.log("Trying to connect to "+mongoUri) 30 | mongoose.connect(mongoUri, { useNewUrlParser: true }, function(err) { 31 | if (err) { 32 | console.error(`Failed to connect to MongoDB with URI: ${mongoUri}`); 33 | console.error(err.stack); 34 | process.exit(1); 35 | } 36 | console.log(`Connected to MongoDB with URI: ${mongoUri}`); 37 | }); 38 | 39 | // Create Express app 40 | var app = express(); 41 | // Parse requests of content-type 'application/json' 42 | app.use(bodyParser.json()); 43 | // HTTP request logger 44 | app.use(morgan('dev')); 45 | // Enable cross-origin resource sharing for frontend must be registered before api 46 | app.options('*', cors()); 47 | app.use(cors()); 48 | 49 | // these are some testing routes that may come in handy during the project 50 | 51 | app.get('/', function(req, res) { 52 | res.json({'message': 'OK'}); 53 | }); 54 | 55 | app.get('/api/serverstatus', function(req, res) { 56 | res.json({'message': 'Your server appears to be live and well.'}); 57 | }); 58 | 59 | // return an error (on purpose, for monitoring testing) 60 | app.post('/api/error', function(req, res, next) { 61 | var errorcode = req.query.statuscode || 404; 62 | console.log("Generating error on purpose: "+errorcode); 63 | res.status(errorcode).json({'Error': true}); 64 | }); 65 | 66 | // go into an endless loop (this will block the entire server) 67 | app.post('/api/crash', function(req, res, next) { 68 | console.log("Crash server via an endless loop"); 69 | for(;;); 70 | }); 71 | 72 | app.use(productsController); 73 | app.use(ordersController); 74 | 75 | // Catch all non-error handler for api (i.e., 404 Not Found) 76 | app.use('/api/*', function (req, res) { 77 | res.status(404).json({ 'message': 'Not Found' }); 78 | }); 79 | 80 | // Configuration for serving frontend in production mode 81 | // Support Vuejs HTML 5 history mode 82 | app.use(history()); 83 | // Serve static assets 84 | var root = path.normalize(__dirname + '/..'); 85 | var client = path.join(root, 'client', 'dist'); 86 | app.use(express.static(client)); 87 | 88 | // Error handler (i.e., when exception is thrown) must be registered last 89 | var env = 
app.get('env'); 90 | // eslint-disable-next-line no-unused-vars 91 | app.use(function(err, req, res, next) { 92 | console.error(err.stack); 93 | var err_res = { 94 | 'message': err.message, 95 | 'error': {} 96 | }; 97 | if (env === 'development') { 98 | err_res['error'] = err; 99 | } 100 | res.status(err.status || 30); 101 | res.json(err_res); 102 | }); 103 | 104 | app.listen(port, function(err) { 105 | if (err) throw err; 106 | console.log(`Express server listening on port ${port}, in ${env} mode`); 107 | console.log(`Backend: http://localhost:${port}/api/`); 108 | console.log(`Frontend (production): http://localhost:${port}/`); 109 | }); 110 | 111 | module.exports = app; -------------------------------------------------------------------------------- /project_templates/scalyshop-backend/controllers/orders.js: -------------------------------------------------------------------------------- 1 | var express = require('express'); 2 | var glob = require("glob"); 3 | var fs = require("fs"); 4 | var Order = require('../models/order'); 5 | 6 | var router = express.Router(); 7 | 8 | // Return all orders 9 | router.get('/api/orders', function(req, res, next) { 10 | Order.find(function(err, orders) { 11 | if (err) { return next(err); } 12 | res.json({'orders': orders}); 13 | }); 14 | }); 15 | 16 | // Return specific order by ID 17 | router.get('/api/orders/:id', function(req, res, next) { 18 | var id = req.params.id; 19 | Order.findById(id, function(err, order) { 20 | if (err) { return next(err); } 21 | if (order === null) { 22 | return res.status(404).json({'message': 'Order with id ${id} not found'}); 23 | } 24 | res.json(order); 25 | }); 26 | }); 27 | 28 | // Delete all orders 29 | router.delete('/api/orders', function(req, res, next) { 30 | Order.deleteMany({}) 31 | .catch(function(error){ 32 | console.log(error); 33 | return res.status(500).json({'message': 'Error while clearing database: '+error}); 34 | }); 35 | return res.status(200).json({'message': 'ok'}); 36 | }); 37 | 38 | // Delete an order given an ID 39 | router.delete('/api/orders/:id', function(req, res, next) { 40 | var id = req.params.id; 41 | Order.findOneAndDelete({_id: id}, function(err, order) { 42 | if (err) { return next(err); } 43 | if (order === null) { 44 | return res.status(404).json({'message': 'Order not found'}); 45 | } 46 | res.json(order); 47 | }); 48 | }); 49 | 50 | // Add a new order 51 | router.post('/api/orders', function(req, res, next) { 52 | var neworder = new Order(req.body); 53 | neworder.save(function (error) { 54 | if (error) { 55 | console.log('Error storing object: '+error); 56 | return res.status(400).json({'message': 'Error storing object: '+error}); 57 | } 58 | }); 59 | return res.status(201).json(neworder); 60 | }); 61 | 62 | // Update an order given an ID 63 | router.patch('/api/orders/:id', function(req, res, next) { 64 | var id = req.params.id; 65 | Order.findById(id, function(err, order){ 66 | if(err) { return next(err); } 67 | if(order == null) { 68 | return res.status(404).json({'message': 'Order with id ${id} not found'}); 69 | } 70 | order.orderRef = (req.body.orderRef || order.orderRef); 71 | order.totalPrice = (req.body.totalPrice || order.totalPrice); 72 | order.orderStatus = (req.body.orderStatus || order.orderStatus); 73 | order.productsList = (req.body.productsList || order.productsList); 74 | order.save(); 75 | res.json(order); 76 | }); 77 | }); 78 | 79 | module.exports = router; 80 | -------------------------------------------------------------------------------- 
/project_templates/scalyshop-backend/controllers/products.js: -------------------------------------------------------------------------------- 1 | var express = require('express'); 2 | var glob = require("glob"); 3 | var fs = require("fs"); 4 | var Product = require('../models/product'); 5 | 6 | var router = express.Router(); 7 | 8 | const JSON_DUMMY_FILES='dummy_data'; 9 | const JSON_STRESS_FILES='stress_data'; 10 | 11 | // Return all products 12 | router.get('/api/products', function(req, res, next) { 13 | Product.find(function(err, products) { 14 | if (err) { return next(err); } 15 | res.json({'products': products}); 16 | }); 17 | }); 18 | 19 | // Return specific product by ID 20 | router.get('/api/products/:id', function(req, res, next) { 21 | var id = req.params.id; 22 | Product.findById(id, function(err, product) { 23 | if (err) { return next(err); } 24 | if (product === null) { 25 | return res.status(404).json({'message': 'Product with id ${id} not found'}); 26 | } 27 | res.json(product); 28 | }); 29 | }); 30 | 31 | // Add a new product 32 | router.post('/api/products', function(req, res, next) { 33 | var newproduct = new Product(req.body); 34 | newproduct.save(function (error) { 35 | if (error) { 36 | console.log('Error storing object: '+error); 37 | return res.status(400).json({'message': 'Error storing object: '+error}); 38 | } 39 | }); 40 | return res.status(201).json(newproduct); 41 | }); 42 | 43 | // Update a product given an ID 44 | router.patch('/api/products/:id', function(req, res, next) { 45 | var id = req.params.id; 46 | Product.findById(id, function(err, product){ 47 | if(err) { return next(err); } 48 | if(product == null) { 49 | return res.status(404).json({'message': 'Product with id ${id} not found'}); 50 | } 51 | product.name = (req.body.name || product.name); 52 | product.category = (req.body.category || product.category); 53 | product.price = (req.body.price || product.price); 54 | product.nrReserved = (req.body.nrReserved || product.nrReserved); 55 | product.nrOrdered = (req.body.nrOrdered || product.nrOrdered); 56 | product.save().then(() => res.json(product)) 57 | // res.json(product); 58 | }); 59 | }); 60 | 61 | // Delete all products 62 | router.delete('/api/products', function(req, res, next) { 63 | Product.deleteMany({}) 64 | .catch(function(error){ 65 | console.log(error); 66 | return res.status(500).json({'message': 'Error while clearing database: '+error}); 67 | }); 68 | return res.status(200).json({'message': 'ok'}); 69 | }); 70 | 71 | // Delete a product given an ID 72 | router.delete('/api/products/:id', function(req, res, next) { 73 | var id = req.params.id; 74 | Product.findOneAndDelete({_id: id}, function(err, product) { 75 | if (err) { return next(err); } 76 | if (product === null) { 77 | return res.status(404).json({'message': 'Product not found'}); 78 | } 79 | res.json(product); 80 | }); 81 | }); 82 | 83 | // Batch-add some predefined products 84 | router.post('/api/products/testdata', function(req, res, next) { 85 | var newproducts = []; 86 | // load JSON files from dir 87 | glob.sync('*.json', {cwd : JSON_DUMMY_FILES}).map(f => { 88 | var json_string = fs.readFileSync(JSON_DUMMY_FILES+'/'+f); 89 | // uses the Mongoose magic to create a new product DTO from JSON data 90 | var raw = JSON.parse(json_string); 91 | var product = new Product(raw); 92 | product.save(function(err) { 93 | if(err) { console.log('Error storing object: '+err) } 94 | }); 95 | newproducts.push(product); 96 | }); 97 | res.status(201).json({'products': newproducts}); 98 | }); 
99 | 100 | // Batch-add a bunch of products (generate a non-blocking long running request) 101 | router.post('/api/products/stress', function(req, res, next) { 102 | var loadparam = req.query.loadparam || 100; 103 | console.log('Starting stress test'); 104 | var raw; 105 | // load one example JSON file 106 | glob.sync('*.json', {cwd : JSON_STRESS_FILES}).map(f => { 107 | var json_string = fs.readFileSync(JSON_STRESS_FILES + '/' + f); 108 | raw = JSON.parse(json_string); 109 | }); 110 | console.log(raw); 111 | console.log('Load: ' + loadparam); 112 | // create X copies of this object 113 | var products = []; 114 | for(var i = 0; i < loadparam; i++) { 115 | console.log('Saving product ' + i); 116 | var product = new Product(raw); 117 | product.save(function(err) { 118 | if(err) { console.log('Error storing object: ' + err) } 119 | }); 120 | products[i] = product; 121 | console.log(products[i]); 122 | } 123 | }); 124 | 125 | // Batch-remove stress test data 126 | router.post('/api/products/unstress', function(req, res, next) { 127 | var raw; 128 | // load one example JSON file 129 | glob.sync('*.json', {cwd : JSON_STRESS_FILES}).map(f => { 130 | var json_string = fs.readFileSync(JSON_STRESS_FILES + '/' + f); 131 | raw = JSON.parse(json_string); 132 | }); 133 | // remove the objects again 134 | Product.deleteMany({name: raw.name}, function(err, product) { 135 | if (err) { return next(err); } 136 | }); 137 | }); 138 | 139 | module.exports = router; 140 | -------------------------------------------------------------------------------- /project_templates/scalyshop-backend/dummy_data/product1.json: -------------------------------------------------------------------------------- 1 | { 2 | "name" : "Minecraft", 3 | "category" : "Sandbox", 4 | "price" : 14.99, 5 | "nrOrdered" : 0, 6 | "nrReserved" : 0 7 | } -------------------------------------------------------------------------------- /project_templates/scalyshop-backend/dummy_data/product2.json: -------------------------------------------------------------------------------- 1 | { 2 | "name" : "Assassin’s Creed", 3 | "category" : "Action-adventure", 4 | "price" : 29.99, 5 | "nrOrdered" : 0, 6 | "nrReserved" : 0 7 | } -------------------------------------------------------------------------------- /project_templates/scalyshop-backend/dummy_data/product3.json: -------------------------------------------------------------------------------- 1 | { 2 | "name" : "Super Mario Bros", 3 | "category" : "Platformer", 4 | "price" : 11.50, 5 | "nrOrdered" : 0, 6 | "nrReserved" : 0 7 | } -------------------------------------------------------------------------------- /project_templates/scalyshop-backend/dummy_data/product4.json: -------------------------------------------------------------------------------- 1 | { 2 | "name" : "Need For Speed", 3 | "category" : "Vehicle simulation", 4 | "price" : 31.29, 5 | "nrOrdered" : 0, 6 | "nrReserved" : 0 7 | } -------------------------------------------------------------------------------- /project_templates/scalyshop-backend/dummy_data/product5.json: -------------------------------------------------------------------------------- 1 | { 2 | "name" : "Mario Kart", 3 | "category" : "Vehicle simulation", 4 | "price" : 12.85, 5 | "nrOrdered" : 0, 6 | "nrReserved" : 0 7 | } -------------------------------------------------------------------------------- /project_templates/scalyshop-backend/models/order.js: -------------------------------------------------------------------------------- 1 | var mongoose = require('mongoose'); 
2 | var Schema = mongoose.Schema; 3 | 4 | var orderSchema = new Schema({ 5 | orderRef : { type: String }, 6 | totalPrice : { type: Number }, 7 | productsList : [ 8 | { 9 | type: String, 10 | ref: "Product" 11 | } 12 | ], 13 | orderStatus : { type: String } 14 | }); 15 | 16 | module.exports = mongoose.model('orders', orderSchema); 17 | 18 | -------------------------------------------------------------------------------- /project_templates/scalyshop-backend/models/product.js: -------------------------------------------------------------------------------- 1 | var mongoose = require('mongoose'); 2 | var Schema = mongoose.Schema; 3 | 4 | var productSchema = new Schema({ 5 | name : { type: String }, 6 | category : { type: String }, 7 | price : { type: Number }, 8 | nrReserved: { type: Number }, 9 | nrOrdered: { type: Number } 10 | }); 11 | 12 | module.exports = mongoose.model('products', productSchema); 13 | -------------------------------------------------------------------------------- /project_templates/scalyshop-backend/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "backend", 3 | "version": "1.0.0", 4 | "engines": { 5 | "node": "14.x" 6 | }, 7 | "private": true, 8 | "description": "Example Application for ExpressJS API with Mongoose for Architectures for Scale-Out Systems (DAT490)", 9 | "main": "./app.js", 10 | "scripts": { 11 | "start": "node ./app.js", 12 | "dev": "nodemon ./app.js", 13 | "server-test": "cross-env-shell MONGODB_URI=mongodb://localhost:27017/serverTestDB MONGODB_DB=serverTestDB \"npm run newman-server\"", 14 | "ci-test": "npm run newman-server", 15 | "newman-server": "cross-env-shell PORT=5046 \"npm run dropdb && run-p --race start newman-wait\"", 16 | "newman-wait": "wait-on http://localhost:5001 && npm run newman", 17 | "newman": "newman run ./tests/ScalyShop.postman_collection.json --env-var host=http://localhost:5001", 18 | "dropdb": "node ./tests/dropdb.js --env-var mongoURI=mongodb://localhost:27017/serverTestDB" 19 | }, 20 | "repository": { 21 | "type": "git", 22 | "url": "git+https://git.chalmers.se/courses/dat490/group-00-backend.git" 23 | }, 24 | "dependencies": { 25 | "body-parser": "^1.19.0", 26 | "connect-history-api-fallback": "^1.6.0", 27 | "cors": "^2.8.5", 28 | "express": "^4.17.1", 29 | "glob": "^7.1.7", 30 | "mongoose": "^5.13.9", 31 | "morgan": "^1.10.0", 32 | "unleash-client": "^3.10.1" 33 | }, 34 | "devDependencies": { 35 | "cross-env": "^7.0.2", 36 | "newman": "^5.1.2", 37 | "nodemon": "^2.0.13", 38 | "npm-run-all": "^4.1.5", 39 | "wait-on": "^5.2.0", 40 | "postman-collection": "v4.1.0" 41 | } 42 | } 43 | -------------------------------------------------------------------------------- /project_templates/scalyshop-backend/stress_data/stress_test_product.json: -------------------------------------------------------------------------------- 1 | { 2 | "name" : "Stress Test Example", 3 | "category" : "Sandbox", 4 | "price" : 14.99, 5 | "nrOrdered" : 0, 6 | "nrReserved" : 0 7 | } -------------------------------------------------------------------------------- /project_templates/scalyshop-backend/tests/ScalyShop.postman_collection.json: -------------------------------------------------------------------------------- 1 | { 2 | "info": { 3 | "_postman_id": "90268579-fa88-4d72-90d6-bc9e9f01e814", 4 | "name": "ScalyShop", 5 | "schema": "https://schema.getpostman.com/json/collection/v2.1.0/collection.json" 6 | }, 7 | "item": [ 8 | { 9 | "name": "getBackendStatus", 10 | "request": { 11 | "method": 
"GET", 12 | "header": [], 13 | "url": { 14 | "raw": "{{host}}", 15 | "host": [ 16 | "{{host}}" 17 | ] 18 | } 19 | }, 20 | "response": [] 21 | }, 22 | { 23 | "name": "getProds", 24 | "event": [ 25 | { 26 | "listen": "test", 27 | "script": { 28 | "exec": [ 29 | "" 30 | ], 31 | "type": "text/javascript" 32 | } 33 | } 34 | ], 35 | "request": { 36 | "method": "GET", 37 | "header": [], 38 | "url": { 39 | "raw": "{{host}}/api/products", 40 | "host": [ 41 | "{{host}}" 42 | ], 43 | "path": [ 44 | "api", 45 | "products" 46 | ] 47 | } 48 | }, 49 | "response": [] 50 | }, 51 | { 52 | "name": "getOrders", 53 | "request": { 54 | "method": "GET", 55 | "header": [], 56 | "url": { 57 | "raw": "{{host}}/api/orders", 58 | "host": [ 59 | "{{host}}" 60 | ], 61 | "path": [ 62 | "api", 63 | "orders" 64 | ] 65 | } 66 | }, 67 | "response": [] 68 | } 69 | ], 70 | "event": [ 71 | { 72 | "listen": "prerequest", 73 | "script": { 74 | "type": "text/javascript", 75 | "exec": [ 76 | "" 77 | ] 78 | } 79 | }, 80 | { 81 | "listen": "test", 82 | "script": { 83 | "type": "text/javascript", 84 | "exec": [ 85 | "" 86 | ] 87 | } 88 | } 89 | ], 90 | "variable": [ 91 | { 92 | "key": "host", 93 | "value": "http://localhost:5045" 94 | } 95 | ] 96 | } -------------------------------------------------------------------------------- /project_templates/scalyshop-backend/tests/dropdb.js: -------------------------------------------------------------------------------- 1 | var mongoose = require('mongoose'); 2 | 3 | // Variables 4 | var mongoURI = process.env.MONGODB_URI; 5 | 6 | if (!mongoURI) { 7 | console.error('Missing MONGODB_URI for dropping test database.'); 8 | process.exit(1); 9 | } 10 | 11 | // Drop database 12 | mongoose.connect(mongoURI, { useNewUrlParser: true }, function (err) { 13 | if (err) { 14 | console.error(`Failed to connect to MongoDB with URI: ${mongoURI}`); 15 | console.error(err.stack); 16 | process.exit(1); 17 | } 18 | mongoose.connection.db.dropDatabase(function () { 19 | console.log(`Dropped database: ${mongoURI}`); 20 | process.exit(0); 21 | }); 22 | }); 23 | -------------------------------------------------------------------------------- /project_templates/scalyshop-frontend/.eslintrc.js: -------------------------------------------------------------------------------- 1 | module.exports = { 2 | root: true, 3 | env: { 4 | node: true 5 | }, 6 | 'extends': [ 7 | 'plugin:vue/essential', 8 | '@vue/standard' 9 | ], 10 | rules: { 11 | 'space-before-function-paren': [2, { 'anonymous': 'always', 'named': 'never' }], 12 | 'no-console': 'off', 13 | 'no-debugger': process.env.NODE_ENV === 'production' ? 'error' : 'off' 14 | }, 15 | parserOptions: { 16 | parser: 'babel-eslint' 17 | } 18 | } 19 | -------------------------------------------------------------------------------- /project_templates/scalyshop-frontend/.gitignore: -------------------------------------------------------------------------------- 1 | node_modules 2 | -------------------------------------------------------------------------------- /project_templates/scalyshop-frontend/README.md: -------------------------------------------------------------------------------- 1 | # Client – Vue.js Frontend 2 | 3 | This [Vue.js](https://vuejs.org/) template provides sample code how to connect to the ExpressJS backend. 4 | 5 | ## Client Structure 6 | 7 | | File | Purpose | What you do? | 8 | | ------------- | ------------- | ----- | 9 | | [README.md](./README.md) | Everything about the client | **READ ME** carefully! 
| 10 | | [public/favicon.ico](public/favicon.ico) | [Favicon](https://en.wikipedia.org/wiki/Favicon) website icon | — | 11 | | [public/index.html](public/index.html) | Static HTML entry point page | — | 12 | | [src/](src/) | src (i.e., source code) | All your code goes in here | 13 | | [src/Api.js](src/Api.js) | Configures HTTP library to communicate with backend | — | 14 | | [src/App.vue](src/App.vue) | Main Vue layout template for all view (or pages) | Change your global template for all views | 15 | | [src/assets/](src/assets/) | Graphical resources | Add your images, logos, etc | 16 | | [src/components/](src/components) | Vue components that are reusable LEGO blocks | Add your custom components here | 17 | | [src/views/](src/views) | Vue components that are separate pages/views | Add new routes/pages/views | 18 | | [src/main.js](src/main.js) | Main JavaScript entry point | — | 19 | | [src/router.js](src/router.js) | Vue routes configuration | Register new routes/pages/views | 20 | | [src/views/Home.vue](src/views/Home.vue) | Home page/view | Replace with your home page/view | 21 | | [package.json](package.json) | Project meta-information | —| 22 | | [.eslintrc.js](.eslintrc.js) | ESLINT configuration | —| 23 | | [vue.config.js](vue.config.js) | Vue configuration | — | 24 | 25 | ## Requirements 26 | 27 | * [Server](../server/README.md) backend running on `http://localhost:5000` 28 | * [Node.js](https://nodejs.org/en/download/) (v12) => installation instructions for [Linux](https://github.com/nodesource/distributions) 29 | * [Visual Studio Code (VSCode)](https://code.visualstudio.com/) as IDE 30 | * [Vetur](https://marketplace.visualstudio.com/items?itemName=octref.vetur) plugin for Vue tooling 31 | * [ESLint](https://marketplace.visualstudio.com/items?itemName=dbaeumer.vscode-eslint) plugin for linting Vue, JS, and HTML code 32 | * [Debugger for Chrome](https://marketplace.visualstudio.com/items?itemName=msjsdiag.debugger-for-chrome) plugin for debugging 33 | * [Google Chrome](https://www.google.com/chrome/) as web browser 34 | * [Vue.js devtools](https://chrome.google.com/webstore/detail/vuejs-devtools/nhdogjmejiglipccpnnnanhbledajbpd?hl=en) plugin for debugging 35 | 36 | ## Project setup 37 | 38 | Make sure, you are in the client directory `cd scalyshop-frontend` 39 | 40 | Installs all project dependencies specified in [package.json](./package.json). 41 | 42 | ```sh 43 | npm install 44 | ``` 45 | 46 | ### Compiles and hot-reloads for development 47 | 48 | Automatically recompiles and refreshes the browser tab if you save any changes to local files. 49 | 50 | ```sh 51 | npm run serve 52 | ``` 53 | 54 | ### Compiles and minifies for production 55 | 56 | Builds the production-ready website into the `dist` directory. Would be the way to actually bundle the application for hosting in a separate web server such as Apache (which we are not planning to do in this class). 57 | 58 | ```sh 59 | npm run build 60 | ``` 61 | 62 | * [JavaScript Standard Style](https://standardjs.com/rules-en.html) 63 | * [Are Semicolons Necessary in JavaScript? (8' background explanation)](https://youtu.be/gsfbh17Ax9I) 64 | 65 | > The Vue.js community [favors](https://forum.vuejs.org/t/semicolon-less-code-my-thoughts/4229) omitting optional semicolons `;` in Javascript. 
66 | 67 | ## Axios HTTP Library 68 | 69 | * [Documentation with Examples](https://github.com/axios/axios#axios) 70 | 71 | ## Bootstrap 4 and BootstrapVue 72 | 73 | * [BootstrapVue Components](https://bootstrap-vue.js.org/docs/components) 74 | * [Layout and Grid System](https://bootstrap-vue.js.org/docs/components/layout/) 75 | * [Link](https://bootstrap-vue.js.org/docs/components/link) 76 | * [Button](https://bootstrap-vue.js.org/docs/components/button) 77 | * [Form](https://bootstrap-vue.js.org/docs/components/form) 78 | * [BootstrapVue Online Playground](https://bootstrap-vue.js.org/play/) 79 | 80 | > Plain [Bootstrap 4](https://getbootstrap.com/) uses a popular JS library called [jQuery](http://jquery.com/) for dynamic components (e.g., dropdowns). However, using jQuery with Vue is [problematic](https://vuejsdevelopers.com/2017/05/20/vue-js-safely-jquery-plugin/) and therefore we use BootstrapVue here. 81 | 82 | ## Debug in VSCode with Chrome 83 | 84 | 1. **[VSCode]** Set a breakpoint in your Javascript code 85 | 2. **[Terminal]** Run `npm run serve` to serve the client 86 | 3. **[VSCode]** Select *Debug > Start Debugging (F5)* to automatically start a debug session in Chrome[1](#1) 87 | 4. **[Chrome]** Browse in Chrome to trigger your breakpoint and the focus will jump back to VSCode 88 | 89 | Find illustrated instructions in the [Vuejs Debug Docs](https://vuejs.org/v2/cookbook/debugging-in-vscode.html). 90 | 91 | 1 Chrome will launch with a separate user profile (not to mess up with your familiar daily Chrome profile) in a temp folder as described in the VSCode [Debugger for Chrome](https://marketplace.visualstudio.com/items?itemName=msjsdiag.debugger-for-chrome). It is recommended to install the [vue-devtools](https://github.com/vuejs/vue-devtools) [Chrome Extension](https://chrome.google.com/webstore/detail/vuejs-devtools/nhdogjmejiglipccpnnnanhbledajbpd) there. 92 | 93 | #### Copyright Disclaimer 94 | ###### The product and basket icons were downloaded from [flaticon.com](https://www.flaticon.com/free-icon/game-console_1083364?term=video%20game&page=1&position=17&page=1&position=17&related_id=1083364&origin=search). 
-------------------------------------------------------------------------------- /project_templates/scalyshop-frontend/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "client", 3 | "version": "0.1.0", 4 | "private": true, 5 | "scripts": { 6 | "serve": "vue-cli-service serve", 7 | "build": "vue-cli-service build" 8 | }, 9 | "dependencies": { 10 | "axios": "^0.24.0", 11 | "bootstrap": "^4.5.2", 12 | "bootstrap-vue": "^2.16.0", 13 | "vue": "^2.6.12", 14 | "vue-router": "^3.4.3" 15 | }, 16 | "devDependencies": { 17 | "@vue/cli-plugin-eslint": "^4.5.4", 18 | "@vue/cli-service": "^4.5.4", 19 | "@vue/eslint-config-standard": "^5.1.2", 20 | "babel-eslint": "^10.1.0", 21 | "eslint": "^7.7.0", 22 | "eslint-plugin-import": "^2.22.0", 23 | "eslint-plugin-node": "^11.1.0", 24 | "eslint-plugin-promise": "^4.2.1", 25 | "eslint-plugin-standard": "^4.0.1", 26 | "eslint-plugin-vue": "^6.2.2", 27 | "vue-template-compiler": "^2.6.12" 28 | } 29 | } 30 | -------------------------------------------------------------------------------- /project_templates/scalyshop-frontend/public/favicon.ico: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/icetlab/devopscourse/7ec84224c1bf938407fb0664ae3c126238cf0ce1/project_templates/scalyshop-frontend/public/favicon.ico -------------------------------------------------------------------------------- /project_templates/scalyshop-frontend/public/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | client 9 | 10 | 11 | 14 |
15 | 16 | 17 | 18 | -------------------------------------------------------------------------------- /project_templates/scalyshop-frontend/src/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/icetlab/devopscourse/7ec84224c1bf938407fb0664ae3c126238cf0ce1/project_templates/scalyshop-frontend/src/.DS_Store -------------------------------------------------------------------------------- /project_templates/scalyshop-frontend/src/Api.js: -------------------------------------------------------------------------------- 1 | import axios from 'axios' 2 | import https from 'https' 3 | 4 | // note that environment variables that should be available in the browser 5 | // need to start with VUE_APP_* 6 | // (see https://cli.vuejs.org/guide/mode-and-env.html#environment-variables) 7 | var backend = process.env.VUE_APP_BACKEND_HOST || 'localhost' 8 | var port = process.env.VUE_APP_BACKEND_PORT || '5045' 9 | var protocol = process.env.VUE_APP_BACKEND_PROTOCOL || 'http' 10 | 11 | // format: http://localhost:5000/api 12 | var backendApiEndpoint = protocol + '://' + backend + ':' + port + '/api' 13 | console.log('Using backend endpoint: ' + backendApiEndpoint) 14 | 15 | export const Api = axios.create({ 16 | baseURL: backendApiEndpoint, 17 | // necessary since we don't have valid https certificates for our backend / frontend 18 | // VERY BAD IDEA IN A REAL APP 19 | httpsAgent: new https.Agent({ 20 | rejectUnauthorized: false 21 | }) 22 | }) 23 | -------------------------------------------------------------------------------- /project_templates/scalyshop-frontend/src/App.vue: -------------------------------------------------------------------------------- 1 | 14 | 15 | 34 | -------------------------------------------------------------------------------- /project_templates/scalyshop-frontend/src/assets/addtocart.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/icetlab/devopscourse/7ec84224c1bf938407fb0664ae3c126238cf0ce1/project_templates/scalyshop-frontend/src/assets/addtocart.jpg -------------------------------------------------------------------------------- /project_templates/scalyshop-frontend/src/assets/close.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/icetlab/devopscourse/7ec84224c1bf938407fb0664ae3c126238cf0ce1/project_templates/scalyshop-frontend/src/assets/close.png -------------------------------------------------------------------------------- /project_templates/scalyshop-frontend/src/assets/edit.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/icetlab/devopscourse/7ec84224c1bf938407fb0664ae3c126238cf0ce1/project_templates/scalyshop-frontend/src/assets/edit.png -------------------------------------------------------------------------------- /project_templates/scalyshop-frontend/src/assets/game.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/icetlab/devopscourse/7ec84224c1bf938407fb0664ae3c126238cf0ce1/project_templates/scalyshop-frontend/src/assets/game.png -------------------------------------------------------------------------------- /project_templates/scalyshop-frontend/src/components/OrderItem.vue: -------------------------------------------------------------------------------- 1 | 10 | 11 | 17 | 18 | 32 | 
-------------------------------------------------------------------------------- /project_templates/scalyshop-frontend/src/components/ProductItem.vue: -------------------------------------------------------------------------------- 1 | 10 | 11 | 18 | 19 | 25 | -------------------------------------------------------------------------------- /project_templates/scalyshop-frontend/src/main.js: -------------------------------------------------------------------------------- 1 | import Vue from 'vue' 2 | import App from './App.vue' 3 | import router from './router' 4 | import { BootstrapVue, BVToastPlugin } from 'bootstrap-vue' 5 | 6 | import 'bootstrap/dist/css/bootstrap.css' 7 | import 'bootstrap-vue/dist/bootstrap-vue.css' 8 | 9 | Vue.use(BootstrapVue) 10 | Vue.use(BVToastPlugin) 11 | 12 | Vue.config.productionTip = false 13 | 14 | new Vue({ 15 | router, 16 | render: function (h) { return h(App) } 17 | }).$mount('#app') 18 | -------------------------------------------------------------------------------- /project_templates/scalyshop-frontend/src/router.js: -------------------------------------------------------------------------------- 1 | import Vue from 'vue' 2 | import Router from 'vue-router' 3 | import Home from './views/Home.vue' 4 | import Customer from './views/Customer.vue' 5 | import Basket from './views/Basket.vue' 6 | import Admin from './views/Admin.vue' 7 | import History from './views/History.vue' 8 | 9 | Vue.use(Router) 10 | 11 | export default new Router({ 12 | mode: 'history', 13 | base: process.env.BASE_URL, 14 | routes: [ 15 | { 16 | path: '/', 17 | name: 'home', 18 | component: Home 19 | }, 20 | { 21 | path: '/customer', 22 | name: 'customer', 23 | component: Customer 24 | }, 25 | { 26 | path: '/customer/finalize', 27 | name: 'basket', 28 | component: Basket 29 | }, 30 | { 31 | path: '/customer/orders', 32 | name: 'history', 33 | component: History 34 | }, 35 | { 36 | path: '/admin', 37 | name: 'admin', 38 | component: Admin 39 | } 40 | ] 41 | }) 42 | -------------------------------------------------------------------------------- /project_templates/scalyshop-frontend/src/views/Admin.vue: -------------------------------------------------------------------------------- 1 | 108 | 109 | 272 | 273 | 274 | 304 | -------------------------------------------------------------------------------- /project_templates/scalyshop-frontend/src/views/Basket.vue: -------------------------------------------------------------------------------- 1 | 21 | 22 | 138 | 139 | 140 | 150 | -------------------------------------------------------------------------------- /project_templates/scalyshop-frontend/src/views/Customer.vue: -------------------------------------------------------------------------------- 1 | 28 | 29 | 148 | 149 | 150 | 160 | -------------------------------------------------------------------------------- /project_templates/scalyshop-frontend/src/views/History.vue: -------------------------------------------------------------------------------- 1 | 11 | 12 | 54 | 55 | 56 | 63 | -------------------------------------------------------------------------------- /project_templates/scalyshop-frontend/src/views/Home.vue: -------------------------------------------------------------------------------- 1 | 25 | 26 | 53 | 54 | 62 | -------------------------------------------------------------------------------- /project_templates/scalyshop-frontend/vue.config.js: -------------------------------------------------------------------------------- 1 | module.exports = { 2 | configureWebpack: { 
3 | devtool: 'source-map' 4 | }, 5 | devServer: { 6 | disableHostCheck: true, 7 | port: 5046 8 | } 9 | } 10 | --------------------------------------------------------------------------------