├── movie
│   ├── frontend
│   │   ├── index.html
│   │   ├── babel.config.js
│   │   ├── .DS_Store
│   │   ├── public
│   │   │   ├── favicon.ico
│   │   │   └── index.html
│   │   ├── src
│   │   │   ├── assets
│   │   │   │   └── logo.png
│   │   │   ├── controller
│   │   │   │   ├── movieapi.js
│   │   │   │   ├── authController.js
│   │   │   │   ├── movieController.js
│   │   │   │   └── api.js
│   │   │   ├── App.vue
│   │   │   ├── main.js
│   │   │   ├── router.js
│   │   │   ├── components
│   │   │   │   └── Index.vue
│   │   │   └── views
│   │   │       ├── Register.vue
│   │   │       ├── Login.vue
│   │   │       ├── MovieInfo.vue
│   │   │       └── Profile.vue
│   │   ├── README.md
│   │   └── package.json
│   └── backend
│       ├── user
│       │   ├── test.js
│       │   ├── app.js
│       │   ├── package.json
│       │   ├── router.js
│       │   └── package-lock.json
│       ├── predict.py
│       ├── train.py
│       └── back.py
├── deploy
│   ├── master
│   │   ├── configs
│   │   │   ├── master
│   │   │   ├── slaves
│   │   │   ├── ssh_config
│   │   │   ├── yarn-site.xml
│   │   │   ├── mapred-site.xml
│   │   │   ├── core-site.xml
│   │   │   ├── hdfs-site.xml
│   │   │   ├── log4j.properties
│   │   │   ├── hive-env.sh
│   │   │   ├── hadoop-env.sh
│   │   │   └── spark-env.sh
│   │   ├── clean_all_containers.sh
│   │   ├── create_container_master.sh
│   │   ├── start-all.sh
│   │   └── create_containers.sh
│   ├── slave1
│   │   ├── configs
│   │   │   ├── master
│   │   │   ├── slaves
│   │   │   ├── ssh_config
│   │   │   ├── yarn-site.xml
│   │   │   ├── mapred-site.xml
│   │   │   ├── core-site.xml
│   │   │   ├── hdfs-site.xml
│   │   │   ├── log4j.properties
│   │   │   ├── hadoop-env.sh
│   │   │   └── spark-env.sh
│   │   ├── start-nginx.sh
│   │   ├── copypub.sh
│   │   ├── docker_copypub.sh
│   │   ├── create_container_slave.sh
│   │   └── Dockerfile
│   ├── slave2
│   │   ├── configs
│   │   │   ├── master
│   │   │   ├── slaves
│   │   │   ├── ssh_config
│   │   │   ├── yarn-site.xml
│   │   │   ├── mapred-site.xml
│   │   │   ├── core-site.xml
│   │   │   ├── hdfs-site.xml
│   │   │   ├── log4j.properties
│   │   │   ├── hadoop-env.sh
│   │   │   └── spark-env.sh
│   │   ├── copypub.sh
│   │   ├── docker_copypub.sh
│   │   ├── create_container_slave.sh
│   │   └── Dockerfile
│   ├── db
│   │   ├── start-mysql.sh
│   │   └── docker-compose.yml
│   └── web
│       └── create_containers_web.sh
├── .gitignore
├── log
│   ├── frontend
│   │   ├── public
│   │   │   ├── robots.txt
│   │   │   ├── favicon.ico
│   │   │   ├── img
│   │   │   │   └── icons
│   │   │   │       ├── favicon-16x16.png
│   │   │   │       ├── favicon-32x32.png
│   │   │   │       ├── mstile-150x150.png
│   │   │   │       ├── apple-touch-icon.png
│   │   │   │       ├── android-chrome-192x192.png
│   │   │   │       ├── android-chrome-512x512.png
│   │   │   │       ├── apple-touch-icon-60x60.png
│   │   │   │       ├── apple-touch-icon-76x76.png
│   │   │   │       ├── apple-touch-icon-120x120.png
│   │   │   │       ├── apple-touch-icon-152x152.png
│   │   │   │       ├── apple-touch-icon-180x180.png
│   │   │   │       ├── msapplication-icon-144x144.png
│   │   │   │       ├── android-chrome-maskable-192x192.png
│   │   │   │       ├── android-chrome-maskable-512x512.png
│   │   │   │       └── safari-pinned-tab.svg
│   │   │   └── index.html
│   │   ├── .browserslistrc
│   │   ├── babel.config.js
│   │   ├── src
│   │   │   ├── assets
│   │   │   │   └── logo.png
│   │   │   ├── api
│   │   │   │   ├── api.js
│   │   │   │   └── controller.js
│   │   │   ├── store
│   │   │   │   └── index.js
│   │   │   ├── router
│   │   │   │   └── index.js
│   │   │   ├── main.js
│   │   │   ├── App.vue
│   │   │   ├── registerServiceWorker.js
│   │   │   └── views
│   │   │       └── Home.vue
│   │   ├── .gitignore
│   │   ├── .eslintrc.js
│   │   ├── README.md
│   │   └── package.json
│   └── backend
│       └── backend.py
└── README.md
/movie/frontend/index.html:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/deploy/master/configs/master:
--------------------------------------------------------------------------------
1 | master
2 |
--------------------------------------------------------------------------------
/deploy/slave1/configs/master:
--------------------------------------------------------------------------------
1 | master
2 |
--------------------------------------------------------------------------------
/deploy/slave2/configs/master:
--------------------------------------------------------------------------------
1 | master
2 |
--------------------------------------------------------------------------------
/deploy/master/configs/slaves:
--------------------------------------------------------------------------------
1 | slave1
2 | slave2
3 |
--------------------------------------------------------------------------------
/deploy/slave1/configs/slaves:
--------------------------------------------------------------------------------
1 | slave1
2 | slave2
3 |
--------------------------------------------------------------------------------
/deploy/slave2/configs/slaves:
--------------------------------------------------------------------------------
1 | slave1
2 | slave2
3 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | .idea
2 | node_modules
3 | dist
4 | notice.txt
5 |
--------------------------------------------------------------------------------
/log/frontend/public/robots.txt:
--------------------------------------------------------------------------------
1 | User-agent: *
2 | Disallow:
3 |
--------------------------------------------------------------------------------
/log/frontend/.browserslistrc:
--------------------------------------------------------------------------------
1 | > 1%
2 | last 2 versions
3 | not dead
4 |
--------------------------------------------------------------------------------
/deploy/db/start-mysql.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | docker rm -f $(docker ps -q)
3 | docker-compose up -d
4 |
--------------------------------------------------------------------------------
/movie/frontend/babel.config.js:
--------------------------------------------------------------------------------
1 | module.exports = {
2 | presets: [
3 | '@vue/app'
4 | ]
5 | }
6 |
--------------------------------------------------------------------------------
/movie/frontend/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wsxst/MovieRecommend/HEAD/movie/frontend/.DS_Store
--------------------------------------------------------------------------------
/deploy/slave1/start-nginx.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | docker run -d --name nn -p 8880:80 --privileged my-nginx:0527
3 |
--------------------------------------------------------------------------------
/log/frontend/public/favicon.ico:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wsxst/MovieRecommend/HEAD/log/frontend/public/favicon.ico
--------------------------------------------------------------------------------
/log/frontend/babel.config.js:
--------------------------------------------------------------------------------
1 | module.exports = {
2 | presets: [
3 | '@vue/cli-plugin-babel/preset'
4 | ]
5 | }
6 |
--------------------------------------------------------------------------------
/log/frontend/src/assets/logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wsxst/MovieRecommend/HEAD/log/frontend/src/assets/logo.png
--------------------------------------------------------------------------------
/movie/frontend/public/favicon.ico:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wsxst/MovieRecommend/HEAD/movie/frontend/public/favicon.ico
--------------------------------------------------------------------------------
/movie/frontend/src/assets/logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wsxst/MovieRecommend/HEAD/movie/frontend/src/assets/logo.png
--------------------------------------------------------------------------------
/log/frontend/public/img/icons/favicon-16x16.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wsxst/MovieRecommend/HEAD/log/frontend/public/img/icons/favicon-16x16.png
--------------------------------------------------------------------------------
/log/frontend/public/img/icons/favicon-32x32.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wsxst/MovieRecommend/HEAD/log/frontend/public/img/icons/favicon-32x32.png
--------------------------------------------------------------------------------
/log/frontend/public/img/icons/mstile-150x150.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wsxst/MovieRecommend/HEAD/log/frontend/public/img/icons/mstile-150x150.png
--------------------------------------------------------------------------------
/log/frontend/public/img/icons/apple-touch-icon.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wsxst/MovieRecommend/HEAD/log/frontend/public/img/icons/apple-touch-icon.png
--------------------------------------------------------------------------------
/deploy/slave1/copypub.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | cat ~/1.pub >> ~/.ssh/authorized_keys
3 | rm ~/1.pub
4 | cat ~/2.pub >> ~/.ssh/authorized_keys
5 | rm ~/2.pub
6 |
7 |
--------------------------------------------------------------------------------
/deploy/slave2/copypub.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | cat ~/1.pub >> ~/.ssh/authorized_keys
3 | rm ~/1.pub
4 | cat ~/2.pub >> ~/.ssh/authorized_keys
5 | rm ~/2.pub
6 |
7 |
--------------------------------------------------------------------------------
/deploy/slave1/docker_copypub.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | docker cp /root/2.pub slave$1:/root/2.pub
3 | rm -f /root/2.pub
4 | docker exec -i slave$1 bash /root/copypub.sh
5 |
--------------------------------------------------------------------------------
/deploy/slave2/docker_copypub.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | docker cp /root/2.pub slave$1:/root/2.pub
3 | rm -f /root/2.pub
4 | docker exec -i slave$1 bash /root/copypub.sh
5 |
--------------------------------------------------------------------------------
/log/frontend/public/img/icons/android-chrome-192x192.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wsxst/MovieRecommend/HEAD/log/frontend/public/img/icons/android-chrome-192x192.png
--------------------------------------------------------------------------------
/log/frontend/public/img/icons/android-chrome-512x512.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wsxst/MovieRecommend/HEAD/log/frontend/public/img/icons/android-chrome-512x512.png
--------------------------------------------------------------------------------
/log/frontend/public/img/icons/apple-touch-icon-60x60.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wsxst/MovieRecommend/HEAD/log/frontend/public/img/icons/apple-touch-icon-60x60.png
--------------------------------------------------------------------------------
/log/frontend/public/img/icons/apple-touch-icon-76x76.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wsxst/MovieRecommend/HEAD/log/frontend/public/img/icons/apple-touch-icon-76x76.png
--------------------------------------------------------------------------------
/log/frontend/public/img/icons/apple-touch-icon-120x120.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wsxst/MovieRecommend/HEAD/log/frontend/public/img/icons/apple-touch-icon-120x120.png
--------------------------------------------------------------------------------
/log/frontend/public/img/icons/apple-touch-icon-152x152.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wsxst/MovieRecommend/HEAD/log/frontend/public/img/icons/apple-touch-icon-152x152.png
--------------------------------------------------------------------------------
/log/frontend/public/img/icons/apple-touch-icon-180x180.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wsxst/MovieRecommend/HEAD/log/frontend/public/img/icons/apple-touch-icon-180x180.png
--------------------------------------------------------------------------------
/log/frontend/public/img/icons/msapplication-icon-144x144.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wsxst/MovieRecommend/HEAD/log/frontend/public/img/icons/msapplication-icon-144x144.png
--------------------------------------------------------------------------------
/log/frontend/public/img/icons/android-chrome-maskable-192x192.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wsxst/MovieRecommend/HEAD/log/frontend/public/img/icons/android-chrome-maskable-192x192.png
--------------------------------------------------------------------------------
/log/frontend/public/img/icons/android-chrome-maskable-512x512.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wsxst/MovieRecommend/HEAD/log/frontend/public/img/icons/android-chrome-maskable-512x512.png
--------------------------------------------------------------------------------
/log/frontend/src/api/api.js:
--------------------------------------------------------------------------------
1 | import axios from 'axios';
2 |
3 | const service = axios.create({
4 | baseURL: 'http://121.36.133.80:16666', // url = base url + request url
5 | });
6 |
7 | export default service;
8 |
--------------------------------------------------------------------------------
/deploy/master/clean_all_containers.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | ssh root@s1 docker rm -f slave1 nn
3 | ssh root@s2 docker rm -f slave2
4 | ssh -p 22 root@db docker rm -f root_mysql_1
5 | ssh -p 22 root@web docker rm -f nn back
6 |
7 |
--------------------------------------------------------------------------------
/deploy/master/configs/ssh_config:
--------------------------------------------------------------------------------
1 | Host localhost
2 | StrictHostKeyChecking no
3 |
4 | Host 0.0.0.0
5 | StrictHostKeyChecking no
6 |
7 | Host slave*
8 | StrictHostKeyChecking no
9 | Host master
10 | StrictHostKeyChecking no
11 |
--------------------------------------------------------------------------------
/deploy/slave1/configs/ssh_config:
--------------------------------------------------------------------------------
1 | Host localhost
2 | StrictHostKeyChecking no
3 |
4 | Host 0.0.0.0
5 | StrictHostKeyChecking no
6 |
7 | Host slave*
8 | StrictHostKeyChecking no
9 | Host master
10 | StrictHostKeyChecking no
11 |
--------------------------------------------------------------------------------
/deploy/slave2/configs/ssh_config:
--------------------------------------------------------------------------------
1 | Host localhost
2 | StrictHostKeyChecking no
3 |
4 | Host 0.0.0.0
5 | StrictHostKeyChecking no
6 |
7 | Host slave*
8 | StrictHostKeyChecking no
9 | Host master
10 | StrictHostKeyChecking no
11 |
--------------------------------------------------------------------------------
/log/frontend/src/store/index.js:
--------------------------------------------------------------------------------
1 | import Vue from 'vue'
2 | import Vuex from 'vuex'
3 |
4 | Vue.use(Vuex)
5 |
6 | export default new Vuex.Store({
7 | state: {
8 | },
9 | mutations: {
10 | },
11 | actions: {
12 | },
13 | modules: {
14 | }
15 | })
16 |
--------------------------------------------------------------------------------
/deploy/web/create_containers_web.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #IMG=centos:7
3 | IMG=my-flask:0527
4 | docker run -d --name nn -p 8080:80 --privileged my-nginx:0527
5 | docker run -itd \
6 | --restart=always \
7 | --privileged \
8 | --net="host" \
9 | --name back \
10 | --hostname back \
11 | $IMG
12 |
13 |
--------------------------------------------------------------------------------
/movie/frontend/src/controller/movieapi.js:
--------------------------------------------------------------------------------
1 | import axios from 'axios'
2 | let baseUrl = "http://121.36.137.213:18999";
3 |
4 | const service = axios.create({
5 | baseURL: baseUrl, // url = base url + request url
6 | timeout: 5000 // request timeout
7 | });
8 |
9 | export default service;
10 |
--------------------------------------------------------------------------------
/deploy/db/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: '2'
2 | services:
3 | mysql:
4 | environment:
5 | MYSQL_ROOT_PASSWORD: 123456
6 | image: "docker.io/mysql:5.6"
7 | restart: always
8 | volumes:
9 | - /usr/local/docker/mysql:/var/lib/mysql
10 | ports:
11 | - "9777:3306"
12 |
--------------------------------------------------------------------------------
/movie/frontend/README.md:
--------------------------------------------------------------------------------
1 | # vue-loginx
2 |
3 | ## Project setup
4 |
5 | ### Start Backend Server
6 |
7 | $cd backendAPI
8 | $npm install
9 | $node app.js
10 |
11 |
12 | ### Start Vue frontend
13 |
14 | change directory to main project vue-loginx
15 | $npm install
16 | $npm run serve
17 |
18 |
19 |
--------------------------------------------------------------------------------
/log/frontend/.gitignore:
--------------------------------------------------------------------------------
1 | .DS_Store
2 | node_modules
3 | /dist
4 |
5 | # local env files
6 | .env.local
7 | .env.*.local
8 |
9 | # Log files
10 | npm-debug.log*
11 | yarn-debug.log*
12 | yarn-error.log*
13 |
14 | # Editor directories and files
15 | .idea
16 | .vscode
17 | *.suo
18 | *.ntvs*
19 | *.njsproj
20 | *.sln
21 | *.sw?
22 |
--------------------------------------------------------------------------------
/movie/frontend/src/controller/authController.js:
--------------------------------------------------------------------------------
1 | import api from './api'
2 |
3 | export default {
4 | login(data){
5 | return api.callApi(`post`,`/login`, data)
6 | },
7 | logout(){
8 | return api.callApi(`get`,`/logout`)
9 | },
10 | register(data){
11 | return api.callApi(`post`, '/register', data)
12 | }
13 | }
14 |
--------------------------------------------------------------------------------
/log/frontend/src/router/index.js:
--------------------------------------------------------------------------------
1 | import Vue from 'vue'
2 | import VueRouter from 'vue-router'
3 | import Home from '../views/Home.vue'
4 |
5 | Vue.use(VueRouter)
6 |
7 | const routes = [
8 | {
9 | path: '/',
10 | name: 'Home',
11 | component: Home
12 | }
13 | ]
14 |
15 | const router = new VueRouter({
16 | routes
17 | })
18 |
19 | export default router
20 |
--------------------------------------------------------------------------------
/movie/frontend/src/App.vue:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
17 |
--------------------------------------------------------------------------------
/log/frontend/src/api/controller.js:
--------------------------------------------------------------------------------
1 | import service from "./api";
2 |
3 | export function get_user_data(data) {
4 | return service({
5 | url:"/user?s="+data.start_time+"&e="+data.end_time,
6 | method:"get"
7 | })
8 | }
9 |
10 | export function get_movie_data(data) {
11 | return service({
12 | url:"/movie?s="+data.start_time+"&e="+data.end_time,
13 | method:"get"
14 | })
15 | }
16 |
--------------------------------------------------------------------------------
/movie/backend/user/test.js:
--------------------------------------------------------------------------------
1 | const http = require('http');
2 | const hostname = '0.0.0.0';
3 | const port = 3000;
4 | const server = http.createServer((req, res) => {
5 | res.statusCode = 200;
6 | res.setHeader('Content-Type', 'text/plain');
7 | res.end('Hello World\n');
8 | });
9 | server.listen(port, hostname, () => {
10 | console.log(`Server running at http://${hostname}:${port}/`);
11 | });
12 |
--------------------------------------------------------------------------------
/log/frontend/src/main.js:
--------------------------------------------------------------------------------
1 | import Vue from 'vue'
2 | import App from './App.vue'
3 | import './registerServiceWorker'
4 | import router from './router'
5 | import store from './store'
6 | import echarts from "echarts"
7 |
8 | Vue.config.productionTip = false;
9 |
10 | Vue.prototype.$echarts=echarts;
11 |
12 | new Vue({
13 | router,
14 | store,
15 | render: h => h(App)
16 | }).$mount('#app');
17 |
--------------------------------------------------------------------------------
/movie/backend/predict.py:
--------------------------------------------------------------------------------
1 | from pyspark import SparkContext
2 |
3 | sc = SparkContext("local")
4 |
5 | from pyspark.mllib.recommendation import MatrixFactorizationModel
6 | model = MatrixFactorizationModel.load(sc, '/data/model1')
7 |
8 | def movie_predict(userid,movieNum):
9 | tmp = model.recommendProducts(userid, movieNum)
10 | result = []
11 | for r in tmp:
12 | result.append(r.product)
13 | return result
14 |
15 |
16 |
--------------------------------------------------------------------------------
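Note: the model loaded above is served to the movie frontend by /movie/backend/back.py, which is not reproduced in this dump. Purely as an illustration of the contract the frontend expects (movieapi.js points at port 18999 and movieController.js calls GET /get_recommend?userid=...), a minimal Flask wrapper around movie_predict could look like the sketch below; the response key and everything not visible in the frontend code are assumptions, not the actual back.py.

```python
# Illustrative sketch only -- NOT the repository's back.py.
# Route, query parameter and port come from the frontend (movieapi.js / movieController.js);
# the response shape ("movie_list") is an assumption.
from flask import Flask, request, jsonify
from flask_cors import CORS
from predict import movie_predict  # assumes predict.py is importable inside the master container

app = Flask(__name__)
CORS(app, resources='/*')

@app.route('/get_recommend', methods=['GET'])
def get_recommend():
    userid = int(request.args.get('userid'))
    # top-10 movie ids recommended for this user by the ALS model
    return jsonify({'movie_list': movie_predict(userid, 10)})

if __name__ == '__main__':
    app.run('0.0.0.0', port=18999)
```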
/movie/frontend/src/main.js:
--------------------------------------------------------------------------------
1 | import Vue from 'vue'
2 | import App from './App.vue'
3 | import router from './router'
4 | import ElementUI from 'element-ui';
5 | import 'element-ui/lib/theme-chalk/index.css';
6 |
7 | var VueCookie = require('vue-cookie');
8 | Vue.use(VueCookie);
9 | Vue.use(ElementUI);
10 |
11 | Vue.config.productionTip = false;
12 |
13 | new Vue({
14 | router,
15 | render: h => h(App)
16 | }).$mount('#app');
17 |
--------------------------------------------------------------------------------
/deploy/master/create_container_master.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #IMG=centos-bigdata:v$1
3 | IMG=centos-bigdata-master:0527
4 |
5 | docker rm -f master
6 |
7 | echo "Create and start container..."
8 | docker run -d \
9 | --restart=always \
10 | --privileged \
11 | --net="host" \
12 | --name $1 \
13 | --hostname $1 \
14 | --add-host master:192.168.0.222 \
15 | --add-host slave1:192.168.0.145 \
16 | --add-host slave2:192.168.0.122 \
17 | $IMG
18 |
19 |
--------------------------------------------------------------------------------
/deploy/master/configs/yarn-site.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0"?>
2 | <configuration>
3 |   <property>
4 |     <name>yarn.nodemanager.aux-services</name>
5 |     <value>mapreduce_shuffle</value>
6 |   </property>
7 |   <property>
8 |     <name>yarn.resourcemanager.hostname</name>
9 |     <value>master</value>
10 |   </property>
11 | </configuration>
12 |
--------------------------------------------------------------------------------
/deploy/slave1/configs/yarn-site.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0"?>
2 | <configuration>
3 |   <property>
4 |     <name>yarn.nodemanager.aux-services</name>
5 |     <value>mapreduce_shuffle</value>
6 |   </property>
7 |   <property>
8 |     <name>yarn.resourcemanager.hostname</name>
9 |     <value>master</value>
10 |   </property>
11 | </configuration>
12 |
--------------------------------------------------------------------------------
/deploy/slave2/configs/yarn-site.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0"?>
2 | <configuration>
3 |   <property>
4 |     <name>yarn.nodemanager.aux-services</name>
5 |     <value>mapreduce_shuffle</value>
6 |   </property>
7 |   <property>
8 |     <name>yarn.resourcemanager.hostname</name>
9 |     <value>master</value>
10 |   </property>
11 | </configuration>
12 |
--------------------------------------------------------------------------------
/log/frontend/.eslintrc.js:
--------------------------------------------------------------------------------
1 | module.exports = {
2 | root: true,
3 | env: {
4 | node: true
5 | },
6 | 'extends': [
7 | 'plugin:vue/essential',
8 | 'eslint:recommended'
9 | ],
10 | parserOptions: {
11 | parser: 'babel-eslint'
12 | },
13 | rules: {
14 | 'no-console': process.env.NODE_ENV === 'production' ? 'warn' : 'off',
15 | 'no-debugger': process.env.NODE_ENV === 'production' ? 'warn' : 'off'
16 | }
17 | }
18 |
--------------------------------------------------------------------------------
/log/frontend/README.md:
--------------------------------------------------------------------------------
1 | # frontend
2 |
3 | ## Project setup
4 | ```
5 | npm install
6 | ```
7 |
8 | ### Compiles and hot-reloads for development
9 | ```
10 | npm run serve
11 | ```
12 |
13 | ### Compiles and minifies for production
14 | ```
15 | npm run build
16 | ```
17 |
18 | ### Lints and fixes files
19 | ```
20 | npm run lint
21 | ```
22 |
23 | ### Customize configuration
24 | See [Configuration Reference](https://cli.vuejs.org/config/).
25 |
--------------------------------------------------------------------------------
/movie/backend/user/app.js:
--------------------------------------------------------------------------------
1 | const express = require('express')
2 | var bodyParser = require('body-parser')
3 | var cors = require('cors')
4 |
5 | const app = express()
6 | const port = 3000
7 |
8 | app.use(bodyParser.urlencoded({ extended: false }))
9 | app.use(bodyParser.json())
10 | app.use(cors())
11 | const routers = require('./router')
12 |
13 | app.use('/api', routers)
14 |
15 | app.listen(port, () => console.log(`Server listening on port ${port}!`))
16 |
--------------------------------------------------------------------------------
/movie/backend/user/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "lognixapi",
3 | "version": "0.0.1",
4 | "description": "BackEnd API ",
5 | "main": "app.js",
6 | "scripts": {
7 | "test": "echo \"Error: no test specified\" && exit 1"
8 | },
9 | "author": "Vb",
10 | "license": "ISC",
11 | "dependencies": {
12 | "body-parser": "^1.19.0",
13 | "cors": "^2.8.5",
14 | "express": "^4.17.1",
15 | "mysql": "^2.18.1"
16 | },
17 | "devDependencies": {}
18 | }
19 |
--------------------------------------------------------------------------------
/movie/frontend/src/controller/movieController.js:
--------------------------------------------------------------------------------
1 | import service from "./movieapi";
2 |
3 | export function getRecommendList(data) {
4 | return service({
5 | url:"/get_recommend?userid="+data.userid,
6 | method:"get"
7 | })
8 | }
9 |
10 | export function getMovieInfo(data) {
11 | return service({
12 | url:"/get_movie?movieid="+data.movieid+"&userid="+data.userid,
13 | method:"get"
14 | })
15 | }
16 |
17 | export function rateMovie(data) {
18 | return service({
19 | url:"/user_rate?userid="+data.userid+"&movieid="+data.movieid+"&star="+data.star,
20 | method:"get"
21 | })
22 | }
23 |
--------------------------------------------------------------------------------
/movie/frontend/src/controller/api.js:
--------------------------------------------------------------------------------
1 | import axios from 'axios'
2 | let baseUrl = `http://47.97.200.236:3000/api`
3 |
4 | const client = axios.create({
5 | baseURL: baseUrl,
6 | });
7 |
8 | export default {
9 | async callApi(method, uri, data){
10 | try {
11 | let req =await client({
12 | method,
13 | url : uri,
14 | data : data
15 | })
16 |
17 | return req.data
18 | } catch (err) {
19 | console.log('Error in getting Server uri')
20 | console.log(err)
21 | }
22 | }
23 | }
24 |
--------------------------------------------------------------------------------
/log/frontend/src/App.vue:
--------------------------------------------------------------------------------
1 |
2 |
8 |
9 |
10 |
32 |
--------------------------------------------------------------------------------
/log/frontend/public/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 | <%= htmlWebpackPlugin.options.title %>
9 |
10 |
11 |
12 | We're sorry but <%= htmlWebpackPlugin.options.title %> doesn't work properly without JavaScript enabled. Please enable it to continue.
13 |
14 |
15 |
16 |
17 |
18 |
--------------------------------------------------------------------------------
/movie/frontend/public/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 | vue-loginx
12 |
13 |
14 |
15 | We're sorry but vue-loginx doesn't work properly without JavaScript enabled. Please enable it to continue.
16 |
17 |
18 |
19 |
20 |
21 |
--------------------------------------------------------------------------------
/movie/frontend/src/router.js:
--------------------------------------------------------------------------------
1 | import Vue from 'vue'
2 | import Router from 'vue-router'
3 | import Index from './components/Index.vue'
4 | Vue.use(Router)
5 |
6 | export default new Router({
7 | routes: [
8 | {
9 | path: '/',
10 | name: 'index',
11 | component: () => import('./views/Login')
12 | },
13 | {
14 | path: '/login',
15 | name: 'login',
16 | component: () => import('./views/Login')
17 | },
18 | {
19 | path: '/register',
20 | name: 'register',
21 | component: () => import('./views/Register')
22 | },
23 | {
24 | path: '/profile',
25 | name: 'profile',
26 | component: () => import('./views/Profile')
27 | },
28 | {
29 | path: "/movie_info",
30 | name: "movie_info",
31 | component: () => import('./views/MovieInfo')
32 | },
33 | ],
34 | mode: 'hash',
35 | base: '/',
36 | })
37 |
--------------------------------------------------------------------------------
/log/frontend/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "frontend",
3 | "version": "0.1.0",
4 | "private": true,
5 | "scripts": {
6 | "serve": "vue-cli-service serve",
7 | "build": "vue-cli-service build",
8 | "lint": "vue-cli-service lint"
9 | },
10 | "dependencies": {
11 | "core-js": "^3.6.4",
12 | "echarts": "^4.8.0",
13 | "register-service-worker": "^1.7.1",
14 | "vue": "^2.6.11",
15 | "vue-router": "^3.1.6",
16 | "vuex": "^3.1.3"
17 | },
18 | "devDependencies": {
19 | "@vue/cli-plugin-babel": "^4.3.0",
20 | "@vue/cli-plugin-eslint": "^4.3.0",
21 | "@vue/cli-plugin-pwa": "^4.3.0",
22 | "@vue/cli-plugin-router": "^4.3.0",
23 | "@vue/cli-plugin-vuex": "^4.3.0",
24 | "@vue/cli-service": "^4.3.0",
25 | "axios": "^0.19.2",
26 | "babel-eslint": "^10.1.0",
27 | "eslint": "^6.7.2",
28 | "eslint-plugin-vue": "^6.2.2",
29 | "vue-template-compiler": "^2.6.11"
30 | }
31 | }
32 |
--------------------------------------------------------------------------------
/movie/backend/train.py:
--------------------------------------------------------------------------------
1 | from pyspark import SparkContext, SparkConf
2 | from pyspark.sql import SparkSession
3 |
4 | # delete the old file and put the new file
5 | import os
6 | os.system("hadoop fs -rm -r -skipTrash /data/ratings_small.csv")
7 | os.system("hdfs dfs -put ratings_small.csv /data")
8 |
9 | print("start")
10 |
11 | conf = SparkConf()
12 | conf.setMaster("spark://master:7077")
13 | conf.setAppName("recommend_train")
14 | conf.setExecutorEnv(key="executor-memory",value="3g")
15 | conf.setExecutorEnv(key="driver-memory",value="9g")
16 |
17 | sc = SparkContext(conf=conf)
18 | #sc = SparkContext("local")
19 |
20 | text = sc.textFile("/data/ratings_small.csv")
21 | text = text.filter(lambda x: "movieId" not in x)
22 | movieRatings=text.map(lambda x: x.split(",")[:3])
23 |
24 | print("start counting")
25 | from pyspark.mllib.recommendation import ALS
26 | model = ALS.train(movieRatings, 10, 10, 0.01)
27 |
28 | model.save(sc,"/data/model1")
29 | print(model.recommendProducts(1, 5))
30 |
--------------------------------------------------------------------------------
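train.py fits the ALS model on the full ratings file and only spot-checks it with recommendProducts(1, 5). A hold-out evaluation is a natural next step; the sketch below is an addition (not part of the repository) and assumes the same SparkContext `sc` and the /data/ratings_small.csv path used in train.py.

```python
# Hold-out RMSE for the ALS recommender -- an illustrative addition, not part of train.py.
# Assumes the SparkContext `sc` and the HDFS path /data/ratings_small.csv from train.py.
from pyspark.mllib.recommendation import ALS

raw = sc.textFile("/data/ratings_small.csv").filter(lambda x: "movieId" not in x)
ratings = raw.map(lambda x: x.split(",")[:3]) \
             .map(lambda p: (int(p[0]), int(p[1]), float(p[2])))  # (userId, movieId, rating)

train, test = ratings.randomSplit([0.8, 0.2], seed=42)
model = ALS.train(train, rank=10, iterations=10, lambda_=0.01)

# Predict the held-out (user, movie) pairs and compare against the true ratings.
preds = model.predictAll(test.map(lambda r: (r[0], r[1]))) \
             .map(lambda r: ((r.user, r.product), r.rating))
truth = test.map(lambda r: ((r[0], r[1]), r[2]))
mse = truth.join(preds).map(lambda kv: (kv[1][0] - kv[1][1]) ** 2).mean()
print("hold-out RMSE:", mse ** 0.5)
```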
/deploy/master/configs/mapred-site.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0"?>
2 | <?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
3 | <!--
16 | -->
17 |
18 | <!-- Put site-specific property overrides in this file. -->
19 | <configuration>
20 |   <property>
21 |     <name>mapreduce.framework.name</name>
22 |     <value>yarn</value>
23 |   </property>
24 | </configuration>
25 |
26 |
--------------------------------------------------------------------------------
/deploy/slave1/configs/mapred-site.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0"?>
2 | <?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
3 | <!--
16 | -->
17 |
18 | <!-- Put site-specific property overrides in this file. -->
19 | <configuration>
20 |   <property>
21 |     <name>mapreduce.framework.name</name>
22 |     <value>yarn</value>
23 |   </property>
24 | </configuration>
25 |
26 |
--------------------------------------------------------------------------------
/deploy/slave2/configs/mapred-site.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0"?>
2 | <?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
3 | <!--
16 | -->
17 |
18 | <!-- Put site-specific property overrides in this file. -->
19 | <configuration>
20 |   <property>
21 |     <name>mapreduce.framework.name</name>
22 |     <value>yarn</value>
23 |   </property>
24 | </configuration>
25 |
26 |
--------------------------------------------------------------------------------
/log/frontend/src/registerServiceWorker.js:
--------------------------------------------------------------------------------
1 | /* eslint-disable no-console */
2 |
3 | import { register } from 'register-service-worker'
4 |
5 | if (process.env.NODE_ENV === 'production') {
6 | register(`${process.env.BASE_URL}service-worker.js`, {
7 | ready () {
8 | console.log(
9 | 'App is being served from cache by a service worker.\n' +
10 | 'For more details, visit https://goo.gl/AFskqB'
11 | )
12 | },
13 | registered () {
14 | console.log('Service worker has been registered.')
15 | },
16 | cached () {
17 | console.log('Content has been cached for offline use.')
18 | },
19 | updatefound () {
20 | console.log('New content is downloading.')
21 | },
22 | updated () {
23 | console.log('New content is available; please refresh.')
24 | },
25 | offline () {
26 | console.log('No internet connection found. App is running in offline mode.')
27 | },
28 | error (error) {
29 | console.error('Error during service worker registration:', error)
30 | }
31 | })
32 | }
33 |
--------------------------------------------------------------------------------
/movie/frontend/src/components/Index.vue:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
{{ msg }}
6 |
7 |
Login or Register with:
8 |
9 |
Local Login
10 |
Local Signup
11 |
12 |
13 |
14 |
15 |
35 |
36 |
39 |
--------------------------------------------------------------------------------
/deploy/slave1/create_container_slave.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | CODE_HOME=/opt/homework
4 |
5 | #IMG=centos-bigdata:v$1
6 | IMG=centos-bigdata-slave:0527
7 |
8 | SLAVE=slave$1
9 | echo "Create and start container..."
10 | docker run -d \
11 | --restart=always \
12 | --privileged \
13 | --net="host" \
14 | --name $SLAVE \
15 | --hostname $SLAVE \
16 | --add-host master:192.168.0.222 \
17 | --add-host slave1:192.168.0.145 \
18 | --add-host slave2:192.168.0.122 \
19 | $IMG
20 |
21 | #HADOOP_HOME=/usr/local/hadoop-2.7.3
22 | #SPARK_HOME=/usr/local/spark-2.4.5-bin-without-hadoop
23 | #docker cp ./configs/hadoop-env.sh $SLAVE:$HADOOP_HOME/etc/hadoop/hadoop-env.sh
24 | #docker cp ./configs/spark-env.sh $SLAVE:$SPARK_HOME/conf/spark-env.sh
25 | #docker cp ./configs/slaves $SLAVE:$HADOOP_HOME/etc/hadoop/slaves
26 | #docker cp ./configs/slaves $SLAVE:$SPARK_HOME/conf/slaves
27 | docker exec -i $SLAVE bash -c "echo \"export SPARK_LOCAL_IP=slave$1\" >> /usr/local/spark-2.4.5-bin-without-hadoop/conf/spark-env.sh"
28 |
29 | docker cp $CODE_HOME/copypub.sh $SLAVE:/root/
30 |
31 | echo "Finished!"
32 | docker ps
33 |
34 |
--------------------------------------------------------------------------------
/deploy/slave2/create_container_slave.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | CODE_HOME=/opt/homework
4 |
5 | #IMG=centos-bigdata:v$1
6 | IMG=centos-bigdata-slave:0527
7 |
8 | SLAVE=slave$1
9 | echo "Create and start container..."
10 | docker run -d \
11 | --restart=always \
12 | --privileged \
13 | --net="host" \
14 | --name $SLAVE \
15 | --hostname $SLAVE \
16 | --add-host master:192.168.0.222 \
17 | --add-host slave1:192.168.0.145 \
18 | --add-host slave2:192.168.0.122 \
19 | $IMG
20 |
21 | #HADOOP_HOME=/usr/local/hadoop-2.7.3
22 | #SPARK_HOME=/usr/local/spark-2.4.5-bin-without-hadoop
23 | #docker cp ./configs/hadoop-env.sh $SLAVE:$HADOOP_HOME/etc/hadoop/hadoop-env.sh
24 | #docker cp ./configs/spark-env.sh $SLAVE:$SPARK_HOME/conf/spark-env.sh
25 | #docker cp ./configs/slaves $SLAVE:$HADOOP_HOME/etc/hadoop/slaves
26 | #docker cp ./configs/slaves $SLAVE:$SPARK_HOME/conf/slaves
27 | docker exec -i $SLAVE bash -c "echo \"export SPARK_LOCAL_IP=slave$1\" >> /usr/local/spark-2.4.5-bin-without-hadoop/conf/spark-env.sh"
28 |
29 | docker cp $CODE_HOME/copypub.sh $SLAVE:/root/
30 |
31 | echo "Finished!"
32 | docker ps
33 |
34 |
--------------------------------------------------------------------------------
/deploy/master/configs/core-site.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="UTF-8"?>
2 | <?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
3 | <!--
16 | -->
17 | <!-- Put site-specific property overrides in this file. -->
18 | <configuration>
19 |   <property>
20 |     <name>fs.defaultFS</name>
21 |     <value>hdfs://master:9000/</value>
22 |   </property>
23 |   <property>
24 |     <name>hadoop.tmp.dir</name>
25 |     <value>file:/data/hadoop/tmp</value>
26 |   </property>
27 | </configuration>
28 |
--------------------------------------------------------------------------------
/deploy/slave1/configs/core-site.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="UTF-8"?>
2 | <?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
3 | <!--
16 | -->
17 | <!-- Put site-specific property overrides in this file. -->
18 | <configuration>
19 |   <property>
20 |     <name>fs.defaultFS</name>
21 |     <value>hdfs://master:9000/</value>
22 |   </property>
23 |   <property>
24 |     <name>hadoop.tmp.dir</name>
25 |     <value>file:/data/hadoop/tmp</value>
26 |   </property>
27 | </configuration>
28 |
--------------------------------------------------------------------------------
/deploy/slave2/configs/core-site.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="UTF-8"?>
2 | <?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
3 | <!--
16 | -->
17 | <!-- Put site-specific property overrides in this file. -->
18 | <configuration>
19 |   <property>
20 |     <name>fs.defaultFS</name>
21 |     <value>hdfs://master:9000/</value>
22 |   </property>
23 |   <property>
24 |     <name>hadoop.tmp.dir</name>
25 |     <value>file:/data/hadoop/tmp</value>
26 |   </property>
27 | </configuration>
28 |
--------------------------------------------------------------------------------
/deploy/master/start-all.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | #docker exec -i master bash -c "ssh slave1 rm ~/id_rsa.pub"
4 | #docker exec -i node1 /usr/local/hadoop-2.8.5/start-hadoop.sh
5 | docker exec -i master /usr/local/hadoop-2.7.3/sbin/start-dfs.sh
6 | docker exec -i master /usr/local/hadoop-2.7.3/sbin/start-yarn.sh
7 |
8 | docker exec -i master hdfs dfs -mkdir -p /hive
9 | docker exec -i master hdfs dfs -mkdir -p /hive/warehouse
10 | docker exec -i master hdfs dfs -chmod 777 /hive
11 | docker exec -i master hdfs dfs -chmod 777 /hive/warehouse
12 | docker exec -i master hdfs dfs -mkdir -p /tmp/hive/
13 | docker exec -i master hdfs dfs -chmod 777 /tmp/hive/
14 |
15 | docker exec -i master /usr/local/spark-2.4.5-bin-without-hadoop/sbin/start-all.sh
16 |
17 | #docker exec -i master schematool -dbType mysql -initSchema --verbose
18 |
19 | docker exec -i master /usr/local/hbase/bin/start-hbase.sh
20 |
21 | docker exec -d master hiveserver2
22 |
23 |
24 | while :
25 | do
26 | p=$(netstat -nlp|grep 10002|grep -v grep)
27 | if [ -n "$p" ]
28 | then
29 | break
30 | fi
31 | done
32 |
33 | echo "ok"
34 | ssh -p 22 root@web docker exec -d back python3 /code/backend.py > /backend.log
35 |
36 | docker exec -d master python3 /data/back.py > /backend.log
37 |
--------------------------------------------------------------------------------
/movie/frontend/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "vue-loginx",
3 | "version": "0.1.0",
4 | "private": true,
5 | "scripts": {
6 | "serve": "vue-cli-service serve",
7 | "build": "vue-cli-service build",
8 | "lint": "vue-cli-service lint"
9 | },
10 | "dependencies": {
11 | "axios": "^0.19.0",
12 | "core-js": "^2.6.5",
13 | "echarts": "^4.8.0",
14 | "element-ui": "^2.13.2",
15 | "vue": "^2.6.10",
16 | "vue-cookie": "^1.1.4",
17 | "vue-router": "^3.0.3"
18 | },
19 | "devDependencies": {
20 | "@vue/cli-plugin-babel": "^3.11.0",
21 | "@vue/cli-plugin-eslint": "^3.11.0",
22 | "@vue/cli-service": "^3.11.0",
23 | "babel-eslint": "^10.0.1",
24 | "eslint": "^5.16.0",
25 | "eslint-plugin-vue": "^5.0.0",
26 | "vue-template-compiler": "^2.6.10"
27 | },
28 | "eslintConfig": {
29 | "root": true,
30 | "env": {
31 | "node": true
32 | },
33 | "extends": [
34 | "plugin:vue/essential",
35 | "eslint:recommended"
36 | ],
37 | "rules": {},
38 | "parserOptions": {
39 | "parser": "babel-eslint"
40 | }
41 | },
42 | "postcss": {
43 | "plugins": {
44 | "autoprefixer": {}
45 | }
46 | },
47 | "browserslist": [
48 | "> 1%",
49 | "last 2 versions"
50 | ]
51 | }
52 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # MovieRecommend
2 |
3 | ## Movie recommendation system
4 |
5 | ```
6 | -- movie
7 | |-- frontend
8 | |-- backend
9 | ```
10 |
11 | ## Log analysis system
12 |
13 | ```
14 | -- log
15 | |-- frontend
16 | |-- backend
17 | ```
18 |
19 | ## Deployment steps
20 |
21 | 1. Copy the scripts and Docker image files in the deploy folder to the corresponding hosts.
22 | 2. Load the Docker images from file on each node, running the matching command on each host:
23 | ```bash
24 | #master
25 | docker load < master.tar
26 | #slave1
27 | docker load < slave1.tar
28 | #slave2
29 | docker load < slave2.tar
30 | #web
31 | docker load < web.tar
32 | #db
33 | docker load < db.tar
34 | ```
35 | 3. Configure the IP-to-hostname mappings on the master node:
36 | ```bash
37 | echo "192.168.0.222 master" >> /etc/hosts
38 | echo "192.168.0.145 slave1" >> /etc/hosts
39 | echo "192.168.0.122 slave2" >> /etc/hosts
40 | echo "192.168.0.157 web" >> /etc/hosts
41 | echo "192.168.0.187 db" >> /etc/hosts
42 | ```
43 | 4. Set up passwordless SSH from master to the other hosts; you will be prompted for each host's password along the way:
44 | ```bash
45 | ssh-copy-id master
46 | ssh-copy-id slave1
47 | ssh-copy-id slave2
48 | ssh-copy-id web
49 | ssh-copy-id db
50 | ```
51 | 5. On the master node, run the following scripts to create and start the containers on every host, and to bring up the cluster's big-data components and the web services:
52 | ```bash
53 | cd /opt/homework
54 | bash create_containers.sh && bash start-all.sh
55 | ```
56 |
57 | ## Cluster configuration
58 |
59 | The configuration files of the cluster's big-data components can be found at the following paths:
60 | ```bash
61 | #hadoop: the master container on the master node
62 | $HADOOP_HOME/etc/hadoop
63 | #spark: the master container on the master node, the slave1 container on the slave1 node, and the slave2 container on the slave2 node
64 | $SPARK_HOME/conf
65 | #hbase: the master container on the master node
66 | $HBASE_HOME/conf
67 | #flume: the master container on the master node
68 | $FLUME_HOME/conf
69 | #hive: the master container on the master node
70 | $HIVE_HOME/conf
71 | ```
--------------------------------------------------------------------------------
/deploy/master/configs/hdfs-site.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="UTF-8"?>
2 | <?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
3 | <!--
16 | -->
17 |
18 | <!-- Put site-specific property overrides in this file. -->
19 | <configuration>
20 |   <property>
21 |     <name>dfs.replication</name>
22 |     <value>2</value>
23 |   </property>
24 |   <property>
25 |     <name>dfs.permissions.enabled</name>
26 |     <value>false</value>
27 |   </property>
28 |   <property>
29 |     <name>dfs.namenode.name.dir</name>
30 |     <value>file:/data/hadoop/dfs/name</value>
31 |   </property>
32 |   <property>
33 |     <name>dfs.datanode.data.dir</name>
34 |     <value>file:/data/hadoop/dfs/data</value>
35 |   </property>
36 |   <property>
37 |     <name>dfs.webhdfs.enabled</name>
38 |     <value>true</value>
39 |   </property>
40 | </configuration>
41 |
--------------------------------------------------------------------------------
/deploy/slave1/configs/hdfs-site.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="UTF-8"?>
2 | <?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
3 | <!--
16 | -->
17 |
18 | <!-- Put site-specific property overrides in this file. -->
19 | <configuration>
20 |   <property>
21 |     <name>dfs.replication</name>
22 |     <value>2</value>
23 |   </property>
24 |   <property>
25 |     <name>dfs.permissions.enabled</name>
26 |     <value>false</value>
27 |   </property>
28 |   <property>
29 |     <name>dfs.namenode.name.dir</name>
30 |     <value>file:/data/hadoop/dfs/name</value>
31 |   </property>
32 |   <property>
33 |     <name>dfs.datanode.data.dir</name>
34 |     <value>file:/data/hadoop/dfs/data</value>
35 |   </property>
36 |   <property>
37 |     <name>dfs.webhdfs.enabled</name>
38 |     <value>true</value>
39 |   </property>
40 | </configuration>
41 |
--------------------------------------------------------------------------------
/deploy/slave2/configs/hdfs-site.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="UTF-8"?>
2 | <?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
3 | <!--
16 | -->
17 |
18 | <!-- Put site-specific property overrides in this file. -->
19 | <configuration>
20 |   <property>
21 |     <name>dfs.replication</name>
22 |     <value>2</value>
23 |   </property>
24 |   <property>
25 |     <name>dfs.permissions.enabled</name>
26 |     <value>false</value>
27 |   </property>
28 |   <property>
29 |     <name>dfs.namenode.name.dir</name>
30 |     <value>file:/data/hadoop/dfs/name</value>
31 |   </property>
32 |   <property>
33 |     <name>dfs.datanode.data.dir</name>
34 |     <value>file:/data/hadoop/dfs/data</value>
35 |   </property>
36 |   <property>
37 |     <name>dfs.webhdfs.enabled</name>
38 |     <value>true</value>
39 |   </property>
40 | </configuration>
41 |
--------------------------------------------------------------------------------
/deploy/master/create_containers.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | CODE_HOME=/opt/homework
4 | MASTER=master
5 |
6 | bash $CODE_HOME/create_container_master.sh $MASTER
7 |
8 | #HADOOP_HOME=/usr/local/hadoop-2.7.3
9 | #SPARK_HOME=/usr/local/spark-2.4.5-bin-without-hadoop
10 | #HIVE_HOME=/usr/local/hive
11 | #docker cp ./configs/hadoop-env.sh $MASTER:$HADOOP_HOME/etc/hadoop/hadoop-env.sh
12 | #docker cp ./configs/spark-env.sh $MASTER:$SPARK_HOME/conf/spark-env.sh
13 | #docker cp ./configs/slaves $MASTER:$HADOOP_HOME/etc/hadoop/slaves
14 | #docker cp ./configs/slaves $MASTER:$SPARK_HOME/conf/slaves
15 | #docker cp ./configs/hive-site.xml $MASTER:$HIVE_HOME/conf/hive-site.xml
16 | #sshpass -p "sspku1234*" ssh-copy-id -p 22 db
17 | #sshpass -p "sspku1234*" ssh-copy-id s2
18 | #sshpass -p "sspku1234*" ssh-copy-id s1
19 | #sshpass -p "dashuju123" ssh-copy-id -p 22
20 |
21 | docker exec -it $MASTER bash -c "echo \"export SPARK_LOCAL_IP=master\" >> /usr/local/spark-2.4.5-bin-without-hadoop/conf/spark-env.sh"
22 |
23 | bash $CODE_HOME/clean_all_containers.sh
24 |
25 | docker exec -i $MASTER bash -c "yum install -y sshpass"
26 | ip_arr=("192.168.0.145" "192.168.0.122")
27 | for i in 1 2
28 | do
29 | ssh root@s$i bash $CODE_HOME/create_container_slave.sh $i && \
30 | docker exec -i $MASTER sshpass -p "ss123456" ssh-copy-id s$i && \
31 | sshpass -p "ss123456" ssh-copy-id -p 22 s$i
32 | #docker exec -i $MASTER sshpass -p "ss123456" scp ~/.ssh/id_rsa.pub root@slave$j:/root/1.pub && \
33 | #scp ~/.ssh/id_rsa.pub root@$h:/root/2.pub && \
34 | #ssh root@$h bash $CODE_HOME/docker_copypub.sh $j
35 | done
36 |
37 | ssh -p 22 root@db bash /root/start-mysql.sh
38 | ssh -p 22 root@web bash $CODE_HOME/create_containers_web.sh
39 | ssh root@s1 bash $CODE_HOME/start-nginx.sh
40 |
41 | echo "Finished!"
42 | docker ps
43 |
44 |
--------------------------------------------------------------------------------
/log/backend/backend.py:
--------------------------------------------------------------------------------
1 | from pyhive import hive
2 | import flask
3 | from flask_cors import CORS
4 | from flask import request, Flask, jsonify
5 |
6 | app = Flask(__name__)
7 | CORS(app, resources='/*')
8 |
9 | @app.route('/user', methods=['GET'])
10 | def get_users():
11 | start_time=request.args.get("s")
12 | end_time=request.args.get("e")
13 | sql="select user_name,count(*) as count from logs where log_id>='"+start_time+"' and log_id<='"+end_time+"' group by user_name order by count desc limit 10"
14 | cursor=conn.cursor()
15 | cursor.execute(sql)
16 | res_json={}
17 | res_json["user_id_list"]=[]
18 | res_json["user_access_num_list"]=[]
19 | print("start!!!")
20 | for res_row in cursor.fetchall():
21 | res_json["user_id_list"].append(res_row[0])
22 | res_json["user_access_num_list"].append(res_row[1])
23 | cursor.close()
24 | print("end!!!")
25 | return jsonify(res_json)
26 |
27 | @app.route('/movie', methods=['GET'])
28 | def get_movies():
29 | start_time=request.args.get("s")
30 | end_time=request.args.get("e")
31 | sql="select movie_name,count(*) as count from logs where log_id>='"+start_time+"' and log_id<='"+end_time+"' group by movie_name order by count desc limit 10"
32 | cursor=conn.cursor()
33 | cursor.execute(sql)
34 | res_json={}
35 | res_json["movie_id_list"]=[]
36 | res_json["movie_access_num_list"]=[]
37 | for res_row in cursor.fetchall():
38 | res_json["movie_id_list"].append(res_row[0])
39 | res_json["movie_access_num_list"].append(res_row[1])
40 | cursor.close()
41 | return jsonify(res_json)
42 |
43 | if __name__ == "__main__":
44 | conn = hive.Connection(host='192.168.0.222', port=10000, username='hive', auth='NOSASL',database='recommend')
45 | app.run("0.0.0.0", port=16666, debug=True)
46 |
47 |
--------------------------------------------------------------------------------
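For reference, a small client sketch that exercises the two endpoints above (an illustration, not part of the repository). The base URL comes from app.run() and from /log/frontend/src/api/api.js; the value format passed as s/e is an assumption, since the backend compares them against log_id as strings.

```python
# Illustrative client for the /user and /movie endpoints above -- not part of the repository.
# Base URL taken from app.run(...) / log/frontend/src/api/api.js; the s/e value format is an assumption.
import requests

BASE = "http://121.36.133.80:16666"
params = {"s": "2020-06-01 00:00:00", "e": "2020-06-02 00:00:00"}

users = requests.get(BASE + "/user", params=params).json()
movies = requests.get(BASE + "/movie", params=params).json()

# The backend returns parallel lists: the top-10 names and their access counts.
for name, count in zip(users["user_id_list"], users["user_access_num_list"]):
    print("user:", name, count)
for name, count in zip(movies["movie_id_list"], movies["movie_access_num_list"]):
    print("movie:", name, count)
```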
/deploy/master/configs/log4j.properties:
--------------------------------------------------------------------------------
1 | #
2 | # Licensed to the Apache Software Foundation (ASF) under one or more
3 | # contributor license agreements. See the NOTICE file distributed with
4 | # this work for additional information regarding copyright ownership.
5 | # The ASF licenses this file to You under the Apache License, Version 2.0
6 | # (the "License"); you may not use this file except in compliance with
7 | # the License. You may obtain a copy of the License at
8 | #
9 | # http://www.apache.org/licenses/LICENSE-2.0
10 | #
11 | # Unless required by applicable law or agreed to in writing, software
12 | # distributed under the License is distributed on an "AS IS" BASIS,
13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | # See the License for the specific language governing permissions and
15 | # limitations under the License.
16 | #
17 |
18 | # Set everything to be logged to the console
19 | #[Modified] INFO -> ERROR
20 | log4j.rootCategory=ERROR, console
21 | log4j.appender.console=org.apache.log4j.ConsoleAppender
22 | log4j.appender.console.target=System.err
23 | log4j.appender.console.layout=org.apache.log4j.PatternLayout
24 | log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{1}: %m%n
25 |
26 | # Set the default spark-shell log level to WARN. When running the spark-shell, the
27 | # log level for this class is used to overwrite the root logger's log level, so that
28 | # the user can have different defaults for the shell and regular Spark apps.
29 | log4j.logger.org.apache.spark.repl.Main=WARN
30 |
31 | # Settings to quiet third party logs that are too verbose
32 | log4j.logger.org.spark_project.jetty=WARN
33 | log4j.logger.org.spark_project.jetty.util.component.AbstractLifeCycle=ERROR
34 | log4j.logger.org.apache.spark.repl.SparkIMain$exprTyper=INFO
35 | log4j.logger.org.apache.spark.repl.SparkILoop$SparkILoopInterpreter=INFO
36 | log4j.logger.org.apache.parquet=ERROR
37 | log4j.logger.parquet=ERROR
38 |
39 | # SPARK-9183: Settings to avoid annoying messages when looking up nonexistent UDFs in SparkSQL with Hive support
40 | log4j.logger.org.apache.hadoop.hive.metastore.RetryingHMSHandler=FATAL
41 | log4j.logger.org.apache.hadoop.hive.ql.exec.FunctionRegistry=ERROR
42 |
--------------------------------------------------------------------------------
/deploy/slave1/configs/log4j.properties:
--------------------------------------------------------------------------------
1 | #
2 | # Licensed to the Apache Software Foundation (ASF) under one or more
3 | # contributor license agreements. See the NOTICE file distributed with
4 | # this work for additional information regarding copyright ownership.
5 | # The ASF licenses this file to You under the Apache License, Version 2.0
6 | # (the "License"); you may not use this file except in compliance with
7 | # the License. You may obtain a copy of the License at
8 | #
9 | # http://www.apache.org/licenses/LICENSE-2.0
10 | #
11 | # Unless required by applicable law or agreed to in writing, software
12 | # distributed under the License is distributed on an "AS IS" BASIS,
13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | # See the License for the specific language governing permissions and
15 | # limitations under the License.
16 | #
17 |
18 | # Set everything to be logged to the console
19 | #[Modified] INFO -> ERROR
20 | log4j.rootCategory=ERROR, console
21 | log4j.appender.console=org.apache.log4j.ConsoleAppender
22 | log4j.appender.console.target=System.err
23 | log4j.appender.console.layout=org.apache.log4j.PatternLayout
24 | log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{1}: %m%n
25 |
26 | # Set the default spark-shell log level to WARN. When running the spark-shell, the
27 | # log level for this class is used to overwrite the root logger's log level, so that
28 | # the user can have different defaults for the shell and regular Spark apps.
29 | log4j.logger.org.apache.spark.repl.Main=WARN
30 |
31 | # Settings to quiet third party logs that are too verbose
32 | log4j.logger.org.spark_project.jetty=WARN
33 | log4j.logger.org.spark_project.jetty.util.component.AbstractLifeCycle=ERROR
34 | log4j.logger.org.apache.spark.repl.SparkIMain$exprTyper=INFO
35 | log4j.logger.org.apache.spark.repl.SparkILoop$SparkILoopInterpreter=INFO
36 | log4j.logger.org.apache.parquet=ERROR
37 | log4j.logger.parquet=ERROR
38 |
39 | # SPARK-9183: Settings to avoid annoying messages when looking up nonexistent UDFs in SparkSQL with Hive support
40 | log4j.logger.org.apache.hadoop.hive.metastore.RetryingHMSHandler=FATAL
41 | log4j.logger.org.apache.hadoop.hive.ql.exec.FunctionRegistry=ERROR
42 |
--------------------------------------------------------------------------------
/deploy/slave2/configs/log4j.properties:
--------------------------------------------------------------------------------
1 | #
2 | # Licensed to the Apache Software Foundation (ASF) under one or more
3 | # contributor license agreements. See the NOTICE file distributed with
4 | # this work for additional information regarding copyright ownership.
5 | # The ASF licenses this file to You under the Apache License, Version 2.0
6 | # (the "License"); you may not use this file except in compliance with
7 | # the License. You may obtain a copy of the License at
8 | #
9 | # http://www.apache.org/licenses/LICENSE-2.0
10 | #
11 | # Unless required by applicable law or agreed to in writing, software
12 | # distributed under the License is distributed on an "AS IS" BASIS,
13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | # See the License for the specific language governing permissions and
15 | # limitations under the License.
16 | #
17 |
18 | # Set everything to be logged to the console
19 | #[Modified] INFO -> ERROR
20 | log4j.rootCategory=ERROR, console
21 | log4j.appender.console=org.apache.log4j.ConsoleAppender
22 | log4j.appender.console.target=System.err
23 | log4j.appender.console.layout=org.apache.log4j.PatternLayout
24 | log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{1}: %m%n
25 |
26 | # Set the default spark-shell log level to WARN. When running the spark-shell, the
27 | # log level for this class is used to overwrite the root logger's log level, so that
28 | # the user can have different defaults for the shell and regular Spark apps.
29 | log4j.logger.org.apache.spark.repl.Main=WARN
30 |
31 | # Settings to quiet third party logs that are too verbose
32 | log4j.logger.org.spark_project.jetty=WARN
33 | log4j.logger.org.spark_project.jetty.util.component.AbstractLifeCycle=ERROR
34 | log4j.logger.org.apache.spark.repl.SparkIMain$exprTyper=INFO
35 | log4j.logger.org.apache.spark.repl.SparkILoop$SparkILoopInterpreter=INFO
36 | log4j.logger.org.apache.parquet=ERROR
37 | log4j.logger.parquet=ERROR
38 |
39 | # SPARK-9183: Settings to avoid annoying messages when looking up nonexistent UDFs in SparkSQL with Hive support
40 | log4j.logger.org.apache.hadoop.hive.metastore.RetryingHMSHandler=FATAL
41 | log4j.logger.org.apache.hadoop.hive.ql.exec.FunctionRegistry=ERROR
42 |
--------------------------------------------------------------------------------
/movie/frontend/src/views/Register.vue:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
{{alertMessage}}
5 |
6 |
7 |
Signup
8 |
9 |
10 |
11 | UserName
12 |
13 |
14 |
15 | Password
16 |
17 |
18 |
19 |
register
20 |
21 |
22 |
23 |
Already have an account? Go Login
24 |
25 |
26 |
27 |
28 |
29 |
30 |
69 |
70 |
73 |
--------------------------------------------------------------------------------
/movie/backend/user/router.js:
--------------------------------------------------------------------------------
1 | var express = require('express')
2 | var router = express.Router()
3 |
4 |
5 | var mysql = require('mysql');
6 | var connection = mysql.createConnection({
7 | host : '121.36.152.15',
8 | user : 'root',
9 | password : '123456',
10 | database : 'recommend',
11 | port : '9777',
12 | });
13 |
14 | connection.connect();
15 | var crypto = require('crypto');
16 | function cryptPwd(password) {
17 | var md5 = crypto.createHash('md5');
18 | return md5.update(password).digest('hex');
19 | }
20 | router.post('/login', function (req, res) {
21 | let username = req.body.username,
22 | password = cryptPwd(req.body.password)
23 |
24 | console.log(`username ${username} password ${password}`)
25 | connection.query(`SELECT * FROM user where username='${username}'`, function (error, results, fields) {
26 | if (error){
27 | res.send(false)
28 | console.log(error)
29 | return
30 | }
31 | if(results.length == 0){
32 | res.send(false)
33 | console.log(`username ${username} not exist`)
34 | return
35 | }else if(results[0]['password'] == password){
36 | let userObject = {
37 | username: username,
38 | userid: results[0]['userid'],
39 | }
40 | console.log("good")
41 | res.send(userObject)
42 | return
43 | }else if(results[0]['password'] != password){
44 | res.send(false)
45 | console.log("password should be" + results[0]['password'])
46 | return
47 | }else{
48 | res.send(false)
49 | console.log('unknown')
50 | return
51 | }
52 |
53 | });
54 | //if(email == userObject.email && password == userObject.password){
55 | //res.send(true)
56 | //} else {
57 | //res.send(false)
58 | //}
59 | //res.send(true)
60 |
61 | })
62 |
63 | router.post('/register', function (req, res) {
64 | let username = req.body.username,
65 | password = cryptPwd(req.body.password)
66 | connection.query(`INSERT INTO user(username,password) VALUES('${username}','${password}')`, function (error, results, fields) {
67 | if (error){
68 | res.send(false)
69 | console.log(error)
70 | return
71 | }
72 | res.send(true)
73 | return
74 | })
75 | })
76 |
77 | module.exports = router
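// Note: the queries above splice user input directly into the SQL string. A safer sketch using the
// mysql driver's placeholder support (values are escaped by the driver) would look like:
//   connection.query('SELECT * FROM user WHERE username = ?', [username], function (error, results) { ... })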
78 |
--------------------------------------------------------------------------------
/deploy/master/configs/hive-env.sh:
--------------------------------------------------------------------------------
1 | # Licensed to the Apache Software Foundation (ASF) under one
2 | # or more contributor license agreements. See the NOTICE file
3 | # distributed with this work for additional information
4 | # regarding copyright ownership. The ASF licenses this file
5 | # to you under the Apache License, Version 2.0 (the
6 | # "License"); you may not use this file except in compliance
7 | # with the License. You may obtain a copy of the License at
8 | #
9 | # http://www.apache.org/licenses/LICENSE-2.0
10 | #
11 | # Unless required by applicable law or agreed to in writing, software
12 | # distributed under the License is distributed on an "AS IS" BASIS,
13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | # See the License for the specific language governing permissions and
15 | # limitations under the License.
16 |
17 | # Set Hive and Hadoop environment variables here. These variables can be used
18 | # to control the execution of Hive. It should be used by admins to configure
19 | # the Hive installation (so that users do not have to set environment variables
20 | # or set command line parameters to get correct behavior).
21 | #
22 | # The hive service being invoked (CLI etc.) is available via the environment
23 | # variable SERVICE
24 |
25 |
26 | # Hive Client memory usage can be an issue if a large number of clients
27 | # are running at the same time. The flags below have been useful in
28 | # reducing memory usage:
29 | #
30 | # if [ "$SERVICE" = "cli" ]; then
31 | # if [ -z "$DEBUG" ]; then
32 | # export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -Xms10m -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:+UseParNewGC -XX:-UseGCOverheadLimit"
33 | # else
34 | # export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -Xms10m -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:-UseGCOverheadLimit"
35 | # fi
36 | # fi
37 |
38 | # The heap size of the JVM started by the hive shell script can be controlled via:
39 | #
40 | # export HADOOP_HEAPSIZE=1024
41 | #
42 | # A larger heap size may be required when running queries over a large number of files or partitions.
43 | # By default the hive shell scripts use a heap size of 256 MB. A larger heap size would also be
44 | # appropriate for the hive server.
45 |
46 |
47 | # Set HADOOP_HOME to point to a specific hadoop install directory
48 | export HADOOP_HOME=/usr/local/hadoop-2.7.3
49 |
50 | # Hive Configuration Directory can be controlled by:
51 | export HIVE_CONF_DIR=/usr/local/hive/conf
52 |
53 | # Folder containing extra libraries required for hive compilation/execution can be controlled by:
54 | export HIVE_AUX_JARS_PATH=/usr/local/hive/lib
55 |
--------------------------------------------------------------------------------
/movie/frontend/src/views/Login.vue:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
Welcome
8 |
9 |
10 |
11 |
12 | Username
13 |
14 |
15 |
16 | Password
17 |
18 |
19 |
20 |
Login
21 |
Go Signup
22 |
23 |
24 |
25 |
26 |
27 |
28 |
29 |
30 |
83 |
84 |
87 |
--------------------------------------------------------------------------------
/movie/frontend/src/views/MovieInfo.vue:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 | Movie title: {{movieInfo.moviename}}
8 |
9 |
10 | Release date 🗓: {{movieInfo.moviename}}
11 |
12 |
13 | Director 🎬: {{movieInfo.director}}
14 |
15 |
16 | Lead actors 🕴: {{movieInfo.leadactors}}
17 |
18 |
19 | Average rating 🌟: {{movieInfo.averating}}
20 |
21 |
22 | Number of ratings: {{movieInfo.numrating}}
23 |
24 |
25 | Synopsis: {{movieInfo.description}}
26 |
27 |
28 | Genres: {{movieInfo.typelist}}
29 |
30 |
31 |
32 | Rate
33 |
34 |
35 | Your rating for this movie: {{movieInfo.user_rating}}
36 |
37 |
38 |
39 |
40 |
92 |
93 |
102 |
--------------------------------------------------------------------------------
/deploy/slave1/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM centos:7
2 |
3 | LABEL Description="spark+hadoop on centos7" version="1.0"
4 |
5 | # Install required packages
6 | RUN yum -y install net-tools
7 | RUN yum -y install which
8 | RUN yum -y install openssh-server openssh-clients
9 | RUN yum -y install python3
10 | RUN yum -y install vim
11 | RUN yum install -y gcc-c++
12 | RUN yum install -y pcre pcre-devel
13 | RUN yum install -y zlib zlib-devel
14 | RUN yum install -y openssl openssl-devel
15 | RUN yum clean all
16 |
17 | # Configure passwordless SSH login
18 | RUN ssh-keygen -q -t rsa -b 2048 -f /etc/ssh/ssh_host_rsa_key -N ''
19 | RUN ssh-keygen -q -t ecdsa -f /etc/ssh/ssh_host_ecdsa_key -N ''
20 | RUN ssh-keygen -q -t ed25519 -f /etc/ssh/ssh_host_ed25519_key -N ''
21 | RUN ssh-keygen -f /root/.ssh/id_rsa -N ''
22 | RUN touch /root/.ssh/authorized_keys
23 | RUN cat /root/.ssh/id_rsa.pub >> /root/.ssh/authorized_keys
24 | RUN echo "root:ss123456" | chpasswd
25 | COPY ./configs/ssh_config /etc/ssh/ssh_config
26 |
27 | # Add the JDK and set the JAVA_HOME environment variable
28 | ADD ./tools/jdk-8u212-linux-x64.tar.gz /usr/local/
29 | ENV JAVA_HOME /usr/local/jdk1.8.0_212/
30 | ENV CLASSPATH $JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar
31 |
32 | # Add Hadoop and set its environment variables
33 | #ADD ./tools/hadoop-2.8.5.tar.gz /usr/local
34 | ADD ./tools/hadoop-2.7.3.tar.gz /usr/local
35 | #ENV HADOOP_HOME /usr/local/hadoop-2.8.5
36 | ENV HADOOP_HOME /usr/local/hadoop-2.7.3
37 |
38 | # Add Spark and set its environment variables
39 | ADD ./tools/scala-2.12.7.tgz /usr/share
40 | ADD ./tools/spark-2.4.5-bin-without-hadoop.tgz /usr/local
41 | ENV SPARK_HOME /usr/local/spark-2.4.5-bin-without-hadoop
42 |
43 | # Add the environment variables to the system PATH
44 | #ENV PATH $HADOOP_HOME/bin:$JAVA_HOME/bin:$SPARK_HOME/bin:$SPARK_HOME/sbin:$NGINX_HOME/sbin:$PATH
45 | ENV PATH $HADOOP_HOME/bin:$JAVA_HOME/bin:$PATH
46 |
47 | # Copy the Hadoop and Spark configuration files into the image
48 | COPY ./configs/hadoop-env.sh $HADOOP_HOME/etc/hadoop/hadoop-env.sh
49 | COPY ./configs/log4j.properties $SPARK_HOME/conf/log4j.properties
50 | COPY ./configs/spark-env.sh $SPARK_HOME/conf/spark-env.sh
51 | COPY ./configs/hdfs-site.xml $HADOOP_HOME/etc/hadoop/hdfs-site.xml
52 | COPY ./configs/core-site.xml $HADOOP_HOME/etc/hadoop/core-site.xml
53 | COPY ./configs/yarn-site.xml $HADOOP_HOME/etc/hadoop/yarn-site.xml
54 | COPY ./configs/mapred-site.xml $HADOOP_HOME/etc/hadoop/mapred-site.xml
55 | COPY ./configs/master $HADOOP_HOME/etc/hadoop/master
56 | COPY ./configs/slaves $HADOOP_HOME/etc/hadoop/slaves
57 | COPY ./configs/slaves $SPARK_HOME/conf/slaves
58 |
59 | # Create data directories
60 | RUN mkdir -p /data/hadoop/dfs/data && \
61 | mkdir -p /data/hadoop/dfs/name && \
62 | mkdir -p /data/hadoop/tmp
63 | RUN mkdir -p /mnt/spark/tmp
64 |
65 |
66 | # Python-related configuration
67 | #RUN cp -r $SPARK_HOME/python/pyspark /usr/lib64/python3.6/site-packages
68 | #RUN rm -f /usr/bin/python
69 | #RUN ln -s /usr/bin/python3 /usr/bin/python
70 | ENV PYTHONPATH $SPARK_HOME/python:$SPARK_HOME/python/lib/py4j-0.10.4-src.zip:$PYTHONPATH
71 | ENV PYSPARK_PYTHON /usr/bin/python3
72 | ENV SPARK_PYTHONPATH /usr/bin/python3
73 | RUN pip3 install -i https://pypi.tuna.tsinghua.edu.cn/simple --upgrade pip
74 | #RUN pip3 install -i https://pypi.tuna.tsinghua.edu.cn/simple py4j==0.10.7
75 | RUN pip3 install -i https://pypi.tuna.tsinghua.edu.cn/simple py4j
76 | RUN pip3 install -i https://pypi.tuna.tsinghua.edu.cn/simple hdfs
77 | RUN pip3 install -i https://pypi.tuna.tsinghua.edu.cn/simple tqdm
78 | RUN pip3 install -i https://pypi.tuna.tsinghua.edu.cn/simple flask
79 | RUN pip3 install -i https://pypi.tuna.tsinghua.edu.cn/simple flask_cors
80 |
81 |
82 | # Expose SSH port 22
83 | EXPOSE 22
84 |
85 | # Command executed when the container starts
86 | CMD ["/usr/sbin/sshd","-D"]
87 |
88 |
--------------------------------------------------------------------------------
/deploy/slave2/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM centos:7
2 |
3 | LABEL Description="spark+hadoop on centos7" version="1.0"
4 |
5 | # Install required packages
6 | RUN yum -y install net-tools
7 | RUN yum -y install which
8 | RUN yum -y install openssh-server openssh-clients
9 | RUN yum -y install python3
10 | RUN yum -y install vim
11 | RUN yum install -y gcc-c++
12 | RUN yum install -y pcre pcre-devel
13 | RUN yum install -y zlib zlib-devel
14 | RUN yum install -y openssl openssl-devel
15 | RUN yum clean all
16 |
17 | # Configure passwordless SSH login
18 | RUN ssh-keygen -q -t rsa -b 2048 -f /etc/ssh/ssh_host_rsa_key -N ''
19 | RUN ssh-keygen -q -t ecdsa -f /etc/ssh/ssh_host_ecdsa_key -N ''
20 | RUN ssh-keygen -q -t ed25519 -f /etc/ssh/ssh_host_ed25519_key -N ''
21 | RUN ssh-keygen -f /root/.ssh/id_rsa -N ''
22 | RUN touch /root/.ssh/authorized_keys
23 | RUN cat /root/.ssh/id_rsa.pub >> /root/.ssh/authorized_keys
24 | RUN echo "root:ss123456" | chpasswd
25 | COPY ./configs/ssh_config /etc/ssh/ssh_config
26 |
27 | # Add the JDK and set the JAVA_HOME environment variable
28 | ADD ./tools/jdk-8u212-linux-x64.tar.gz /usr/local/
29 | ENV JAVA_HOME /usr/local/jdk1.8.0_212/
30 | ENV CLASSPATH $JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar
31 |
32 | # Add Hadoop and set its environment variables
33 | #ADD ./tools/hadoop-2.8.5.tar.gz /usr/local
34 | ADD ./tools/hadoop-2.7.3.tar.gz /usr/local
35 | #ENV HADOOP_HOME /usr/local/hadoop-2.8.5
36 | ENV HADOOP_HOME /usr/local/hadoop-2.7.3
37 |
38 | # Add Spark and set its environment variables
39 | ADD ./tools/scala-2.12.7.tgz /usr/share
40 | ADD ./tools/spark-2.4.5-bin-without-hadoop.tgz /usr/local
41 | ENV SPARK_HOME /usr/local/spark-2.4.5-bin-without-hadoop
42 |
43 | # Add the environment variables to the system PATH
44 | #ENV PATH $HADOOP_HOME/bin:$JAVA_HOME/bin:$SPARK_HOME/bin:$SPARK_HOME/sbin:$NGINX_HOME/sbin:$PATH
45 | ENV PATH $HADOOP_HOME/bin:$JAVA_HOME/bin:$PATH
46 |
47 | # Copy the Hadoop and Spark configuration files into the image
48 | COPY ./configs/hadoop-env.sh $HADOOP_HOME/etc/hadoop/hadoop-env.sh
49 | COPY ./configs/log4j.properties $SPARK_HOME/conf/log4j.properties
50 | COPY ./configs/spark-env.sh $SPARK_HOME/conf/spark-env.sh
51 | COPY ./configs/hdfs-site.xml $HADOOP_HOME/etc/hadoop/hdfs-site.xml
52 | COPY ./configs/core-site.xml $HADOOP_HOME/etc/hadoop/core-site.xml
53 | COPY ./configs/yarn-site.xml $HADOOP_HOME/etc/hadoop/yarn-site.xml
54 | COPY ./configs/mapred-site.xml $HADOOP_HOME/etc/hadoop/mapred-site.xml
55 | COPY ./configs/master $HADOOP_HOME/etc/hadoop/master
56 | COPY ./configs/slaves $HADOOP_HOME/etc/hadoop/slaves
57 | COPY ./configs/slaves $SPARK_HOME/conf/slaves
58 |
59 | # Create data directories
60 | RUN mkdir -p /data/hadoop/dfs/data && \
61 | mkdir -p /data/hadoop/dfs/name && \
62 | mkdir -p /data/hadoop/tmp
63 | RUN mkdir -p /mnt/spark/tmp
64 |
65 |
66 | # Python-related configuration
67 | #RUN cp -r $SPARK_HOME/python/pyspark /usr/lib64/python3.6/site-packages
68 | #RUN rm -f /usr/bin/python
69 | #RUN ln -s /usr/bin/python3 /usr/bin/python
70 | ENV PYTHONPATH $SPARK_HOME/python:$SPARK_HOME/python/lib/py4j-0.10.4-src.zip:$PYTHONPATH
71 | ENV PYSPARK_PYTHON /usr/bin/python3
72 | ENV SPARK_PYTHONPATH /usr/bin/python3
73 | RUN pip3 install -i https://pypi.tuna.tsinghua.edu.cn/simple --upgrade pip
74 | #RUN pip3 install -i https://pypi.tuna.tsinghua.edu.cn/simple py4j==0.10.7
75 | RUN pip3 install -i https://pypi.tuna.tsinghua.edu.cn/simple py4j
76 | RUN pip3 install -i https://pypi.tuna.tsinghua.edu.cn/simple hdfs
77 | RUN pip3 install -i https://pypi.tuna.tsinghua.edu.cn/simple tqdm
78 | RUN pip3 install -i https://pypi.tuna.tsinghua.edu.cn/simple flask
79 | RUN pip3 install -i https://pypi.tuna.tsinghua.edu.cn/simple flask_cors
80 |
81 |
82 | # Expose SSH port 22
83 | EXPOSE 22
84 |
85 | # Command executed when the container starts
86 | CMD ["/usr/sbin/sshd","-D"]
87 |
88 |
--------------------------------------------------------------------------------
/log/frontend/src/views/Home.vue:
--------------------------------------------------------------------------------
1 |
2 |
3 |
Start time
4 |
5 |
End time
6 |
7 |
Query
8 |
9 |
Start time
10 |
11 |
End time
12 |
13 |
Query
14 |
15 |
16 |
17 |
18 |
107 |
--------------------------------------------------------------------------------
/deploy/master/configs/hadoop-env.sh:
--------------------------------------------------------------------------------
1 | # Licensed to the Apache Software Foundation (ASF) under one
2 | # or more contributor license agreements. See the NOTICE file
3 | # distributed with this work for additional information
4 | # regarding copyright ownership. The ASF licenses this file
5 | # to you under the Apache License, Version 2.0 (the
6 | # "License"); you may not use this file except in compliance
7 | # with the License. You may obtain a copy of the License at
8 | #
9 | # http://www.apache.org/licenses/LICENSE-2.0
10 | #
11 | # Unless required by applicable law or agreed to in writing, software
12 | # distributed under the License is distributed on an "AS IS" BASIS,
13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | # See the License for the specific language governing permissions and
15 | # limitations under the License.
16 |
17 | # Set Hadoop-specific environment variables here.
18 |
19 | # The only required environment variable is JAVA_HOME. All others are
20 | # optional. When running a distributed configuration it is best to
21 | # set JAVA_HOME in this file, so that it is correctly defined on
22 | # remote nodes.
23 |
24 | # The java implementation to use.
25 | export JAVA_HOME=/usr/local/jdk1.8.0_212
26 |
27 | # The jsvc implementation to use. Jsvc is required to run secure datanodes
28 | # that bind to privileged ports to provide authentication of data transfer
29 | # protocol. Jsvc is not required if SASL is configured for authentication of
30 | # data transfer protocol using non-privileged ports.
31 | #export JSVC_HOME=${JSVC_HOME}
32 |
33 | export HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-"/etc/hadoop"}
34 |
35 | # Extra Java CLASSPATH elements. Automatically insert capacity-scheduler.
36 | for f in $HADOOP_HOME/contrib/capacity-scheduler/*.jar; do
37 | if [ "$HADOOP_CLASSPATH" ]; then
38 | export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:$f
39 | else
40 | export HADOOP_CLASSPATH=$f
41 | fi
42 | done
43 |
44 | # The maximum amount of heap to use, in MB. Default is 1000.
45 | #export HADOOP_HEAPSIZE=
46 | #export HADOOP_NAMENODE_INIT_HEAPSIZE=""
47 |
48 | # Extra Java runtime options. Empty by default.
49 | export HADOOP_OPTS="$HADOOP_OPTS -Djava.net.preferIPv4Stack=true"
50 |
51 | # Command specific options appended to HADOOP_OPTS when specified
52 | export HADOOP_NAMENODE_OPTS="-Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,RFAS} -Dhdfs.audit.logger=${HDFS_AUDIT_LOGGER:-INFO,NullAppender} $HADOOP_NAMENODE_OPTS"
53 | export HADOOP_DATANODE_OPTS="-Dhadoop.security.logger=ERROR,RFAS $HADOOP_DATANODE_OPTS"
54 |
55 | export HADOOP_SECONDARYNAMENODE_OPTS="-Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,RFAS} -Dhdfs.audit.logger=${HDFS_AUDIT_LOGGER:-INFO,NullAppender} $HADOOP_SECONDARYNAMENODE_OPTS"
56 |
57 | export HADOOP_NFS3_OPTS="$HADOOP_NFS3_OPTS"
58 | export HADOOP_PORTMAP_OPTS="-Xmx512m $HADOOP_PORTMAP_OPTS"
59 |
60 | # The following applies to multiple commands (fs, dfs, fsck, distcp etc)
61 | export HADOOP_CLIENT_OPTS="-Xmx512m $HADOOP_CLIENT_OPTS"
62 | #HADOOP_JAVA_PLATFORM_OPTS="-XX:-UsePerfData $HADOOP_JAVA_PLATFORM_OPTS"
63 |
64 | # On secure datanodes, user to run the datanode as after dropping privileges.
65 | # This **MUST** be uncommented to enable secure HDFS if using privileged ports
66 | # to provide authentication of data transfer protocol. This **MUST NOT** be
67 | # defined if SASL is configured for authentication of data transfer protocol
68 | # using non-privileged ports.
69 | export HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER}
70 |
71 | # Where log files are stored. $HADOOP_HOME/logs by default.
72 | #export HADOOP_LOG_DIR=${HADOOP_LOG_DIR}/$USER
73 |
74 | # Where log files are stored in the secure data environment.
75 | export HADOOP_SECURE_DN_LOG_DIR=${HADOOP_LOG_DIR}/${HADOOP_HDFS_USER}
76 |
77 | ###
78 | # HDFS Mover specific parameters
79 | ###
80 | # Specify the JVM options to be used when starting the HDFS Mover.
81 | # These options will be appended to the options specified as HADOOP_OPTS
82 | # and therefore may override any similar flags set in HADOOP_OPTS
83 | #
84 | # export HADOOP_MOVER_OPTS=""
85 |
86 | ###
87 | # Advanced Users Only!
88 | ###
89 |
90 | # The directory where pid files are stored. /tmp by default.
91 | # NOTE: this should be set to a directory that can only be written to by
92 | # the user that will run the hadoop daemons. Otherwise there is the
93 | # potential for a symlink attack.
94 | export HADOOP_PID_DIR=${HADOOP_PID_DIR}
95 | export HADOOP_SECURE_DN_PID_DIR=${HADOOP_PID_DIR}
96 |
97 | # A string representing this instance of hadoop. $USER by default.
98 | export HADOOP_IDENT_STRING=$USER
99 | #export HADOOP_SSH_OPTS="-p 10022"
100 |
--------------------------------------------------------------------------------
/deploy/slave1/configs/hadoop-env.sh:
--------------------------------------------------------------------------------
1 | # Licensed to the Apache Software Foundation (ASF) under one
2 | # or more contributor license agreements. See the NOTICE file
3 | # distributed with this work for additional information
4 | # regarding copyright ownership. The ASF licenses this file
5 | # to you under the Apache License, Version 2.0 (the
6 | # "License"); you may not use this file except in compliance
7 | # with the License. You may obtain a copy of the License at
8 | #
9 | # http://www.apache.org/licenses/LICENSE-2.0
10 | #
11 | # Unless required by applicable law or agreed to in writing, software
12 | # distributed under the License is distributed on an "AS IS" BASIS,
13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | # See the License for the specific language governing permissions and
15 | # limitations under the License.
16 |
17 | # Set Hadoop-specific environment variables here.
18 |
19 | # The only required environment variable is JAVA_HOME. All others are
20 | # optional. When running a distributed configuration it is best to
21 | # set JAVA_HOME in this file, so that it is correctly defined on
22 | # remote nodes.
23 |
24 | # The java implementation to use.
25 | export JAVA_HOME=/usr/local/jdk1.8.0_212
26 |
27 | # The jsvc implementation to use. Jsvc is required to run secure datanodes
28 | # that bind to privileged ports to provide authentication of data transfer
29 | # protocol. Jsvc is not required if SASL is configured for authentication of
30 | # data transfer protocol using non-privileged ports.
31 | #export JSVC_HOME=${JSVC_HOME}
32 |
33 | export HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-"/etc/hadoop"}
34 |
35 | # Extra Java CLASSPATH elements. Automatically insert capacity-scheduler.
36 | for f in $HADOOP_HOME/contrib/capacity-scheduler/*.jar; do
37 | if [ "$HADOOP_CLASSPATH" ]; then
38 | export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:$f
39 | else
40 | export HADOOP_CLASSPATH=$f
41 | fi
42 | done
43 |
44 | # The maximum amount of heap to use, in MB. Default is 1000.
45 | #export HADOOP_HEAPSIZE=
46 | #export HADOOP_NAMENODE_INIT_HEAPSIZE=""
47 |
48 | # Extra Java runtime options. Empty by default.
49 | export HADOOP_OPTS="$HADOOP_OPTS -Djava.net.preferIPv4Stack=true"
50 |
51 | # Command specific options appended to HADOOP_OPTS when specified
52 | export HADOOP_NAMENODE_OPTS="-Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,RFAS} -Dhdfs.audit.logger=${HDFS_AUDIT_LOGGER:-INFO,NullAppender} $HADOOP_NAMENODE_OPTS"
53 | export HADOOP_DATANODE_OPTS="-Dhadoop.security.logger=ERROR,RFAS $HADOOP_DATANODE_OPTS"
54 |
55 | export HADOOP_SECONDARYNAMENODE_OPTS="-Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,RFAS} -Dhdfs.audit.logger=${HDFS_AUDIT_LOGGER:-INFO,NullAppender} $HADOOP_SECONDARYNAMENODE_OPTS"
56 |
57 | export HADOOP_NFS3_OPTS="$HADOOP_NFS3_OPTS"
58 | export HADOOP_PORTMAP_OPTS="-Xmx512m $HADOOP_PORTMAP_OPTS"
59 |
60 | # The following applies to multiple commands (fs, dfs, fsck, distcp etc)
61 | export HADOOP_CLIENT_OPTS="-Xmx512m $HADOOP_CLIENT_OPTS"
62 | #HADOOP_JAVA_PLATFORM_OPTS="-XX:-UsePerfData $HADOOP_JAVA_PLATFORM_OPTS"
63 |
64 | # On secure datanodes, user to run the datanode as after dropping privileges.
65 | # This **MUST** be uncommented to enable secure HDFS if using privileged ports
66 | # to provide authentication of data transfer protocol. This **MUST NOT** be
67 | # defined if SASL is configured for authentication of data transfer protocol
68 | # using non-privileged ports.
69 | export HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER}
70 |
71 | # Where log files are stored. $HADOOP_HOME/logs by default.
72 | #export HADOOP_LOG_DIR=${HADOOP_LOG_DIR}/$USER
73 |
74 | # Where log files are stored in the secure data environment.
75 | export HADOOP_SECURE_DN_LOG_DIR=${HADOOP_LOG_DIR}/${HADOOP_HDFS_USER}
76 |
77 | ###
78 | # HDFS Mover specific parameters
79 | ###
80 | # Specify the JVM options to be used when starting the HDFS Mover.
81 | # These options will be appended to the options specified as HADOOP_OPTS
82 | # and therefore may override any similar flags set in HADOOP_OPTS
83 | #
84 | # export HADOOP_MOVER_OPTS=""
85 |
86 | ###
87 | # Advanced Users Only!
88 | ###
89 |
90 | # The directory where pid files are stored. /tmp by default.
91 | # NOTE: this should be set to a directory that can only be written to by
92 | # the user that will run the hadoop daemons. Otherwise there is the
93 | # potential for a symlink attack.
94 | export HADOOP_PID_DIR=${HADOOP_PID_DIR}
95 | export HADOOP_SECURE_DN_PID_DIR=${HADOOP_PID_DIR}
96 |
97 | # A string representing this instance of hadoop. $USER by default.
98 | export HADOOP_IDENT_STRING=$USER
99 | #export HADOOP_SSH_OPTS="-p 10022"
100 |
--------------------------------------------------------------------------------
/deploy/slave2/configs/hadoop-env.sh:
--------------------------------------------------------------------------------
1 | # Licensed to the Apache Software Foundation (ASF) under one
2 | # or more contributor license agreements. See the NOTICE file
3 | # distributed with this work for additional information
4 | # regarding copyright ownership. The ASF licenses this file
5 | # to you under the Apache License, Version 2.0 (the
6 | # "License"); you may not use this file except in compliance
7 | # with the License. You may obtain a copy of the License at
8 | #
9 | # http://www.apache.org/licenses/LICENSE-2.0
10 | #
11 | # Unless required by applicable law or agreed to in writing, software
12 | # distributed under the License is distributed on an "AS IS" BASIS,
13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | # See the License for the specific language governing permissions and
15 | # limitations under the License.
16 |
17 | # Set Hadoop-specific environment variables here.
18 |
19 | # The only required environment variable is JAVA_HOME. All others are
20 | # optional. When running a distributed configuration it is best to
21 | # set JAVA_HOME in this file, so that it is correctly defined on
22 | # remote nodes.
23 |
24 | # The java implementation to use.
25 | export JAVA_HOME=/usr/local/jdk1.8.0_212
26 |
27 | # The jsvc implementation to use. Jsvc is required to run secure datanodes
28 | # that bind to privileged ports to provide authentication of data transfer
29 | # protocol. Jsvc is not required if SASL is configured for authentication of
30 | # data transfer protocol using non-privileged ports.
31 | #export JSVC_HOME=${JSVC_HOME}
32 |
33 | export HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-"/etc/hadoop"}
34 |
35 | # Extra Java CLASSPATH elements. Automatically insert capacity-scheduler.
36 | for f in $HADOOP_HOME/contrib/capacity-scheduler/*.jar; do
37 | if [ "$HADOOP_CLASSPATH" ]; then
38 | export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:$f
39 | else
40 | export HADOOP_CLASSPATH=$f
41 | fi
42 | done
43 |
44 | # The maximum amount of heap to use, in MB. Default is 1000.
45 | #export HADOOP_HEAPSIZE=
46 | #export HADOOP_NAMENODE_INIT_HEAPSIZE=""
47 |
48 | # Extra Java runtime options. Empty by default.
49 | export HADOOP_OPTS="$HADOOP_OPTS -Djava.net.preferIPv4Stack=true"
50 |
51 | # Command specific options appended to HADOOP_OPTS when specified
52 | export HADOOP_NAMENODE_OPTS="-Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,RFAS} -Dhdfs.audit.logger=${HDFS_AUDIT_LOGGER:-INFO,NullAppender} $HADOOP_NAMENODE_OPTS"
53 | export HADOOP_DATANODE_OPTS="-Dhadoop.security.logger=ERROR,RFAS $HADOOP_DATANODE_OPTS"
54 |
55 | export HADOOP_SECONDARYNAMENODE_OPTS="-Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,RFAS} -Dhdfs.audit.logger=${HDFS_AUDIT_LOGGER:-INFO,NullAppender} $HADOOP_SECONDARYNAMENODE_OPTS"
56 |
57 | export HADOOP_NFS3_OPTS="$HADOOP_NFS3_OPTS"
58 | export HADOOP_PORTMAP_OPTS="-Xmx512m $HADOOP_PORTMAP_OPTS"
59 |
60 | # The following applies to multiple commands (fs, dfs, fsck, distcp etc)
61 | export HADOOP_CLIENT_OPTS="-Xmx512m $HADOOP_CLIENT_OPTS"
62 | #HADOOP_JAVA_PLATFORM_OPTS="-XX:-UsePerfData $HADOOP_JAVA_PLATFORM_OPTS"
63 |
64 | # On secure datanodes, user to run the datanode as after dropping privileges.
65 | # This **MUST** be uncommented to enable secure HDFS if using privileged ports
66 | # to provide authentication of data transfer protocol. This **MUST NOT** be
67 | # defined if SASL is configured for authentication of data transfer protocol
68 | # using non-privileged ports.
69 | export HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER}
70 |
71 | # Where log files are stored. $HADOOP_HOME/logs by default.
72 | #export HADOOP_LOG_DIR=${HADOOP_LOG_DIR}/$USER
73 |
74 | # Where log files are stored in the secure data environment.
75 | export HADOOP_SECURE_DN_LOG_DIR=${HADOOP_LOG_DIR}/${HADOOP_HDFS_USER}
76 |
77 | ###
78 | # HDFS Mover specific parameters
79 | ###
80 | # Specify the JVM options to be used when starting the HDFS Mover.
81 | # These options will be appended to the options specified as HADOOP_OPTS
82 | # and therefore may override any similar flags set in HADOOP_OPTS
83 | #
84 | # export HADOOP_MOVER_OPTS=""
85 |
86 | ###
87 | # Advanced Users Only!
88 | ###
89 |
90 | # The directory where pid files are stored. /tmp by default.
91 | # NOTE: this should be set to a directory that can only be written to by
92 | # the user that will run the hadoop daemons. Otherwise there is the
93 | # potential for a symlink attack.
94 | export HADOOP_PID_DIR=${HADOOP_PID_DIR}
95 | export HADOOP_SECURE_DN_PID_DIR=${HADOOP_PID_DIR}
96 |
97 | # A string representing this instance of hadoop. $USER by default.
98 | export HADOOP_IDENT_STRING=$USER
99 | #export HADOOP_SSH_OPTS="-p 10022"
100 |
--------------------------------------------------------------------------------
/movie/frontend/src/views/Profile.vue:
--------------------------------------------------------------------------------
1 |
2 |
3 |
Movie Recommendation System
4 |
Username: {{userDetail.username}}
5 |
User ID: {{userDetail.userid}}
6 |
7 | Logout
8 |
9 |
10 |
11 |
12 |
13 |
14 |
{{movie.moviename}}
15 |
16 | 🗓:
17 | {{movie.showyear.substring(0,17)}}
18 |
19 | 🎬:
20 | {{movie.director}}
21 |
22 | 🕴:
23 | {{movie.leadactors}}
24 | {{movie.leadactors.substr(0,10)+"..."}}
25 |
26 | 🌟:
27 | {{movie.averating}}/5
28 |
29 | ℹ️:
30 | {{movie.typelist}}
31 |
32 |
33 |
34 |
35 |
36 |
37 |
38 |
53 |
54 |
75 |
76 |
77 |
78 |
120 |
121 |
147 |
--------------------------------------------------------------------------------
/deploy/master/configs/spark-env.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | #
4 | # Licensed to the Apache Software Foundation (ASF) under one or more
5 | # contributor license agreements. See the NOTICE file distributed with
6 | # this work for additional information regarding copyright ownership.
7 | # The ASF licenses this file to You under the Apache License, Version 2.0
8 | # (the "License"); you may not use this file except in compliance with
9 | # the License. You may obtain a copy of the License at
10 | #
11 | # http://www.apache.org/licenses/LICENSE-2.0
12 | #
13 | # Unless required by applicable law or agreed to in writing, software
14 | # distributed under the License is distributed on an "AS IS" BASIS,
15 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 | # See the License for the specific language governing permissions and
17 | # limitations under the License.
18 | #
19 |
20 | # This file is sourced when running various Spark programs.
21 | # Copy it as spark-env.sh and edit that to configure Spark for your site.
22 |
23 | # Options read when launching programs locally with
24 | # ./bin/run-example or ./bin/spark-submit
25 | # - HADOOP_CONF_DIR, to point Spark towards Hadoop configuration files
26 | # - SPARK_LOCAL_IP, to set the IP address Spark binds to on this node
27 | # - SPARK_PUBLIC_DNS, to set the public dns name of the driver program
28 |
29 | # Options read by executors and drivers running inside the cluster
30 | # - SPARK_LOCAL_IP, to set the IP address Spark binds to on this node
31 | # - SPARK_PUBLIC_DNS, to set the public DNS name of the driver program
32 | # - SPARK_LOCAL_DIRS, storage directories to use on this node for shuffle and RDD data
33 | # - MESOS_NATIVE_JAVA_LIBRARY, to point to your libmesos.so if you use Mesos
34 |
35 | # Options read in YARN client/cluster mode
36 | # - SPARK_CONF_DIR, Alternate conf dir. (Default: ${SPARK_HOME}/conf)
37 | # - HADOOP_CONF_DIR, to point Spark towards Hadoop configuration files
38 | # - YARN_CONF_DIR, to point Spark towards YARN configuration files when you use YARN
39 | # - SPARK_EXECUTOR_CORES, Number of cores for the executors (Default: 1).
40 | # - SPARK_EXECUTOR_MEMORY, Memory per Executor (e.g. 1000M, 2G) (Default: 1G)
41 | # - SPARK_DRIVER_MEMORY, Memory for Driver (e.g. 1000M, 2G) (Default: 1G)
42 |
43 | # Options for the daemons used in the standalone deploy mode
44 | # - SPARK_MASTER_HOST, to bind the master to a different IP address or hostname
45 | # - SPARK_MASTER_PORT / SPARK_MASTER_WEBUI_PORT, to use non-default ports for the master
46 | # - SPARK_MASTER_OPTS, to set config properties only for the master (e.g. "-Dx=y")
47 | # - SPARK_WORKER_CORES, to set the number of cores to use on this machine
48 | # - SPARK_WORKER_MEMORY, to set how much total memory workers have to give executors (e.g. 1000m, 2g)
49 | # - SPARK_WORKER_PORT / SPARK_WORKER_WEBUI_PORT, to use non-default ports for the worker
50 | # - SPARK_WORKER_DIR, to set the working directory of worker processes
51 | # - SPARK_WORKER_OPTS, to set config properties only for the worker (e.g. "-Dx=y")
52 | # - SPARK_DAEMON_MEMORY, to allocate to the master, worker and history server themselves (default: 1g).
53 | # - SPARK_HISTORY_OPTS, to set config properties only for the history server (e.g. "-Dx=y")
54 | # - SPARK_SHUFFLE_OPTS, to set config properties only for the external shuffle service (e.g. "-Dx=y")
55 | # - SPARK_DAEMON_JAVA_OPTS, to set config properties for all daemons (e.g. "-Dx=y")
56 | # - SPARK_DAEMON_CLASSPATH, to set the classpath for all daemons
57 | # - SPARK_PUBLIC_DNS, to set the public dns name of the master or workers
58 |
59 | # Generic options for the daemons used in the standalone deploy mode
60 | # - SPARK_CONF_DIR Alternate conf dir. (Default: ${SPARK_HOME}/conf)
61 | # - SPARK_LOG_DIR Where log files are stored. (Default: ${SPARK_HOME}/logs)
62 | # - SPARK_PID_DIR Where the pid file is stored. (Default: /tmp)
63 | # - SPARK_IDENT_STRING A string representing this instance of spark. (Default: $USER)
64 | # - SPARK_NICENESS The scheduling priority for daemons. (Default: 0)
65 | # - SPARK_NO_DAEMONIZE Run the proposed command in the foreground. It will not output a PID file.
66 | # Options for native BLAS, like Intel MKL, OpenBLAS, and so on.
67 | # You might get better performance to enable these options if using native BLAS (see SPARK-21305).
68 | # - MKL_NUM_THREADS=1 Disable multi-threading of Intel MKL
69 | # - OPENBLAS_NUM_THREADS=1 Disable multi-threading of OpenBLAS
70 | export PYSPARK_PYTHON=/usr/bin/python3
71 | export PYSPARK_DRIVER_PYTHON=/usr/bin/python3
72 | export SPARK_HOME=/usr/local/spark-2.4.5-bin-without-hadoop
73 | export PYTHONPATH=$SPARK_HOME/python:$SPARK_HOME/python/lib/py4j-0.10.4-src.zip:$PYTHONPATH
74 | export JAVA_HOME=/usr/local/jdk1.8.0_212/
75 | export HADOOP_HOME=/usr/local/hadoop-2.7.3
76 | export HADOOP_CONF_DIR=$HADOOP_HOME/etc/hadoop
77 | export SPARK_DIST_CLASSPATH=$(/usr/local/hadoop-2.7.3/bin/hadoop classpath)
78 | export SCALA_HOME=/usr/share/scala-2.12.7
79 | #export SPARK_MASTER_HOST=192.168.0.222
80 | #export SPARK_MASTER_HOST=172.17.1.2
81 | export SPARK_MASTER_IP=192.168.0.222
82 | #export SPARK_MASTER_IP=172.17.1.2
83 | #export SPARK_MASTER_PORT=7077
84 | #export SPARK_MASTER_WEBUI_PORT=7070
85 | export SPARK_WORKER_CORES=1
86 | export SPARK_WORKER_MEMORY=2g
87 | export SPARK_WORKER_INSTANCES=1
88 | #export SPARK_SSH_OPTS="-p 10022"
89 | #export SPARK_YARN_USER_ENV="CLASSPATH=/usr/local/hadoop/hadoop-2.8.5/etc/hadoop"
90 | #export SPARK_CLASSPATH=$HBASE_HOME/lib/hbase-protocol-1.2.4.jar:$HBASE_HOME/lib/hbase-common-1.2.4.jar:$HBASE_HOME/lib/htrace-core-3.1.0-incubating.jar:$HBASE_HOME/lib/hbase-server-1.2.4.jar:$HBASE_HOME/lib/hbase-client-1.2.4.jar:$HBASE_HOME/lib/metrics-core-2.2.0.jar:$SPARK_CLASSPATH
91 | #export SPARK_LOCAL_DIR="/mnt/spark/tmp"
92 | #export YARN_CONF_DIR=$HADOOP_HOME/etc/hadoop
93 | #export SPARK_JAVA_OPTS="-Dspark.storage.blockManagerHeartBeatMs=60000-Dspark.local.dir=$SPARK_LOCAL_DIR -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -Xloggc:$SPARK_HOME/logs/gc.log -XX:+UseConcMarkSweepGC -XX:+UseCMSCompactAtFullCollection -XX:CMSInitiatingOccupancyFraction=60"
94 | #export SPARK_DAEMON_JAVA_OPTS="-Dspark.deploy.recoveryMode=FILESYSTEM -Dspark.deploy.recoveryDirectory=/nfs/spark/recovery"
95 | #export SPARK_LOCAL_IP=192.168.0.222
96 |
--------------------------------------------------------------------------------
/deploy/slave1/configs/spark-env.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | #
4 | # Licensed to the Apache Software Foundation (ASF) under one or more
5 | # contributor license agreements. See the NOTICE file distributed with
6 | # this work for additional information regarding copyright ownership.
7 | # The ASF licenses this file to You under the Apache License, Version 2.0
8 | # (the "License"); you may not use this file except in compliance with
9 | # the License. You may obtain a copy of the License at
10 | #
11 | # http://www.apache.org/licenses/LICENSE-2.0
12 | #
13 | # Unless required by applicable law or agreed to in writing, software
14 | # distributed under the License is distributed on an "AS IS" BASIS,
15 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 | # See the License for the specific language governing permissions and
17 | # limitations under the License.
18 | #
19 |
20 | # This file is sourced when running various Spark programs.
21 | # Copy it as spark-env.sh and edit that to configure Spark for your site.
22 |
23 | # Options read when launching programs locally with
24 | # ./bin/run-example or ./bin/spark-submit
25 | # - HADOOP_CONF_DIR, to point Spark towards Hadoop configuration files
26 | # - SPARK_LOCAL_IP, to set the IP address Spark binds to on this node
27 | # - SPARK_PUBLIC_DNS, to set the public dns name of the driver program
28 |
29 | # Options read by executors and drivers running inside the cluster
30 | # - SPARK_LOCAL_IP, to set the IP address Spark binds to on this node
31 | # - SPARK_PUBLIC_DNS, to set the public DNS name of the driver program
32 | # - SPARK_LOCAL_DIRS, storage directories to use on this node for shuffle and RDD data
33 | # - MESOS_NATIVE_JAVA_LIBRARY, to point to your libmesos.so if you use Mesos
34 |
35 | # Options read in YARN client/cluster mode
36 | # - SPARK_CONF_DIR, Alternate conf dir. (Default: ${SPARK_HOME}/conf)
37 | # - HADOOP_CONF_DIR, to point Spark towards Hadoop configuration files
38 | # - YARN_CONF_DIR, to point Spark towards YARN configuration files when you use YARN
39 | # - SPARK_EXECUTOR_CORES, Number of cores for the executors (Default: 1).
40 | # - SPARK_EXECUTOR_MEMORY, Memory per Executor (e.g. 1000M, 2G) (Default: 1G)
41 | # - SPARK_DRIVER_MEMORY, Memory for Driver (e.g. 1000M, 2G) (Default: 1G)
42 |
43 | # Options for the daemons used in the standalone deploy mode
44 | # - SPARK_MASTER_HOST, to bind the master to a different IP address or hostname
45 | # - SPARK_MASTER_PORT / SPARK_MASTER_WEBUI_PORT, to use non-default ports for the master
46 | # - SPARK_MASTER_OPTS, to set config properties only for the master (e.g. "-Dx=y")
47 | # - SPARK_WORKER_CORES, to set the number of cores to use on this machine
48 | # - SPARK_WORKER_MEMORY, to set how much total memory workers have to give executors (e.g. 1000m, 2g)
49 | # - SPARK_WORKER_PORT / SPARK_WORKER_WEBUI_PORT, to use non-default ports for the worker
50 | # - SPARK_WORKER_DIR, to set the working directory of worker processes
51 | # - SPARK_WORKER_OPTS, to set config properties only for the worker (e.g. "-Dx=y")
52 | # - SPARK_DAEMON_MEMORY, to allocate to the master, worker and history server themselves (default: 1g).
53 | # - SPARK_HISTORY_OPTS, to set config properties only for the history server (e.g. "-Dx=y")
54 | # - SPARK_SHUFFLE_OPTS, to set config properties only for the external shuffle service (e.g. "-Dx=y")
55 | # - SPARK_DAEMON_JAVA_OPTS, to set config properties for all daemons (e.g. "-Dx=y")
56 | # - SPARK_DAEMON_CLASSPATH, to set the classpath for all daemons
57 | # - SPARK_PUBLIC_DNS, to set the public dns name of the master or workers
58 |
59 | # Generic options for the daemons used in the standalone deploy mode
60 | # - SPARK_CONF_DIR Alternate conf dir. (Default: ${SPARK_HOME}/conf)
61 | # - SPARK_LOG_DIR Where log files are stored. (Default: ${SPARK_HOME}/logs)
62 | # - SPARK_PID_DIR Where the pid file is stored. (Default: /tmp)
63 | # - SPARK_IDENT_STRING A string representing this instance of spark. (Default: $USER)
64 | # - SPARK_NICENESS The scheduling priority for daemons. (Default: 0)
65 | # - SPARK_NO_DAEMONIZE Run the proposed command in the foreground. It will not output a PID file.
66 | # Options for native BLAS, like Intel MKL, OpenBLAS, and so on.
67 | # You might get better performance to enable these options if using native BLAS (see SPARK-21305).
68 | # - MKL_NUM_THREADS=1 Disable multi-threading of Intel MKL
69 | # - OPENBLAS_NUM_THREADS=1 Disable multi-threading of OpenBLAS
70 | export PYSPARK_PYTHON=/usr/bin/python3
71 | export PYSPARK_DRIVER_PYTHON=/usr/bin/python3
72 | export SPARK_HOME=/usr/local/spark-2.4.5-bin-without-hadoop
73 | export PYTHONPATH=$SPARK_HOME/python:$SPARK_HOME/python/lib/py4j-0.10.4-src.zip:$PYTHONPATH
74 | export JAVA_HOME=/usr/local/jdk1.8.0_212/
75 | export HADOOP_HOME=/usr/local/hadoop-2.7.3
76 | export HADOOP_CONF_DIR=$HADOOP_HOME/etc/hadoop
77 | export SPARK_DIST_CLASSPATH=$(/usr/local/hadoop-2.7.3/bin/hadoop classpath)
78 | export SCALA_HOME=/usr/share/scala-2.12.7
79 | #export SPARK_MASTER_HOST=192.168.0.222
80 | #export SPARK_MASTER_HOST=172.17.1.2
81 | export SPARK_MASTER_IP=192.168.0.222
82 | #export SPARK_MASTER_IP=172.17.1.2
83 | #export SPARK_MASTER_PORT=7077
84 | #export SPARK_MASTER_WEBUI_PORT=7070
85 | export SPARK_WORKER_CORES=1
86 | export SPARK_WORKER_MEMORY=2g
87 | export SPARK_WORKER_INSTANCES=1
88 | #export SPARK_SSH_OPTS="-p 10022"
89 | #export SPARK_YARN_USER_ENV="CLASSPATH=/usr/local/hadoop/hadoop-2.8.5/etc/hadoop"
90 | #export SPARK_CLASSPATH=$HBASE_HOME/lib/hbase-protocol-1.2.4.jar:$HBASE_HOME/lib/hbase-common-1.2.4.jar:$HBASE_HOME/lib/htrace-core-3.1.0-incubating.jar:$HBASE_HOME/lib/hbase-server-1.2.4.jar:$HBASE_HOME/lib/hbase-client-1.2.4.jar:$HBASE_HOME/lib/metrics-core-2.2.0.jar:$SPARK_CLASSPATH
91 | #export SPARK_LOCAL_DIR="/mnt/spark/tmp"
92 | #export YARN_CONF_DIR=$HADOOP_HOME/etc/hadoop
93 | #export SPARK_JAVA_OPTS="-Dspark.storage.blockManagerHeartBeatMs=60000-Dspark.local.dir=$SPARK_LOCAL_DIR -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -Xloggc:$SPARK_HOME/logs/gc.log -XX:+UseConcMarkSweepGC -XX:+UseCMSCompactAtFullCollection -XX:CMSInitiatingOccupancyFraction=60"
94 | #export SPARK_DAEMON_JAVA_OPTS="-Dspark.deploy.recoveryMode=FILESYSTEM -Dspark.deploy.recoveryDirectory=/nfs/spark/recovery"
95 | #export SPARK_LOCAL_IP=192.168.0.222
96 |
--------------------------------------------------------------------------------
/deploy/slave2/configs/spark-env.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | #
4 | # Licensed to the Apache Software Foundation (ASF) under one or more
5 | # contributor license agreements. See the NOTICE file distributed with
6 | # this work for additional information regarding copyright ownership.
7 | # The ASF licenses this file to You under the Apache License, Version 2.0
8 | # (the "License"); you may not use this file except in compliance with
9 | # the License. You may obtain a copy of the License at
10 | #
11 | # http://www.apache.org/licenses/LICENSE-2.0
12 | #
13 | # Unless required by applicable law or agreed to in writing, software
14 | # distributed under the License is distributed on an "AS IS" BASIS,
15 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 | # See the License for the specific language governing permissions and
17 | # limitations under the License.
18 | #
19 |
20 | # This file is sourced when running various Spark programs.
21 | # Copy it as spark-env.sh and edit that to configure Spark for your site.
22 |
23 | # Options read when launching programs locally with
24 | # ./bin/run-example or ./bin/spark-submit
25 | # - HADOOP_CONF_DIR, to point Spark towards Hadoop configuration files
26 | # - SPARK_LOCAL_IP, to set the IP address Spark binds to on this node
27 | # - SPARK_PUBLIC_DNS, to set the public dns name of the driver program
28 |
29 | # Options read by executors and drivers running inside the cluster
30 | # - SPARK_LOCAL_IP, to set the IP address Spark binds to on this node
31 | # - SPARK_PUBLIC_DNS, to set the public DNS name of the driver program
32 | # - SPARK_LOCAL_DIRS, storage directories to use on this node for shuffle and RDD data
33 | # - MESOS_NATIVE_JAVA_LIBRARY, to point to your libmesos.so if you use Mesos
34 |
35 | # Options read in YARN client/cluster mode
36 | # - SPARK_CONF_DIR, Alternate conf dir. (Default: ${SPARK_HOME}/conf)
37 | # - HADOOP_CONF_DIR, to point Spark towards Hadoop configuration files
38 | # - YARN_CONF_DIR, to point Spark towards YARN configuration files when you use YARN
39 | # - SPARK_EXECUTOR_CORES, Number of cores for the executors (Default: 1).
40 | # - SPARK_EXECUTOR_MEMORY, Memory per Executor (e.g. 1000M, 2G) (Default: 1G)
41 | # - SPARK_DRIVER_MEMORY, Memory for Driver (e.g. 1000M, 2G) (Default: 1G)
42 |
43 | # Options for the daemons used in the standalone deploy mode
44 | # - SPARK_MASTER_HOST, to bind the master to a different IP address or hostname
45 | # - SPARK_MASTER_PORT / SPARK_MASTER_WEBUI_PORT, to use non-default ports for the master
46 | # - SPARK_MASTER_OPTS, to set config properties only for the master (e.g. "-Dx=y")
47 | # - SPARK_WORKER_CORES, to set the number of cores to use on this machine
48 | # - SPARK_WORKER_MEMORY, to set how much total memory workers have to give executors (e.g. 1000m, 2g)
49 | # - SPARK_WORKER_PORT / SPARK_WORKER_WEBUI_PORT, to use non-default ports for the worker
50 | # - SPARK_WORKER_DIR, to set the working directory of worker processes
51 | # - SPARK_WORKER_OPTS, to set config properties only for the worker (e.g. "-Dx=y")
52 | # - SPARK_DAEMON_MEMORY, to allocate to the master, worker and history server themselves (default: 1g).
53 | # - SPARK_HISTORY_OPTS, to set config properties only for the history server (e.g. "-Dx=y")
54 | # - SPARK_SHUFFLE_OPTS, to set config properties only for the external shuffle service (e.g. "-Dx=y")
55 | # - SPARK_DAEMON_JAVA_OPTS, to set config properties for all daemons (e.g. "-Dx=y")
56 | # - SPARK_DAEMON_CLASSPATH, to set the classpath for all daemons
57 | # - SPARK_PUBLIC_DNS, to set the public dns name of the master or workers
58 |
59 | # Generic options for the daemons used in the standalone deploy mode
60 | # - SPARK_CONF_DIR Alternate conf dir. (Default: ${SPARK_HOME}/conf)
61 | # - SPARK_LOG_DIR Where log files are stored. (Default: ${SPARK_HOME}/logs)
62 | # - SPARK_PID_DIR Where the pid file is stored. (Default: /tmp)
63 | # - SPARK_IDENT_STRING A string representing this instance of spark. (Default: $USER)
64 | # - SPARK_NICENESS The scheduling priority for daemons. (Default: 0)
65 | # - SPARK_NO_DAEMONIZE Run the proposed command in the foreground. It will not output a PID file.
66 | # Options for native BLAS, like Intel MKL, OpenBLAS, and so on.
67 | # You might get better performance to enable these options if using native BLAS (see SPARK-21305).
68 | # - MKL_NUM_THREADS=1 Disable multi-threading of Intel MKL
69 | # - OPENBLAS_NUM_THREADS=1 Disable multi-threading of OpenBLAS
70 | export PYSPARK_PYTHON=/usr/bin/python3
71 | export PYSPARK_DRIVER_PYTHON=/usr/bin/python3
72 | export SPARK_HOME=/usr/local/spark-2.4.5-bin-without-hadoop
73 | export PYTHONPATH=$SPARK_HOME/python:$SPARK_HOME/python/lib/py4j-0.10.4-src.zip:$PYTHONPATH
74 | export JAVA_HOME=/usr/local/jdk1.8.0_212/
75 | export HADOOP_HOME=/usr/local/hadoop-2.7.3
76 | export HADOOP_CONF_DIR=$HADOOP_HOME/etc/hadoop
77 | export SPARK_DIST_CLASSPATH=$(/usr/local/hadoop-2.7.3/bin/hadoop classpath)
78 | export SCALA_HOME=/usr/share/scala-2.12.7
79 | #export SPARK_MASTER_HOST=192.168.0.222
80 | #export SPARK_MASTER_HOST=172.17.1.2
81 | export SPARK_MASTER_IP=192.168.0.222
82 | #export SPARK_MASTER_IP=172.17.1.2
83 | #export SPARK_MASTER_PORT=7077
84 | #export SPARK_MASTER_WEBUI_PORT=7070
85 | export SPARK_WORKER_CORES=1
86 | export SPARK_WORKER_MEMORY=2g
87 | export SPARK_WORKER_INSTANCES=1
88 | #export SPARK_SSH_OPTS="-p 10022"
89 | #export SPARK_YARN_USER_ENV="CLASSPATH=/usr/local/hadoop/hadoop-2.8.5/etc/hadoop"
90 | #export SPARK_CLASSPATH=$HBASE_HOME/lib/hbase-protocol-1.2.4.jar:$HBASE_HOME/lib/hbase-common-1.2.4.jar:$HBASE_HOME/lib/htrace-core-3.1.0-incubating.jar:$HBASE_HOME/lib/hbase-server-1.2.4.jar:$HBASE_HOME/lib/hbase-client-1.2.4.jar:$HBASE_HOME/lib/metrics-core-2.2.0.jar:$SPARK_CLASSPATH
91 | #export SPARK_LOCAL_DIR="/mnt/spark/tmp"
92 | #export YARN_CONF_DIR=$HADOOP_HOME/etc/hadoop
93 | #export SPARK_JAVA_OPTS="-Dspark.storage.blockManagerHeartBeatMs=60000-Dspark.local.dir=$SPARK_LOCAL_DIR -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -Xloggc:$SPARK_HOME/logs/gc.log -XX:+UseConcMarkSweepGC -XX:+UseCMSCompactAtFullCollection -XX:CMSInitiatingOccupancyFraction=60"
94 | #export SPARK_DAEMON_JAVA_OPTS="-Dspark.deploy.recoveryMode=FILESYSTEM -Dspark.deploy.recoveryDirectory=/nfs/spark/recovery"
95 | #export SPARK_LOCAL_IP=192.168.0.222
96 |
--------------------------------------------------------------------------------
/movie/backend/back.py:
--------------------------------------------------------------------------------
1 | from flask import Flask,request,jsonify,make_response
2 | import json
3 | import copy
4 | import datetime
5 | import random
6 | from flask_sqlalchemy import SQLAlchemy
7 | import pymysql
8 | import predict
9 | import time
10 | import csv
11 | import os
12 | from flask_cors import *
13 |
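# Flask backend for the recommender: /get_recommend returns per-user recommendations via
# predict.movie_predict, /get_movie returns a movie's details plus the current user's rating (if any),
# and /user_rate stores a new rating, appends it to ratings.csv and retrains in the background.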
14 | host = '127.0.0.1'
15 | username = 'root'
16 | password = 'password'
17 | database = 'recommend'
18 | port = 3306
19 | app = Flask(__name__)
20 | CORS(app, supports_credentials=True)
21 | @app.route('/')
22 | def hello_world():
23 | return 'Hello World'
24 |
25 | def movie_info(movie_id):
26 | # Open the database connection
27 | db = pymysql.connect(host=host, user=username, password=password, database=database, port=port)
28 | # Use the cursor() method to create a cursor object
29 | cursor = db.cursor()
30 | sql = """select * from movie where movieid = %s"""%(int(movie_id))
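# NOTE: the statement above is built with %-formatting; pymysql's cursor also accepts parameters,
# e.g. cursor.execute("select * from movie where movieid = %s", (int(movie_id),)), so the driver
# handles quoting itself.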
31 | print(sql)
32 | data = {}
33 | try:
34 | cursor.execute(sql)
35 | results = cursor.fetchall()
36 | row = results[0]
37 | data["movieid"] = row[0]
38 | data["moviename"] = row[1]
39 | data["showyear"] = row[2]
40 | #data["nation"] = row[3]
41 | data["director"] = row[4]
42 | data["leadactors"] = row[5]
43 | #data["screenwriter"] = row[6]
44 | data["picture"] = row[7]
45 | data["averating"] = row[8]
46 | data["numrating"] = row[9]
47 | data["description"] = row[10]
48 | data["typelist"] = row[11]
49 |
50 | except:
51 | print("error")
52 | db.close()
53 | return data
54 |
55 | def get_moviename(movie_id):
56 | # Open the database connection
57 | db = pymysql.connect(host=host, user=username, password=password, database=database, port=port)
58 | # Use the cursor() method to create a cursor object
59 | cursor = db.cursor()
60 | sql = """select * from movie where movieid = %s"""%(int(movie_id))
61 | print(sql)
62 | data = {}
63 | try:
64 | cursor.execute(sql)
65 | results = cursor.fetchall()
66 | row = results[0]
67 | data["movieid"] = row[0]
68 | data["moviename"] = row[1]
69 | data["showyear"] = row[2]
70 | #data["nation"] = row[3]
71 | data["director"] = row[4]
72 | data["leadactors"] = row[5]
73 | #data["screenwriter"] = row[6]
74 | data["picture"] = row[7]
75 | data["averating"] = row[8]
76 | data["numrating"] = row[9]
77 | data["description"] = row[10]
78 | data["typelist"] = row[11]
79 |
80 | except:
81 | print("error")
82 | db.close()
83 | return data
84 |
85 | def get_username(user_id):
86 | # Open the database connection
87 | db = pymysql.connect(host=host, user=username, password=password, database=database, port=port)
88 | # Use the cursor() method to create a cursor object
89 | cursor = db.cursor()
90 | sql = """select * from user where userid = %s"""%(int(user_id))
91 | print(sql)
92 | name = ""
93 | try:
94 | cursor.execute(sql)
95 | results = cursor.fetchall()
96 | row = results[0]
97 | name = row[1]
98 | except:
99 | print("error")
100 | db.close()
101 | return name
102 |
103 | @app.route('/get_recommend',methods=['GET'])
104 | def get_recommend():
105 | print("======")
106 | print(request.headers)
107 | # log = {}
108 | # log["host"] = request.headers.get("Host")
109 | # log["route"] = "get_recommend"
110 | # log["userid"] = request.args.get("userid")
111 | # print(log)
112 | # with open("back_log","a+") as f:
113 | # f.write(str(log)+'\n')
114 | print("======")
115 | user_id = request.args.get("userid")
116 | result = predict.movie_predict(int(user_id),5)
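# movie_predict(user_id, n) is assumed here to return the ids of the top-n recommended movies
# for this user (predict.py is not shown); each id is then expanded to its full record below.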
117 | info = []
118 | for re in result:
119 | a = movie_info(re)
120 | if a:
121 | info.append(a)
122 | res = {}
123 | res["code"] = 0
124 | res["data"] = info
125 | resp = jsonify(res)
126 | resp.headers['Access-Control-Allow-Origin'] = '*'
127 | return resp
128 |
129 |
130 | @app.route('/get_movie',methods=['GET'])
131 | def add():
132 | print("======")
133 | print(request.headers)
134 | print("======")
135 | movie_id = request.args.get("movieid")
136 | user_id = request.args.get("userid")
137 | # Open the database connection
138 | db = pymysql.connect(host=host, user=username, password=password, database=database, port=port)
139 | # Use the cursor() method to create a cursor object
140 | cursor = db.cursor()
141 | sql = """select * from movie where movieid = %s"""%(int(movie_id))
142 | print(sql)
143 | data = {}
144 | try:
145 | cursor.execute(sql)
146 | results = cursor.fetchall()
147 | row = results[0]
148 | data["movieid"] = row[0]
149 | data["moviename"] = row[1]
150 | data["showyear"] = row[2]
151 | #data["nation"] = row[3]
152 | data["director"] = row[4]
153 | data["leadactors"] = row[5]
154 | #data["screenwriter"] = row[6]
155 | data["picture"] = row[7]
156 | data["averating"] = row[8]
157 | data["numrating"] = row[9]
158 | data["description"] = row[10]
159 | data["typelist"] = row[11]
160 |
161 | except:
162 | print("error")
163 |
164 | # "review": true/false (whether the current user has rated this movie)
165 | # "user_rating": 5 (the current user's rating for this movie)
166 | sql = """SELECT * FROM rating WHERE movieId = %s and userId = %s"""%(int(movie_id),int(user_id))
167 | try:
168 | cursor.execute(sql)
169 | results = cursor.fetchall()
170 | if len(results) == 0:
171 | data["review"] = False
172 | else:
173 | data["review"] = True
174 | data["user_rating"]=results[0][2]
175 | except:
176 | print("error")
177 | res = {}
178 | res["code"] = 0
179 | res["data"] = data
180 | resp = jsonify(res)
181 | resp.headers['Access-Control-Allow-Origin'] = '*'
182 |
183 | # Write to the log file
184 | # log = {}
185 | # log["host"] = request.headers.get("Host")
186 | # log["route"] = "get_movie"
187 | # log["movieid"] = request.args.get("movieid")
188 | # log["userid"] = request.args.get("userid")
189 | millis = int(round(time.time() * 1000))
190 | log = str(millis) + '\t' + get_username(user_id) + '\t'+ data["moviename"]
191 | print(log)
192 | with open("back_log","a+") as f:
193 | f.write(str(log)+'\n')
194 | return resp
195 |
196 | def handle_chain_data(data):
197 | with open('./ratings.csv','a+') as csvfile:
198 | fieldnames=["userId","movieId","rating","timestamp"]
199 | write=csv.DictWriter(csvfile,fieldnames=fieldnames)
200 | write.writerow(data)
201 |
202 |
203 | @app.route('/user_rate',methods=['GET'])
204 | #@cross_origin()
205 | def user_rate():
206 | print("======")
207 | print(request.headers)
208 | print("======")
209 | print("*****")
210 | movie_id = request.args.get("movieid")
211 | user_id = request.args.get("userid")
212 | star = request.args.get("star")
213 | # Open the database connection
214 | db = pymysql.connect(host=host, user=username, password=password, database=database, port=port)
215 | # Use the cursor() method to create a cursor object
216 | cursor = db.cursor()
217 | sql = """insert into rating(userId,movieId,rating) values(%s,%s,%s) """%(int(user_id),int(movie_id),float(star))
218 | print(sql)
219 | try:
220 | cursor.execute(sql)
221 | db.commit()
222 | except:
223 | db.rollback()
224 | db.close()
225 | res= {}
226 | res["code"] = 0
227 | resp = jsonify(res)
228 | resp.headers['Access-Control-Allow-Origin'] = '*'
229 |
230 | moviename = movie_info(movie_id)["moviename"]
231 | print(moviename)
232 | millis = int(round(time.time() * 1000))
233 | log = str(millis) + '\t' + get_username(user_id) + '\t'+str(moviename)
234 | print(log)
235 | with open("back_log","a+") as f:
236 | f.write(str(log)+'\n')
237 | # write to ratings.csv
238 | newdata={}
239 | newdata["userId"] = user_id
240 | newdata["movieId"] = movie_id
241 | newdata["rating"] = star
242 | newdata["timestamp"] = int(time.time())
243 | handle_chain_data(newdata)
244 | # train
245 | os.system("nohup python3 train.py &")
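# Retraining runs asynchronously via nohup, so the HTTP response returns without waiting for train.py.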
246 | return resp
247 | if __name__ == '__main__':
248 | app.run(host="0.0.0.0",port = 18999)
249 |
--------------------------------------------------------------------------------
/log/frontend/public/img/icons/safari-pinned-tab.svg:
--------------------------------------------------------------------------------
1 |
2 |
4 |
7 |
8 | Created by potrace 1.11, written by Peter Selinger 2001-2013
9 |
10 |
12 |
148 |
149 |
150 |
--------------------------------------------------------------------------------
/movie/backend/user/package-lock.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "lognixapi",
3 | "version": "0.0.1",
4 | "lockfileVersion": 1,
5 | "requires": true,
6 | "dependencies": {
7 | "accepts": {
8 | "version": "1.3.7",
9 | "resolved": "https://registry.npmjs.org/accepts/-/accepts-1.3.7.tgz",
10 | "integrity": "sha512-Il80Qs2WjYlJIBNzNkK6KYqlVMTbZLXgHx2oT0pU/fjRHyEp+PEfEPY0R3WCwAGVOtauxh1hOxNgIf5bv7dQpA==",
11 | "requires": {
12 | "mime-types": "~2.1.24",
13 | "negotiator": "0.6.2"
14 | }
15 | },
16 | "array-flatten": {
17 | "version": "1.1.1",
18 | "resolved": "https://registry.npmjs.org/array-flatten/-/array-flatten-1.1.1.tgz",
19 | "integrity": "sha1-ml9pkFGx5wczKPKgCJaLZOopVdI="
20 | },
21 | "bignumber.js": {
22 | "version": "9.0.0",
23 | "resolved": "https://registry.npmjs.org/bignumber.js/-/bignumber.js-9.0.0.tgz",
24 | "integrity": "sha512-t/OYhhJ2SD+YGBQcjY8GzzDHEk9f3nerxjtfa6tlMXfe7frs/WozhvCNoGvpM0P3bNf3Gq5ZRMlGr5f3r4/N8A=="
25 | },
26 | "body-parser": {
27 | "version": "1.19.0",
28 | "resolved": "https://registry.npmjs.org/body-parser/-/body-parser-1.19.0.tgz",
29 | "integrity": "sha512-dhEPs72UPbDnAQJ9ZKMNTP6ptJaionhP5cBb541nXPlW60Jepo9RV/a4fX4XWW9CuFNK22krhrj1+rgzifNCsw==",
30 | "requires": {
31 | "bytes": "3.1.0",
32 | "content-type": "~1.0.4",
33 | "debug": "2.6.9",
34 | "depd": "~1.1.2",
35 | "http-errors": "1.7.2",
36 | "iconv-lite": "0.4.24",
37 | "on-finished": "~2.3.0",
38 | "qs": "6.7.0",
39 | "raw-body": "2.4.0",
40 | "type-is": "~1.6.17"
41 | }
42 | },
43 | "bytes": {
44 | "version": "3.1.0",
45 | "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.1.0.tgz",
46 | "integrity": "sha512-zauLjrfCG+xvoyaqLoV8bLVXXNGC4JqlxFCutSDWA6fJrTo2ZuvLYTqZ7aHBLZSMOopbzwv8f+wZcVzfVTI2Dg=="
47 | },
48 | "content-disposition": {
49 | "version": "0.5.3",
50 | "resolved": "https://registry.npmjs.org/content-disposition/-/content-disposition-0.5.3.tgz",
51 | "integrity": "sha512-ExO0774ikEObIAEV9kDo50o+79VCUdEB6n6lzKgGwupcVeRlhrj3qGAfwq8G6uBJjkqLrhT0qEYFcWng8z1z0g==",
52 | "requires": {
53 | "safe-buffer": "5.1.2"
54 | }
55 | },
56 | "content-type": {
57 | "version": "1.0.4",
58 | "resolved": "https://registry.npmjs.org/content-type/-/content-type-1.0.4.tgz",
59 | "integrity": "sha512-hIP3EEPs8tB9AT1L+NUqtwOAps4mk2Zob89MWXMHjHWg9milF/j4osnnQLXBCBFBk/tvIG/tUc9mOUJiPBhPXA=="
60 | },
61 | "cookie": {
62 | "version": "0.4.0",
63 | "resolved": "https://registry.npmjs.org/cookie/-/cookie-0.4.0.tgz",
64 | "integrity": "sha512-+Hp8fLp57wnUSt0tY0tHEXh4voZRDnoIrZPqlo3DPiI4y9lwg/jqx+1Om94/W6ZaPDOUbnjOt/99w66zk+l1Xg=="
65 | },
66 | "cookie-signature": {
67 | "version": "1.0.6",
68 | "resolved": "https://registry.npmjs.org/cookie-signature/-/cookie-signature-1.0.6.tgz",
69 | "integrity": "sha1-4wOogrNCzD7oylE6eZmXNNqzriw="
70 | },
71 | "core-util-is": {
72 | "version": "1.0.2",
73 | "resolved": "https://registry.npmjs.org/core-util-is/-/core-util-is-1.0.2.tgz",
74 | "integrity": "sha1-tf1UIgqivFq1eqtxQMlAdUUDwac="
75 | },
76 | "cors": {
77 | "version": "2.8.5",
78 | "resolved": "https://registry.npmjs.org/cors/-/cors-2.8.5.tgz",
79 | "integrity": "sha512-KIHbLJqu73RGr/hnbrO9uBeixNGuvSQjul/jdFvS/KFSIH1hWVd1ng7zOHx+YrEfInLG7q4n6GHQ9cDtxv/P6g==",
80 | "requires": {
81 | "object-assign": "^4",
82 | "vary": "^1"
83 | }
84 | },
85 | "debug": {
86 | "version": "2.6.9",
87 | "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz",
88 | "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==",
89 | "requires": {
90 | "ms": "2.0.0"
91 | }
92 | },
93 | "depd": {
94 | "version": "1.1.2",
95 | "resolved": "https://registry.npmjs.org/depd/-/depd-1.1.2.tgz",
96 | "integrity": "sha1-m81S4UwJd2PnSbJ0xDRu0uVgtak="
97 | },
98 | "destroy": {
99 | "version": "1.0.4",
100 | "resolved": "https://registry.npmjs.org/destroy/-/destroy-1.0.4.tgz",
101 | "integrity": "sha1-l4hXRCxEdJ5CBmE+N5RiBYJqvYA="
102 | },
103 | "ee-first": {
104 | "version": "1.1.1",
105 | "resolved": "https://registry.npmjs.org/ee-first/-/ee-first-1.1.1.tgz",
106 | "integrity": "sha1-WQxhFWsK4vTwJVcyoViyZrxWsh0="
107 | },
108 | "encodeurl": {
109 | "version": "1.0.2",
110 | "resolved": "https://registry.npmjs.org/encodeurl/-/encodeurl-1.0.2.tgz",
111 | "integrity": "sha1-rT/0yG7C0CkyL1oCw6mmBslbP1k="
112 | },
113 | "escape-html": {
114 | "version": "1.0.3",
115 | "resolved": "https://registry.npmjs.org/escape-html/-/escape-html-1.0.3.tgz",
116 | "integrity": "sha1-Aljq5NPQwJdN4cFpGI7wBR0dGYg="
117 | },
118 | "etag": {
119 | "version": "1.8.1",
120 | "resolved": "https://registry.npmjs.org/etag/-/etag-1.8.1.tgz",
121 | "integrity": "sha1-Qa4u62XvpiJorr/qg6x9eSmbCIc="
122 | },
123 | "express": {
124 | "version": "4.17.1",
125 | "resolved": "https://registry.npmjs.org/express/-/express-4.17.1.tgz",
126 | "integrity": "sha512-mHJ9O79RqluphRrcw2X/GTh3k9tVv8YcoyY4Kkh4WDMUYKRZUq0h1o0w2rrrxBqM7VoeUVqgb27xlEMXTnYt4g==",
127 | "requires": {
128 | "accepts": "~1.3.7",
129 | "array-flatten": "1.1.1",
130 | "body-parser": "1.19.0",
131 | "content-disposition": "0.5.3",
132 | "content-type": "~1.0.4",
133 | "cookie": "0.4.0",
134 | "cookie-signature": "1.0.6",
135 | "debug": "2.6.9",
136 | "depd": "~1.1.2",
137 | "encodeurl": "~1.0.2",
138 | "escape-html": "~1.0.3",
139 | "etag": "~1.8.1",
140 | "finalhandler": "~1.1.2",
141 | "fresh": "0.5.2",
142 | "merge-descriptors": "1.0.1",
143 | "methods": "~1.1.2",
144 | "on-finished": "~2.3.0",
145 | "parseurl": "~1.3.3",
146 | "path-to-regexp": "0.1.7",
147 | "proxy-addr": "~2.0.5",
148 | "qs": "6.7.0",
149 | "range-parser": "~1.2.1",
150 | "safe-buffer": "5.1.2",
151 | "send": "0.17.1",
152 | "serve-static": "1.14.1",
153 | "setprototypeof": "1.1.1",
154 | "statuses": "~1.5.0",
155 | "type-is": "~1.6.18",
156 | "utils-merge": "1.0.1",
157 | "vary": "~1.1.2"
158 | }
159 | },
160 | "finalhandler": {
161 | "version": "1.1.2",
162 | "resolved": "https://registry.npmjs.org/finalhandler/-/finalhandler-1.1.2.tgz",
163 | "integrity": "sha512-aAWcW57uxVNrQZqFXjITpW3sIUQmHGG3qSb9mUah9MgMC4NeWhNOlNjXEYq3HjRAvL6arUviZGGJsBg6z0zsWA==",
164 | "requires": {
165 | "debug": "2.6.9",
166 | "encodeurl": "~1.0.2",
167 | "escape-html": "~1.0.3",
168 | "on-finished": "~2.3.0",
169 | "parseurl": "~1.3.3",
170 | "statuses": "~1.5.0",
171 | "unpipe": "~1.0.0"
172 | }
173 | },
174 | "forwarded": {
175 | "version": "0.1.2",
176 | "resolved": "https://registry.npmjs.org/forwarded/-/forwarded-0.1.2.tgz",
177 | "integrity": "sha1-mMI9qxF1ZXuMBXPozszZGw/xjIQ="
178 | },
179 | "fresh": {
180 | "version": "0.5.2",
181 | "resolved": "https://registry.npmjs.org/fresh/-/fresh-0.5.2.tgz",
182 | "integrity": "sha1-PYyt2Q2XZWn6g1qx+OSyOhBWBac="
183 | },
184 | "http-errors": {
185 | "version": "1.7.2",
186 | "resolved": "https://registry.npmjs.org/http-errors/-/http-errors-1.7.2.tgz",
187 | "integrity": "sha512-uUQBt3H/cSIVfch6i1EuPNy/YsRSOUBXTVfZ+yR7Zjez3qjBz6i9+i4zjNaoqcoFVI4lQJ5plg63TvGfRSDCRg==",
188 | "requires": {
189 | "depd": "~1.1.2",
190 | "inherits": "2.0.3",
191 | "setprototypeof": "1.1.1",
192 | "statuses": ">= 1.5.0 < 2",
193 | "toidentifier": "1.0.0"
194 | }
195 | },
196 | "iconv-lite": {
197 | "version": "0.4.24",
198 | "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.24.tgz",
199 | "integrity": "sha512-v3MXnZAcvnywkTUEZomIActle7RXXeedOR31wwl7VlyoXO4Qi9arvSenNQWne1TcRwhCL1HwLI21bEqdpj8/rA==",
200 | "requires": {
201 | "safer-buffer": ">= 2.1.2 < 3"
202 | }
203 | },
204 | "inherits": {
205 | "version": "2.0.3",
206 | "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.3.tgz",
207 | "integrity": "sha1-Yzwsg+PaQqUC9SRmAiSA9CCCYd4="
208 | },
209 | "ipaddr.js": {
210 | "version": "1.9.0",
211 | "resolved": "https://registry.npmjs.org/ipaddr.js/-/ipaddr.js-1.9.0.tgz",
212 | "integrity": "sha512-M4Sjn6N/+O6/IXSJseKqHoFc+5FdGJ22sXqnjTpdZweHK64MzEPAyQZyEU3R/KRv2GLoa7nNtg/C2Ev6m7z+eA=="
213 | },
214 | "isarray": {
215 | "version": "1.0.0",
216 | "resolved": "https://registry.npmjs.org/isarray/-/isarray-1.0.0.tgz",
217 | "integrity": "sha1-u5NdSFgsuhaMBoNJV6VKPgcSTxE="
218 | },
219 | "media-typer": {
220 | "version": "0.3.0",
221 | "resolved": "https://registry.npmjs.org/media-typer/-/media-typer-0.3.0.tgz",
222 | "integrity": "sha1-hxDXrwqmJvj/+hzgAWhUUmMlV0g="
223 | },
224 | "merge-descriptors": {
225 | "version": "1.0.1",
226 | "resolved": "https://registry.npmjs.org/merge-descriptors/-/merge-descriptors-1.0.1.tgz",
227 | "integrity": "sha1-sAqqVW3YtEVoFQ7J0blT8/kMu2E="
228 | },
229 | "methods": {
230 | "version": "1.1.2",
231 | "resolved": "https://registry.npmjs.org/methods/-/methods-1.1.2.tgz",
232 | "integrity": "sha1-VSmk1nZUE07cxSZmVoNbD4Ua/O4="
233 | },
234 | "mime": {
235 | "version": "1.6.0",
236 | "resolved": "https://registry.npmjs.org/mime/-/mime-1.6.0.tgz",
237 | "integrity": "sha512-x0Vn8spI+wuJ1O6S7gnbaQg8Pxh4NNHb7KSINmEWKiPE4RKOplvijn+NkmYmmRgP68mc70j2EbeTFRsrswaQeg=="
238 | },
239 | "mime-db": {
240 | "version": "1.40.0",
241 | "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.40.0.tgz",
242 | "integrity": "sha512-jYdeOMPy9vnxEqFRRo6ZvTZ8d9oPb+k18PKoYNYUe2stVEBPPwsln/qWzdbmaIvnhZ9v2P+CuecK+fpUfsV2mA=="
243 | },
244 | "mime-types": {
245 | "version": "2.1.24",
246 | "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.24.tgz",
247 | "integrity": "sha512-WaFHS3MCl5fapm3oLxU4eYDw77IQM2ACcxQ9RIxfaC3ooc6PFuBMGZZsYpvoXS5D5QTWPieo1jjLdAm3TBP3cQ==",
248 | "requires": {
249 | "mime-db": "1.40.0"
250 | }
251 | },
252 | "ms": {
253 | "version": "2.0.0",
254 | "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz",
255 | "integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g="
256 | },
257 | "mysql": {
258 | "version": "2.18.1",
259 | "resolved": "https://registry.npmjs.org/mysql/-/mysql-2.18.1.tgz",
260 | "integrity": "sha512-Bca+gk2YWmqp2Uf6k5NFEurwY/0td0cpebAucFpY/3jhrwrVGuxU2uQFCHjU19SJfje0yQvi+rVWdq78hR5lig==",
261 | "requires": {
262 | "bignumber.js": "9.0.0",
263 | "readable-stream": "2.3.7",
264 | "safe-buffer": "5.1.2",
265 | "sqlstring": "2.3.1"
266 | }
267 | },
268 | "negotiator": {
269 | "version": "0.6.2",
270 | "resolved": "https://registry.npmjs.org/negotiator/-/negotiator-0.6.2.tgz",
271 | "integrity": "sha512-hZXc7K2e+PgeI1eDBe/10Ard4ekbfrrqG8Ep+8Jmf4JID2bNg7NvCPOZN+kfF574pFQI7mum2AUqDidoKqcTOw=="
272 | },
273 | "object-assign": {
274 | "version": "4.1.1",
275 | "resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz",
276 | "integrity": "sha1-IQmtx5ZYh8/AXLvUQsrIv7s2CGM="
277 | },
278 | "on-finished": {
279 | "version": "2.3.0",
280 | "resolved": "https://registry.npmjs.org/on-finished/-/on-finished-2.3.0.tgz",
281 | "integrity": "sha1-IPEzZIGwg811M3mSoWlxqi2QaUc=",
282 | "requires": {
283 | "ee-first": "1.1.1"
284 | }
285 | },
286 | "parseurl": {
287 | "version": "1.3.3",
288 | "resolved": "https://registry.npmjs.org/parseurl/-/parseurl-1.3.3.tgz",
289 | "integrity": "sha512-CiyeOxFT/JZyN5m0z9PfXw4SCBJ6Sygz1Dpl0wqjlhDEGGBP1GnsUVEL0p63hoG1fcj3fHynXi9NYO4nWOL+qQ=="
290 | },
291 | "path-to-regexp": {
292 | "version": "0.1.7",
293 | "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-0.1.7.tgz",
294 | "integrity": "sha1-32BBeABfUi8V60SQ5yR6G/qmf4w="
295 | },
296 | "process-nextick-args": {
297 | "version": "2.0.1",
298 | "resolved": "https://registry.npmjs.org/process-nextick-args/-/process-nextick-args-2.0.1.tgz",
299 | "integrity": "sha512-3ouUOpQhtgrbOa17J7+uxOTpITYWaGP7/AhoR3+A+/1e9skrzelGi/dXzEYyvbxubEF6Wn2ypscTKiKJFFn1ag=="
300 | },
301 | "proxy-addr": {
302 | "version": "2.0.5",
303 | "resolved": "https://registry.npmjs.org/proxy-addr/-/proxy-addr-2.0.5.tgz",
304 | "integrity": "sha512-t/7RxHXPH6cJtP0pRG6smSr9QJidhB+3kXu0KgXnbGYMgzEnUxRQ4/LDdfOwZEMyIh3/xHb8PX3t+lfL9z+YVQ==",
305 | "requires": {
306 | "forwarded": "~0.1.2",
307 | "ipaddr.js": "1.9.0"
308 | }
309 | },
310 | "qs": {
311 | "version": "6.7.0",
312 | "resolved": "https://registry.npmjs.org/qs/-/qs-6.7.0.tgz",
313 | "integrity": "sha512-VCdBRNFTX1fyE7Nb6FYoURo/SPe62QCaAyzJvUjwRaIsc+NePBEniHlvxFmmX56+HZphIGtV0XeCirBtpDrTyQ=="
314 | },
315 | "range-parser": {
316 | "version": "1.2.1",
317 | "resolved": "https://registry.npmjs.org/range-parser/-/range-parser-1.2.1.tgz",
318 | "integrity": "sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg=="
319 | },
320 | "raw-body": {
321 | "version": "2.4.0",
322 | "resolved": "https://registry.npmjs.org/raw-body/-/raw-body-2.4.0.tgz",
323 | "integrity": "sha512-4Oz8DUIwdvoa5qMJelxipzi/iJIi40O5cGV1wNYp5hvZP8ZN0T+jiNkL0QepXs+EsQ9XJ8ipEDoiH70ySUJP3Q==",
324 | "requires": {
325 | "bytes": "3.1.0",
326 | "http-errors": "1.7.2",
327 | "iconv-lite": "0.4.24",
328 | "unpipe": "1.0.0"
329 | }
330 | },
331 | "readable-stream": {
332 | "version": "2.3.7",
333 | "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.7.tgz",
334 | "integrity": "sha512-Ebho8K4jIbHAxnuxi7o42OrZgF/ZTNcsZj6nRKyUmkhLFq8CHItp/fy6hQZuZmP/n3yZ9VBUbp4zz/mX8hmYPw==",
335 | "requires": {
336 | "core-util-is": "~1.0.0",
337 | "inherits": "~2.0.3",
338 | "isarray": "~1.0.0",
339 | "process-nextick-args": "~2.0.0",
340 | "safe-buffer": "~5.1.1",
341 | "string_decoder": "~1.1.1",
342 | "util-deprecate": "~1.0.1"
343 | }
344 | },
345 | "safe-buffer": {
346 | "version": "5.1.2",
347 | "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz",
348 | "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g=="
349 | },
350 | "safer-buffer": {
351 | "version": "2.1.2",
352 | "resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz",
353 | "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg=="
354 | },
355 | "send": {
356 | "version": "0.17.1",
357 | "resolved": "https://registry.npmjs.org/send/-/send-0.17.1.tgz",
358 | "integrity": "sha512-BsVKsiGcQMFwT8UxypobUKyv7irCNRHk1T0G680vk88yf6LBByGcZJOTJCrTP2xVN6yI+XjPJcNuE3V4fT9sAg==",
359 | "requires": {
360 | "debug": "2.6.9",
361 | "depd": "~1.1.2",
362 | "destroy": "~1.0.4",
363 | "encodeurl": "~1.0.2",
364 | "escape-html": "~1.0.3",
365 | "etag": "~1.8.1",
366 | "fresh": "0.5.2",
367 | "http-errors": "~1.7.2",
368 | "mime": "1.6.0",
369 | "ms": "2.1.1",
370 | "on-finished": "~2.3.0",
371 | "range-parser": "~1.2.1",
372 | "statuses": "~1.5.0"
373 | },
374 | "dependencies": {
375 | "ms": {
376 | "version": "2.1.1",
377 | "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.1.tgz",
378 | "integrity": "sha512-tgp+dl5cGk28utYktBsrFqA7HKgrhgPsg6Z/EfhWI4gl1Hwq8B/GmY/0oXZ6nF8hDVesS/FpnYaD/kOWhYQvyg=="
379 | }
380 | }
381 | },
382 | "serve-static": {
383 | "version": "1.14.1",
384 | "resolved": "https://registry.npmjs.org/serve-static/-/serve-static-1.14.1.tgz",
385 | "integrity": "sha512-JMrvUwE54emCYWlTI+hGrGv5I8dEwmco/00EvkzIIsR7MqrHonbD9pO2MOfFnpFntl7ecpZs+3mW+XbQZu9QCg==",
386 | "requires": {
387 | "encodeurl": "~1.0.2",
388 | "escape-html": "~1.0.3",
389 | "parseurl": "~1.3.3",
390 | "send": "0.17.1"
391 | }
392 | },
393 | "setprototypeof": {
394 | "version": "1.1.1",
395 | "resolved": "https://registry.npmjs.org/setprototypeof/-/setprototypeof-1.1.1.tgz",
396 | "integrity": "sha512-JvdAWfbXeIGaZ9cILp38HntZSFSo3mWg6xGcJJsd+d4aRMOqauag1C63dJfDw7OaMYwEbHMOxEZ1lqVRYP2OAw=="
397 | },
398 | "sqlstring": {
399 | "version": "2.3.1",
400 | "resolved": "https://registry.npmjs.org/sqlstring/-/sqlstring-2.3.1.tgz",
401 | "integrity": "sha1-R1OT/56RR5rqYtyvDKPRSYOn+0A="
402 | },
403 | "statuses": {
404 | "version": "1.5.0",
405 | "resolved": "https://registry.npmjs.org/statuses/-/statuses-1.5.0.tgz",
406 | "integrity": "sha1-Fhx9rBd2Wf2YEfQ3cfqZOBR4Yow="
407 | },
408 | "string_decoder": {
409 | "version": "1.1.1",
410 | "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz",
411 | "integrity": "sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==",
412 | "requires": {
413 | "safe-buffer": "~5.1.0"
414 | }
415 | },
416 | "toidentifier": {
417 | "version": "1.0.0",
418 | "resolved": "https://registry.npmjs.org/toidentifier/-/toidentifier-1.0.0.tgz",
419 | "integrity": "sha512-yaOH/Pk/VEhBWWTlhI+qXxDFXlejDGcQipMlyxda9nthulaxLZUNcUqFxokp0vcYnvteJln5FNQDRrxj3YcbVw=="
420 | },
421 | "type-is": {
422 | "version": "1.6.18",
423 | "resolved": "https://registry.npmjs.org/type-is/-/type-is-1.6.18.tgz",
424 | "integrity": "sha512-TkRKr9sUTxEH8MdfuCSP7VizJyzRNMjj2J2do2Jr3Kym598JVdEksuzPQCnlFPW4ky9Q+iA+ma9BGm06XQBy8g==",
425 | "requires": {
426 | "media-typer": "0.3.0",
427 | "mime-types": "~2.1.24"
428 | }
429 | },
430 | "unpipe": {
431 | "version": "1.0.0",
432 | "resolved": "https://registry.npmjs.org/unpipe/-/unpipe-1.0.0.tgz",
433 | "integrity": "sha1-sr9O6FFKrmFltIF4KdIbLvSZBOw="
434 | },
435 | "util-deprecate": {
436 | "version": "1.0.2",
437 | "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz",
438 | "integrity": "sha1-RQ1Nyfpw3nMnYvvS1KKJgUGaDM8="
439 | },
440 | "utils-merge": {
441 | "version": "1.0.1",
442 | "resolved": "https://registry.npmjs.org/utils-merge/-/utils-merge-1.0.1.tgz",
443 | "integrity": "sha1-n5VxD1CiZ5R7LMwSR0HBAoQn5xM="
444 | },
445 | "vary": {
446 | "version": "1.1.2",
447 | "resolved": "https://registry.npmjs.org/vary/-/vary-1.1.2.tgz",
448 | "integrity": "sha1-IpnwLG3tMNSllhsLn3RSShj2NPw="
449 | }
450 | }
451 | }
452 |
--------------------------------------------------------------------------------