├── client
├── static
│ └── .gitkeep
├── config
│ ├── prod.env.js
│ ├── dev.env.js
│ └── index.js
├── build
│ ├── logo.png
│ ├── vue-loader.conf.js
│ ├── build.js
│ ├── check-versions.js
│ ├── webpack.base.conf.js
│ ├── utils.js
│ ├── webpack.dev.conf.js
│ └── webpack.prod.conf.js
├── .editorconfig
├── .gitignore
├── .babelrc
├── .postcssrc.js
├── src
│ ├── router
│ │ └── index.js
│ ├── App.vue
│ ├── service
│ │ └── api.js
│ ├── global.js.default
│ ├── main.js
│ └── components
│ │ └── HelloWorld.vue
├── index.html
├── README.md
└── package.json
├── .coveralls.yml
├── .gitignore
├── views
├── index.pug
├── error.pug
└── layout.pug
├── public
└── stylesheets
│ └── style.css
├── routes
└── index.js
├── Dockerfile
├── lib
├── redis.js
└── wx.js
├── .travis.yml
├── tasks
└── sync_users.js
├── supervisord.conf
├── migrations
└── 20180201080925-create-users.js
├── app.js
├── models
└── index.js
├── config
├── config.js
├── wx_config.js
└── config.json.default
├── README.md
├── controllers
└── home.js
├── package.json
├── nginx
├── bin
└── www
├── test
└── server
│ └── home.spec.js
└── redis.conf
/client/static/.gitkeep:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/.coveralls.yml:
--------------------------------------------------------------------------------
1 | service_name: travis-pro
2 | repo_token: 8BbCFcgoR5EQjrhyrNIqngqSRzaoW8iLn
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | node_modules/
2 | .vscode/
3 | config/config.json
4 | package-lock.json
5 | coverage/
--------------------------------------------------------------------------------
/client/config/prod.env.js:
--------------------------------------------------------------------------------
'use strict'

// Environment constants injected by webpack's DefinePlugin; values are
// double-quoted because DefinePlugin substitutes them verbatim into source.
const prodEnv = {
  NODE_ENV: '"production"'
}

module.exports = prodEnv
5 |
--------------------------------------------------------------------------------
/views/index.pug:
--------------------------------------------------------------------------------
1 | extends layout
2 |
3 | block content
4 | h1= title
5 | p Welcome to #{title}
6 |
--------------------------------------------------------------------------------
/client/build/logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yaonie084/koa2-vue2-wechat-enterprise/HEAD/client/build/logo.png
--------------------------------------------------------------------------------
/views/error.pug:
--------------------------------------------------------------------------------
1 | extends layout
2 |
3 | block content
4 | h1= message
5 | h2= error.status
6 | pre #{error.stack}
7 |
--------------------------------------------------------------------------------
/public/stylesheets/style.css:
--------------------------------------------------------------------------------
1 | body {
2 | padding: 50px;
3 | font: 14px "Lucida Grande", Helvetica, Arial, sans-serif;
4 | }
5 |
6 | a {
7 | color: #00B7FF;
8 | }
9 |
--------------------------------------------------------------------------------
/views/layout.pug:
--------------------------------------------------------------------------------
1 | doctype html
2 | html
3 | head
4 | title= title
5 | link(rel='stylesheet', href='/stylesheets/style.css')
6 | body
7 | block content
8 |
--------------------------------------------------------------------------------
/client/config/dev.env.js:
--------------------------------------------------------------------------------
'use strict'

// Development env constants: start from the production set and override
// NODE_ENV (values are double-quoted for webpack's DefinePlugin).
const merge = require('webpack-merge')
const prodEnv = require('./prod.env')

const devEnv = merge(prodEnv, {
  NODE_ENV: '"development"'
})

module.exports = devEnv
8 |
--------------------------------------------------------------------------------
/client/.editorconfig:
--------------------------------------------------------------------------------
1 | root = true
2 |
3 | [*]
4 | charset = utf-8
5 | indent_style = space
6 | indent_size = 2
7 | end_of_line = lf
8 | insert_final_newline = true
9 | trim_trailing_whitespace = true
10 |
--------------------------------------------------------------------------------
/client/.gitignore:
--------------------------------------------------------------------------------
1 | .DS_Store
2 | node_modules/
3 | /dist/
4 | npm-debug.log*
5 | yarn-debug.log*
6 | yarn-error.log*
7 | src/global.js
8 | src/assets/*
9 |
10 | # Editor directories and files
11 | .idea
12 | .vscode
13 | *.suo
14 | *.ntvs*
15 | *.njsproj
16 | *.sln
17 |
--------------------------------------------------------------------------------
/routes/index.js:
--------------------------------------------------------------------------------
// API routes: maps URL paths to the handlers in controllers/home.js.
const Router = require('koa-router');
const home = require('../controllers/home');

const router = new Router();

// Debug/landing endpoint echoing the resolved environment.
router.get('/api', home.index);
// Computes the wx JS-SDK signature for the client.
router.post('/api/auth', home.auth);
// Resolves a wx OAuth code into user + department info.
router.get('/api/get-user-info', home.getUserInfo);

module.exports = router;
9 |
--------------------------------------------------------------------------------
/client/.babelrc:
--------------------------------------------------------------------------------
1 | {
2 | "presets": [
3 | ["env", {
4 | "modules": false,
5 | "targets": {
6 | "browsers": ["> 1%", "last 2 versions", "not ie <= 8"]
7 | }
8 | }],
9 | "stage-2"
10 | ],
11 | "plugins": ["transform-vue-jsx", "transform-runtime"]
12 | }
13 |
--------------------------------------------------------------------------------
/client/.postcssrc.js:
--------------------------------------------------------------------------------
1 | // https://github.com/michael-ciniawsky/postcss-load-config
2 |
3 | module.exports = {
4 | "plugins": {
5 | "postcss-import": {},
6 | "postcss-url": {},
7 | // to edit target browsers: use "browserslist" field in package.json
8 | "autoprefixer": {}
9 | }
10 | }
11 |
--------------------------------------------------------------------------------
/client/src/router/index.js:
--------------------------------------------------------------------------------
import Vue from 'vue'
import Router from 'vue-router'
import HelloWorld from '@/components/HelloWorld'

Vue.use(Router)

// Single-page route table. History mode (no '#' in URLs) relies on the
// server falling back to index.html — the repo's nginx config does this
// via `try_files ... /index.html`.
const routes = [
  {
    path: '/',
    name: 'HelloWorld',
    component: HelloWorld
  }
]

export default new Router({
  routes,
  mode: 'history'
})
17 |
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
# Single-container deployment: nginx (static client + reverse proxy),
# redis, and the node app all run under supervisord.
FROM registry.cn-hangzhou.aliyuncs.com/bjmaster/enterprise
ENV NODE_ENV=production
RUN mkdir /app
# Copy the whole repo in and install server dependencies.
ADD ./ /app
RUN cd /app && npm install
COPY supervisord.conf /etc/supervisor/conf.d/supervisord.conf
COPY redis.conf /etc/redis/redis.conf
# The `nginx` file at the repo root is the site config.
COPY nginx /etc/nginx/sites-enabled/default
RUN mkdir /log
# supervisord runs with nodaemon=true (see supervisord.conf), acting as PID 1.
CMD ["/usr/bin/supervisord"]
--------------------------------------------------------------------------------
/client/src/App.vue:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
12 |
13 |
20 |
--------------------------------------------------------------------------------
/lib/redis.js:
--------------------------------------------------------------------------------
'use strict';

// Shared redis client for the app (token caching etc.).
var redis = require('then-redis');

// FIX: removed the unused `env` local — NODE_ENV was read but never consulted;
// connection settings are hard-coded.
// NOTE(review): consider moving these into config/ alongside the DB settings.
var config = {
  redis_host: '127.0.0.1',
  redis_port: '6379',
  redis_auth: ''
};

// Logical database 6 is selected at connection time rather than via a
// separate db.select() call (see the commented-out line that used to do it).
var db = redis.createClient({
  host: config.redis_host,
  port: config.redis_port,
  password: config.redis_auth,
  db: 6
});

module.exports = db;
--------------------------------------------------------------------------------
/.travis.yml:
--------------------------------------------------------------------------------
1 | language: node_js
2 | node_js:
3 | - "8.0.0"
4 | before_install:
5 | - mysql -e 'CREATE DATABASE wx_boilerplate_test;'
6 | install: npm install
7 | script:
8 | - mv ./config/config.json.default ./config/config.json
9 | - NODE_ENV=test ./node_modules/.bin/jest --coverage --forceExit
10 | after_script:
11 | - npm run coveralls
12 | cache:
13 | directories:
14 | - .node_modules
15 | services:
16 | - redis-server
17 | - mysql
--------------------------------------------------------------------------------
/client/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
--------------------------------------------------------------------------------
/client/README.md:
--------------------------------------------------------------------------------
1 | # src2
2 |
3 | > A Vue.js project
4 |
5 | ## Build Setup
6 |
7 | ``` bash
8 | # install dependencies
9 | npm install
10 |
11 | # serve with hot reload at localhost:8080
12 | npm run dev
13 |
14 | # build for production with minification
15 | npm run build
16 |
17 | # build for production and view the bundle analyzer report
18 | npm run build --report
19 | ```
20 |
21 | For a detailed explanation on how things work, check out the [guide](http://vuejs-templates.github.io/webpack/) and [docs for vue-loader](http://vuejs.github.io/vue-loader).
22 |
--------------------------------------------------------------------------------
/client/build/vue-loader.conf.js:
--------------------------------------------------------------------------------
1 | 'use strict'
2 | const utils = require('./utils')
3 | const config = require('../config')
4 | const isProduction = process.env.NODE_ENV?true:false
5 | const sourceMapEnabled = isProduction
6 | ? config.build.productionSourceMap
7 | : config.dev.cssSourceMap
8 |
9 | module.exports = {
10 | loaders: utils.cssLoaders({
11 | sourceMap: sourceMapEnabled,
12 | extract: isProduction
13 | }),
14 | cssSourceMap: sourceMapEnabled,
15 | cacheBusting: config.dev.cacheBusting,
16 | transformToRequire: {
17 | video: ['src', 'poster'],
18 | source: 'src',
19 | img: 'src',
20 | image: 'xlink:href'
21 | }
22 | }
23 |
--------------------------------------------------------------------------------
/client/src/service/api.js:
--------------------------------------------------------------------------------
1 | import axios from 'axios'
2 |
/**
 * Thin wrapper around axios that resolves with the response body.
 *
 * @param {string} url    Path appended to the global G_SERVER_URL (declared in
 *                        src/global.js and installed on `global` by the router
 *                        guard in main.js).
 * @param {string} method HTTP verb, defaults to 'GET'.
 * @param {*} params      Request payload, forwarded as axios `data`.
 *                        NOTE(review): axios ignores `data` on GET requests —
 *                        query strings would need the `params` option; confirm
 *                        no GET caller relies on this.
 * @returns {Promise<*>} resolves with response.data; rejects with the axios error.
 */
export function fetch(url, method = 'GET', params) {
  // FIX: axios already returns a promise — the previous `new Promise` wrapper
  // (explicit-construction anti-pattern) added nothing; rejections propagate
  // to the caller exactly as before.
  return axios({
    method: method,
    url: G_SERVER_URL + url,
    headers: {
    },
    data: params
  }).then((response) => response.data)
}
32 |
// Aggregated API surface consumed by components.
export default {

  // Development smoke-test endpoint.
  getHomeIndex() {
    // NOTE(review): the development G_SERVER_URL in global.js.default has no
    // trailing slash, so 'api/' concatenates without a separator — confirm
    // whether '/api/' was intended.
    return fetch('api/')
  }

}
--------------------------------------------------------------------------------
/tasks/sync_users.js:
--------------------------------------------------------------------------------
// One-shot task: mirrors the wx (WeChat Work) member directory into the
// local Users table. Run manually, e.g. `node tasks/sync_users.js`.
var wx = require('../lib/wx');
var db = require('../models');

(async function() {
  var info = await wx.getAllUsers();
  var users = info.userlist;
  for (var i = 0; i < users.length; i++) {
    var userParams = users[i];
    var user = await db.User.findOne({
      where: {
        userid: userParams.userid
      }
    });
    if (user) {
      // BUG FIX: the original update had no `where` clause — Sequelize either
      // throws or touches every row; scope the update to the matched user.
      await db.User.update({
        name: userParams.name,
        position: userParams.position,
        mobile: userParams.mobile
      }, {
        where: { userid: userParams.userid }
      });
    } else {
      user = await db.User.create({
        userid: userParams.userid,
        name: userParams.name,
        position: userParams.position,
        mobile: userParams.mobile
      });
    }
  }
  console.log('finished!');
})().catch(function(err) {
  // FIX: surface failures instead of leaving an unhandled promise rejection.
  console.error('sync_users failed:', err);
  process.exitCode = 1;
});
--------------------------------------------------------------------------------
/client/src/global.js.default:
--------------------------------------------------------------------------------
1 | "use strict"
2 |
3 | global.development = {
4 | G_SERVER_URL: 'http://127.0.0.1:3000',
5 | AGENT_ID: 'development',
6 | CORP_ID: 'development',
7 | TITLE: 'development',
8 | ADDRESS: 'development'
9 | }
10 |
11 | global.wx_boilerplate_production1 = {
12 | G_SERVER_URL: 'http://production1',
13 | AGENT_ID: 'production1',
14 | CORP_ID: 'production1',
15 | TITLE: 'production1系统',
16 | ADDRESS: 'production1'
17 | }
18 |
19 | global.wx_boilerplate_production2 = {
20 | G_SERVER_URL: 'http://production2',
21 | AGENT_ID: 'production2',
22 | CORP_ID: 'production2',
23 | TITLE: 'production2',
24 | ADDRESS: 'production2'
25 | }
26 |
27 | global.production = {
28 | G_SERVER_URL: 'http://test1.funenc.com/',
29 | AGENT_ID: '1000206',
30 | CORP_ID: 'ww459483b92c637742',
31 | TITLE: '测试种子项目',
32 | ADDRESS: '成都地铁7号线中环停车场'
33 | }
--------------------------------------------------------------------------------
/supervisord.conf:
--------------------------------------------------------------------------------
; Process manager for the all-in-one container (see Dockerfile): runs nginx,
; redis and the node app side by side under one supervisord instance.
[supervisord]
nodaemon=true
[program:nginx]
command=/usr/sbin/nginx -g 'daemon off;'
stdout_events_enabled=true
stderr_events_enabled=true
[program:redis]
command=/usr/bin/redis-server
stdout_logfile=/log/redis_out.log
stderr_logfile=/log/redis_err.log
stdout_logfile_maxbytes=5MB
stdout_logfile_backups=20
priority=999
[program:node]
command=/usr/local/bin/node /app/bin/www
stdout_logfile=/log/node_out.log
stderr_logfile=/log/node_err.log
stdout_logfile_maxbytes=5MB
stdout_logfile_backups=20
priority=0
; NOTE(review): the control UI below is bound on all interfaces with a weak
; hard-coded password, and the repo's nginx config proxies it at /monitor/ —
; lock this down before any production deployment.
[inet_http_server] ; inet (TCP) server disabled by default
port=0.0.0.0:9001 ; (ip_address:port specifier, *:port for all iface)
username=wy ; (default is no username (open server))
password=123456 ; (default is no password (open server))
--------------------------------------------------------------------------------
/migrations/20180201080925-create-users.js:
--------------------------------------------------------------------------------
1 | 'use strict';
2 |
3 | module.exports = {
4 | up: (queryInterface, Sequelize) => {
5 | queryInterface.createTable("Users", {
6 | id: {
7 | allowNull: false,
8 | autoIncrement: true,
9 | primaryKey: true,
10 | type: Sequelize.INTEGER
11 | },
12 | userid: {
13 | type: Sequelize.STRING
14 | },
15 | name: {
16 | type: Sequelize.STRING
17 | },
18 | position: {
19 | type: Sequelize.STRING
20 | },
21 | mobile: {
22 | type: Sequelize.STRING
23 | },
24 | createdAt: {
25 | allowNull: false,
26 | type: Sequelize.DATE
27 | },
28 | updatedAt: {
29 | allowNull: false,
30 | type: Sequelize.DATE
31 | }
32 | });
33 | },
34 |
35 | down: (queryInterface, Sequelize) => {
36 | queryInterface.dropTable('Users');
37 | }
38 | };
39 |
--------------------------------------------------------------------------------
/app.js:
--------------------------------------------------------------------------------
// Koa application setup: middleware pipeline + route mounting.
// Exported without listening so bin/www can start it and tests can import it.
const Koa = require('koa')
const app = new Koa()
const views = require('koa-views')
const json = require('koa-json')
const onerror = require('koa-onerror')
const bodyparser = require('koa-bodyparser')
const logger = require('koa-logger')
const cors = require('kcors')

const index = require('./routes/index')

// error handler
onerror(app)

// middlewares
app.use(bodyparser({
  enableTypes:['json', 'form', 'text']
}))
app.use(json())
app.use(logger())
// Static files from /public are served before the view/route layers run.
app.use(require('koa-static')(__dirname + '/public'))

app.use(views(__dirname + '/views', {
  extension: 'pug'
}))

// cors
// NOTE(review): cors() with no options allows any origin — confirm this is
// acceptable for production deployments.
app.use(cors())

// logger
// Per-request timing, printed after downstream middleware completes.
app.use(async (ctx, next) => {
  const start = new Date()
  await next()
  const ms = new Date() - start
  console.log(`${ctx.method} ${ctx.url} - ${ms}ms`)
})


// routes
app.use(index.routes(), index.allowedMethods())

// error-handling
app.on('error', (err, ctx) => {
  console.error('server error', err, ctx)
});

module.exports = app
48 |
--------------------------------------------------------------------------------
/models/index.js:
--------------------------------------------------------------------------------
'use strict';

// Bootstraps Sequelize and auto-loads every model definition in this
// directory, exporting them all on a single `db` object.
var fs = require('fs');
var path = require('path');
var Sequelize = require('sequelize');
var basename = path.basename(__filename);
var env = process.env.NODE_ENV || 'development';
var config = require(__dirname + '/../config/config.js')[env];
var db = {};

// SECURITY FIX: removed the `console.log(config)` that printed the database
// password to stdout on every startup.

// FIX: declare once instead of `var` inside each if-branch.
var sequelize;
if (config.use_env_variable) {
  sequelize = new Sequelize(process.env[config.use_env_variable], config);
} else {
  sequelize = new Sequelize(config.database, config.username, config.password, config);
}

fs
  .readdirSync(__dirname)
  .filter(file => {
    // Only sibling .js model files; skip dotfiles and this loader itself.
    return (file.indexOf('.') !== 0) && (file !== basename) && (file.slice(-3) === '.js');
  })
  .forEach(file => {
    var model = sequelize['import'](path.join(__dirname, file));
    db[model.name] = model;
  });

// Wire up associations once every model is registered.
Object.keys(db).forEach(modelName => {
  if (db[modelName].associate) {
    db[modelName].associate(db);
  }
});

db.sequelize = sequelize;
db.Sequelize = Sequelize;

module.exports = db;
39 |
--------------------------------------------------------------------------------
/config/config.js:
--------------------------------------------------------------------------------
// Sequelize connection settings keyed by NODE_ENV; loaded by models/index.js.
module.exports = {
  "development": {
    "username": "root",
    // Read from the environment so the real password never lands in git.
    "password": process.env.DEV_DB_PASSWORD,
    "database": "wx_boilerplate_development",
    "host": "127.0.0.1",
    "dialect": "mysql",
    // utf8mb4 so 4-byte characters (emoji, some CJK) store correctly.
    "dialectOptions": {
      "charset": "utf8mb4"
    }
  },
  "test": {
    "username": "root",
    "password": "123456",
    "database": "wx_boilerplate_test",
    "host": "127.0.0.1",
    "dialect": "mysql"
  },
  // Generic production profile, fully driven by environment variables.
  "production": {
    "username": process.env.PROD_DB_USERNAME,
    "password": process.env.PROD_DB_PASSWORD,
    "database": process.env.PROD_DB_DATABASE,
    "host": process.env.PROD_DB_HOST,
    "port": process.env.PROD_DB_PORT,
    "dialect": "mysql",
    "dialectOptions": {
      "charset": "utf8mb4"
    }
  },
  // NOTE(review): this profile's "database" points at wx_boilerplate_production1
  // — looks like a copy-paste leftover; confirm which DB production2 should use.
  "wx_boilerplate_production2": {
    "username": "root",
    "password": "password",
    "database": "wx_boilerplate_production1",
    "host": "dbhost",
    "port": 3306,
    "dialect": "mysql",
    "dialectOptions": {
      "charset": "utf8mb4"
    }
  }
}
--------------------------------------------------------------------------------
/config/wx_config.js:
--------------------------------------------------------------------------------
// WeChat Work (企业微信) app credentials keyed by NODE_ENV; loaded by
// controllers/home.js to sign JS-SDK requests.
module.exports = {
  "development": {
    "corpID": "corpID_development",
    "secret": "secret_development",
    "agentId": "agentId_development",
    "contactSecret": "contactSecret_development",
    "redisKey": "redisKey-development-token",
    "contactRedisKey": "contactRedisKey-development-token",
    "host": "http://development"
  },
  // NOTE(review): this "test" entry contains database fields, not wx fields —
  // appears to have been copied from config/config.js; confirm what the test
  // suite actually reads from it.
  "test": {
    "username": "root",
    "password": "123456",
    "database": "wx_boilerplate_test",
    "host": "127.0.0.1",
    "dialect": "mysql"
  },
  "wx_boilerplate_production1": {
    "corpID": "ww000000000001",
    "secret": "secret1",
    "agentId": "agentId",
    "contactSecret": "contactSecret1",
    "redisKey": "redisKey-token1",
    "contactRedisKey": "contactRedisKey-token1",
    "host": "http://production1"
  },
  "wx_boilerplate_production2": {
    "corpID": "ww000000000002",
    "secret": "secret2",
    "agentId": "agentId",
    "contactSecret": "contactSecret2",
    "redisKey": "redisKey-token2",
    "contactRedisKey": "contactRedisKey-token2",
    "host": "http://production2"
  }
}
--------------------------------------------------------------------------------
/client/build/build.js:
--------------------------------------------------------------------------------
'use strict'
// Production build script: cleans the output directory, then runs webpack
// with the production config and prints a build summary.
require('./check-versions')()

// NODE_ENV is deliberately NOT forced to 'production' here: this project is
// built with per-company env names (e.g. company1_production — see README).
// process.env.NODE_ENV = 'production'
console.log('开始编译>>>>>>');
console.log(process.env.NODE_ENV);

const ora = require('ora')
const rm = require('rimraf')
const path = require('path')
const chalk = require('chalk')
const webpack = require('webpack')
const config = require('../config')
const webpackConfig = require('./webpack.prod.conf')

const spinner = ora('building for production...')
spinner.start()

// Remove the previous build output before compiling.
rm(path.join(config.build.assetsRoot, config.build.assetsSubDirectory), err => {
  if (err) throw err
  webpack(webpackConfig, (err, stats) => {
    spinner.stop()
    if (err) throw err
    process.stdout.write(stats.toString({
      colors: true,
      modules: false,
      children: false, // If you are using ts-loader, setting this to true will make TypeScript errors show up during build.
      chunks: false,
      chunkModules: false
    }) + '\n\n')

    if (stats.hasErrors()) {
      console.log(chalk.red(' Build failed with errors.\n'))
      process.exit(1)
    }

    console.log(chalk.cyan(' Build complete.\n'))
    console.log(chalk.yellow(
      ' Tip: built files are meant to be served over an HTTP server.\n' +
      ' Opening index.html over file:// won\'t work.\n'
    ))
  })
})
44 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | [](https://travis-ci.org/yaonie084/koa2-vue2-wechat-enterprise)
2 | [](https://github.com/facebook/jest) [](https://github.com/facebook/jest)
3 | [](https://coveralls.io/github/yaonie084/koa2-vue2-wechat-enterprise?branch=fix-jest)
4 |
5 | # koa2-vue2-wechat-enterprise
6 |
7 | > 前后端分离的企业微信第三方应用种子项目,后端用的koa2,前端用的vue2
8 | > 方便在多个企业微信中部署同一个版本,前端代码在client目录下
9 | > 集成了免登服务和jssdk的校验
10 |
11 | ## 准备
12 | 第三方应用的 corpid, secret, agentid, 通信录的secret, 一个备过案的域名,在企业微信里面配置好,如何配置参见企业微信的文档
13 |
14 | ## 后端Run Setup
15 |
16 | ``` bash
17 | # 安装依赖
18 | cnpm install
19 |
20 | # 修改后端配置文件,将所有需要的全局变量都填里面
21 | config/config.json
22 |
23 | # 运行后端开发环境
24 | npm start
25 |
26 | # 运行后端第一个生产环境
27 | NODE_ENV=company1_production nodemon bin/www
28 |
29 | # 运行后端第二个生产环境
30 | NODE_ENV=company2_production nodemon bin/www
31 | ```
32 |
33 |
34 | ## 前端Run Setup
35 |
36 | ``` bash
37 | # 安装依赖
38 | cnpm install
39 |
# 修改前端配置文件,将所有需要的全局变量都填里面
client/src/global.js
42 |
43 | # 运行开发环境
44 | npm run dev
45 |
46 | # 运行前端第一个生产环境
47 | NODE_ENV=company1_production npm run build
48 |
# 运行前端第二个生产环境
50 | NODE_ENV=company2_production npm run build
51 | ```
52 |
## Todo
54 | 通过环境变量来加载assets目录
--------------------------------------------------------------------------------
/client/build/check-versions.js:
--------------------------------------------------------------------------------
1 | 'use strict'
2 | const chalk = require('chalk')
3 | const semver = require('semver')
4 | const packageConfig = require('../package.json')
5 | const shell = require('shelljs')
6 |
7 | function exec (cmd) {
8 | return require('child_process').execSync(cmd).toString().trim()
9 | }
10 |
11 | const versionRequirements = [
12 | {
13 | name: 'node',
14 | currentVersion: semver.clean(process.version),
15 | versionRequirement: packageConfig.engines.node
16 | }
17 | ]
18 |
19 | if (shell.which('npm')) {
20 | versionRequirements.push({
21 | name: 'npm',
22 | currentVersion: exec('npm --version'),
23 | versionRequirement: packageConfig.engines.npm
24 | })
25 | }
26 |
27 | module.exports = function () {
28 | const warnings = []
29 |
30 | for (let i = 0; i < versionRequirements.length; i++) {
31 | const mod = versionRequirements[i]
32 |
33 | if (!semver.satisfies(mod.currentVersion, mod.versionRequirement)) {
34 | warnings.push(mod.name + ': ' +
35 | chalk.red(mod.currentVersion) + ' should be ' +
36 | chalk.green(mod.versionRequirement)
37 | )
38 | }
39 | }
40 |
41 | if (warnings.length) {
42 | console.log('')
43 | console.log(chalk.yellow('To use this template, you must update following to modules:'))
44 | console.log()
45 |
46 | for (let i = 0; i < warnings.length; i++) {
47 | const warning = warnings[i]
48 | console.log(' ' + warning)
49 | }
50 |
51 | console.log()
52 | process.exit(1)
53 | }
54 | }
55 |
--------------------------------------------------------------------------------
/controllers/home.js:
--------------------------------------------------------------------------------
1 | const Wx = require('../lib/wx');
2 | const Crypto = require('crypto');
3 | const moment = require('moment');
4 | const env = process.env.NODE_ENV || 'development';
5 | const config = require(__dirname + '/../config/wx_config.js')[env];
6 |
// Returns the hex-encoded SHA-1 digest of a UTF-8 string; used below in
// exports.auth to build the wx jsapi signature (which specifies SHA-1).
var getSha1 = function(str) {
  var sha1 = Crypto.createHash("sha1");// one-way hash, not reversible encryption (original comment wrongly said md5)
  sha1.update(str, 'utf8');
  var res = sha1.digest("hex"); // lowercase hex digest string
  return res;
}
13 |
14 | exports.index = async (ctx, next) => {
15 | // console.log(require(__dirname + '/../config/config.js')[env]);
16 | ctx.body = {
17 | foo: require(__dirname + '/../config/config.js')[env],
18 | env: env
19 | }
20 | }
21 |
22 | exports.auth = async (ctx, next) => {
23 |
24 | var token = await Wx.getToken()
25 | var ticket = await Wx.getTicket(token)
26 | var noncestr = 'asdGfuoaSheh3322';
27 | var timestamp = moment().unix() + '';
28 | //todo: 这里是首页,应该是获取header的origin做url
29 | // console.log(ctx.headers);
30 | var url = config.host;
31 | var string = `jsapi_ticket=${ticket}&noncestr=${noncestr}×tamp=${timestamp}&url=${url}`
32 |
33 | var signature = getSha1(string);
34 | ctx.body = {
35 | signature: signature,
36 | timestamp: timestamp,
37 | nonceStr: noncestr
38 | }
39 | }
40 |
41 | exports.getUserInfo = async (ctx, next) => {
42 | var userInfo = await Wx.getUserInfo(ctx.query.code);
43 | var department = await Wx.getDepartmentById(userInfo.department[0]);
44 | ctx.body = {
45 | userInfo: userInfo,
46 | department: department.department[0].name
47 | }
48 | }
--------------------------------------------------------------------------------
/client/src/main.js:
--------------------------------------------------------------------------------
// Application entry point: registers Mint UI, installs a global navigation
// guard that publishes per-environment globals, and mounts the root instance.
// The Vue build version to load with the `import` command
// (runtime-only or standalone) has been set in webpack.base.conf with an alias.
import Vue from 'vue'
import App from './App'
import MintUI from 'mint-ui'
import 'mint-ui/lib/style.css'
import router from './router'
import api from './service/api'
import './global'

Vue.config.productionTip = false
Vue.use(MintUI)

router.beforeEach((to, from, next) => {
  // var vConsole = new VConsole();
  // console.log('Vconsole ready!');

  // Install the per-environment globals (G_SERVER_URL, TITLE, CORP_ID, ...)
  // declared in src/global.js onto `global`, keyed by NODE_ENV.
  for(var key in global[process.env.NODE_ENV]){
    global[key] = global[process.env.NODE_ENV][key]
  }

  document.title = TITLE;
  // The block below is the currently-disabled wx silent-login flow:
  // var wxid = localStorage.getItem('wxid');
  // // if(to.name == 'MyVisitors'){
  // if(true){
  // // decide here which pages must pass the silent-login flow
  // if(!wxid){
  // // not logged in yet — perform login
  // if(to.query.code){
  // // arriving here via the wx OAuth redirect
  // var code = to.query.code;
  // api.getUserInfo(code).then((res)=>{
  // localStorage.setItem('wxid', res.userInfo.userid);
  // localStorage.setItem('name', res.userInfo.name);
  // next();
  // });

  // }else{
  // var redirectUrl = G_SERVER_URL + to.fullPath;
  // var url = `https://open.weixin.qq.com/connect/oauth2/authorize?appid=${CORP_ID}&redirect_uri=${redirectUrl}&response_type=code&scope=snsapi_userinfo&agentid=${AGENT_ID}&state=STATE123#wechat_redirect`
  // window.location.href = url;
  // }
  // }else{
  // console.log('已登录:', wxid);
  // next();
  // }
  // }else{
  next();
  // }
});

/* eslint-disable no-new */
new Vue({
  el: '#app',
  router,
  components: { App },
  template: ''
})
60 |
--------------------------------------------------------------------------------
/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "koa2-vue2-wechat-enterprise",
3 | "version": "0.1.0",
4 | "description": "koa2,vue2,企业微信,种子项目",
5 | "main": "bin/www",
6 | "scripts": {
7 | "start": "nodemon bin/www",
8 | "dev": "./node_modules/.bin/nodemon bin/www",
9 | "prd": "pm2 start bin/www",
10 | "test": "NODE_ENV=test ./node_modules/.bin/jest --runInBand --notify --no-cache --silent --watch --coverage",
11 | "coveralls": "cat ./coverage/lcov.info | ./node_modules/coveralls/bin/coveralls.js && rm -rf ./coverage"
12 | },
13 | "jest": {
14 | "verbose": true,
15 | "coverageDirectory": "coverage",
16 | "collectCoverage": true,
17 | "collectCoverageFrom": [
18 | "controllers/*.js",
19 | "models/*.js",
20 | "lib/*.js",
21 | "routes/*js",
22 | "!models/index.js"
23 | ]
24 | },
25 | "repository": {
26 | "type": "git",
27 | "url": "git@github.com:yaonie084/koa2-vue2-wechat-enterprise.git"
28 | },
29 | "keywords": [
30 | "weixin",
31 | "wechat",
32 | "enterprise",
33 | "koa2",
34 | "vue2"
35 | ],
36 | "dependencies": {
37 | "debug": "^2.6.3",
38 | "kcors": "^2.2.1",
39 | "koa": "^2.2.0",
40 | "koa-bodyparser": "^3.2.0",
41 | "koa-convert": "^1.2.0",
42 | "koa-json": "^2.0.2",
43 | "koa-logger": "^2.0.1",
44 | "koa-onerror": "^1.2.1",
45 | "koa-router": "^7.1.1",
46 | "koa-static": "^3.0.0",
47 | "koa-views": "^5.2.1",
48 | "mocha": "^5.0.0",
49 | "moment": "^2.18.1",
50 | "mysql": "^2.14.1",
51 | "pug": "^2.0.0-rc.1",
52 | "qrcode": "^1.2.0",
53 | "random-js": "^1.0.8",
54 | "redis": "^2.8.0",
55 | "request": "^2.81.0",
56 | "request-promise": "^4.2.1",
57 | "sequelize": "^3.30.4",
58 | "should": "^13.2.1",
59 | "supertest": "^3.0.0",
60 | "then-redis": "^2.0.1"
61 | },
62 | "devDependencies": {
63 | "coveralls": "^3.0.1",
64 | "cross-env": "^5.1.6",
65 | "jest": "^23.1.0",
66 | "nodemon": "^1.8.1"
67 | }
68 | }
69 |
--------------------------------------------------------------------------------
/nginx:
--------------------------------------------------------------------------------
1 |
# Site config (installed as /etc/nginx/sites-enabled/default by the Dockerfile):
# serves the built Vue client and proxies /api/ to node, /monitor/ to supervisord.
upstream node_server {
  server 127.0.0.1:3000;
}
upstream monitor_server {
  server 127.0.0.1:9001;
}

#limit_req_zone $binary_remote_addr zone=req_one:10m rate=1r/s;

server {
  listen 80;

  server_name default_server;
  keepalive_timeout 10;
  # ssl on;
  # ssl_certificate /home/ethan/214523655670791.pem;
  # ssl_certificate_key /home/ethan/214523655670791.key;
  # ssl_session_timeout 5m;
  # ssl_ciphers ECDHE-RSA-AES128-GCM-SHA256:ECDHE:ECDH:AES:HIGH:!NULL:!aNULL:!MD5:!ADH:!RC4;
  # ssl_protocols TLSv1 TLSv1.1 TLSv1.2;
  # ssl_prefer_server_ciphers on;

  # limit_req zone=req_one burst=120 nodelay;
  # History-mode SPA: unknown paths fall back to index.html (see client router).
  location / {
    root /app/client/dist;
    try_files $uri $uri/ /index.html =404;
  }
  location /api/{
    proxy_redirect off;
    proxy_set_header X-Real-IP $remote_addr;
    proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
    proxy_set_header X-Forwarded-Proto $scheme;
    proxy_set_header Host $http_host;
    proxy_set_header X-NginX-Proxy true;
    proxy_set_header Connection "";
    proxy_pass http://node_server;
  }

  # NOTE(review): exposes the supervisord web UI (weak hard-coded password in
  # supervisord.conf) to anyone reaching port 80 — restrict before production.
  location /monitor/{
    proxy_redirect off;
    proxy_set_header X-Real-IP $remote_addr;
    proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
    proxy_set_header X-Forwarded-Proto $scheme;
    proxy_set_header Host $http_host;
    proxy_set_header X-NginX-Proxy true;
    proxy_set_header Connection "";
    proxy_pass http://monitor_server;
  }

  gzip on;
  gzip_min_length 1k;
  gzip_buffers 4 16k;
  gzip_comp_level 5;
  gzip_types text/plain application/x-javascript text/css application/xml text/javascript application/x-httpd-php;
}
57 |
--------------------------------------------------------------------------------
/config/config.json.default:
--------------------------------------------------------------------------------
1 | {
2 | "development": {
3 | "username": "root",
4 | "password": "123456",
5 | "database": "wx_boilerplate_development",
6 | "host": "127.0.0.1",
7 | "dialect": "mysql",
8 | "dialectOptions": {
9 | "charset": "utf8mb4"
10 | },
11 | "project":{
12 | "corpID": "corpID_development",
13 | "secret": "secret_development",
14 | "agentId": "agentId_development",
15 | "contactSecret": "contactSecret_development",
16 | "redisKey": "redisKey-development-token",
17 | "contactRedisKey": "contactRedisKey-development-token",
18 | "host": "http://development"
19 | }
20 | },
21 | "test": {
22 | "username": "root",
23 | "password": null,
24 | "database": "wx_boilerplate_test",
25 | "host": "127.0.0.1",
26 | "dialect": "mysql"
27 | },
28 | "wx_boilerplate_production1": {
29 | "username": "root",
30 | "password": "password",
31 | "database": "wx_boilerplate_production1",
32 | "host": "dbhost",
33 | "port": 3306,
34 | "dialect": "mysql",
35 | "dialectOptions": {
36 | "charset": "utf8mb4"
37 | },
38 | "project":{
39 | "corpID": "ww000000000001",
40 | "secret": "secret1",
41 | "agentId": "agentId",
42 | "contactSecret": "contactSecret1",
43 | "redisKey": "redisKey-token1",
44 | "contactRedisKey": "contactRedisKey-token1",
45 | "host": "http://production1"
46 | }
47 | },
48 | "wx_boilerplate_production2": {
49 | "username": "root",
50 | "password": "password",
51 |     "database": "wx_boilerplate_production2",
52 | "host": "dbhost",
53 | "port": 3306,
54 | "dialect": "mysql",
55 | "dialectOptions": {
56 | "charset": "utf8mb4"
57 | },
58 | "project":{
59 | "corpID": "ww000000000002",
60 | "secret": "secret2",
61 | "agentId": "agentId",
62 | "contactSecret": "contactSecret2",
63 | "redisKey": "redisKey-token2",
64 | "contactRedisKey": "contactRedisKey-token2",
65 | "host": "http://production2"
66 | }
67 | }
68 | }
--------------------------------------------------------------------------------
/bin/www:
--------------------------------------------------------------------------------
#!/usr/bin/env node

/**
 * Module dependencies.
 */

var app = require('../app');
var debug = require('debug')('demo:server');
var http = require('http');

/**
 * Get port from environment and store in Express.
 */

var port = normalizePort(process.env.PORT || '3000');
// app.set('port', port);

/**
 * Create HTTP server.
 *
 * NOTE(review): `app.callback()` below means `app` is a Koa application,
 * despite the Express-style generator comments in this file.
 */

var server = http.createServer(app.callback());

/**
 * Listen on provided port, on all network interfaces.
 */

server.listen(port);
server.on('error', onError);
server.on('listening', onListening);

/**
 * Normalize a port value into a number, a named-pipe string, or false.
 *
 * @param {string} val - Raw port value (e.g. from process.env.PORT).
 * @returns {number|string|false} The numeric port, the original string for
 *   a named pipe, or false for a negative number.
 */
function normalizePort(val) {
  const parsed = parseInt(val, 10);

  // Not numeric at all: treat the value as a named pipe and pass it through.
  if (isNaN(parsed)) {
    return val;
  }

  // Negative numbers are not valid ports.
  return parsed >= 0 ? parsed : false;
}

/**
 * Event listener for the HTTP server "error" event.
 * Prints a friendly message and exits for the two classic listen failures;
 * rethrows anything unexpected.
 *
 * @param {Error & {syscall?: string, code?: string}} error
 */
function onError(error) {
  if (error.syscall !== 'listen') {
    throw error;
  }

  const bind = (typeof port === 'string' ? 'Pipe ' : 'Port ') + port;

  // handle specific listen errors with friendly messages
  if (error.code === 'EACCES') {
    console.error(bind + ' requires elevated privileges');
    process.exit(1);
  } else if (error.code === 'EADDRINUSE') {
    console.error(bind + ' is already in use');
    process.exit(1);
  } else {
    throw error;
  }
}

/**
 * Event listener for HTTP server "listening" event.
 */

function onListening() {
  var addr = server.address();
  // address() returns a string for pipes/unix sockets, an object for TCP.
  var bind = typeof addr === 'string'
    ? 'pipe ' + addr
    : 'port ' + addr.port;
  console.log('Listening on ' + bind);
  console.log(process.env.NODE_ENV);
}

// Exported so tests (see test/server/home.spec.js) can reuse and close the server.
exports.app = app;
exports.server = server;
--------------------------------------------------------------------------------
/client/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "client",
3 | "version": "1.0.0",
4 | "description": "A Vue.js project",
5 | "author": "ethan ",
6 | "private": true,
7 | "scripts": {
8 | "dev": "webpack-dev-server --inline --progress --config build/webpack.dev.conf.js",
9 | "start": "npm run dev",
10 | "build": "node build/build.js"
11 | },
12 | "dependencies": {
13 | "axios": "^0.17.1",
14 | "mint-ui": "^2.2.13",
15 | "moment": "^2.20.1",
16 | "vue": "^2.5.2",
17 | "vue-qrcode": "github:xkeshi/vue-qrcode",
18 | "vue-router": "^3.0.1"
19 | },
20 | "devDependencies": {
21 | "autoprefixer": "^7.1.2",
22 | "babel-core": "^6.22.1",
23 | "babel-helper-vue-jsx-merge-props": "^2.0.3",
24 | "babel-loader": "^7.1.1",
25 | "babel-plugin-syntax-jsx": "^6.18.0",
26 | "babel-plugin-transform-runtime": "^6.22.0",
27 | "babel-plugin-transform-vue-jsx": "^3.5.0",
28 | "babel-preset-env": "^1.3.2",
29 | "babel-preset-stage-2": "^6.22.0",
30 | "chalk": "^2.0.1",
31 | "copy-webpack-plugin": "^4.0.1",
32 | "css-loader": "^0.28.9",
33 | "extract-text-webpack-plugin": "^3.0.2",
34 | "file-loader": "^1.1.4",
35 | "friendly-errors-webpack-plugin": "^1.6.1",
36 | "html-webpack-plugin": "^2.30.1",
37 | "node-notifier": "^5.1.2",
38 | "optimize-css-assets-webpack-plugin": "^3.2.0",
39 | "ora": "^1.2.0",
40 | "portfinder": "^1.0.13",
41 | "postcss-import": "^11.0.0",
42 | "postcss-loader": "^2.0.8",
43 | "postcss-url": "^7.2.1",
44 | "rimraf": "^2.6.0",
45 | "semver": "^5.5.0",
46 | "shelljs": "^0.7.6",
47 | "uglifyjs-webpack-plugin": "^1.1.1",
48 | "url-loader": "^0.5.8",
49 | "vue-loader": "^13.3.0",
50 | "vue-style-loader": "^3.0.1",
51 | "vue-template-compiler": "^2.5.2",
52 | "webpack": "^3.6.0",
53 | "webpack-bundle-analyzer": "^2.9.0",
54 | "webpack-dev-server": "^2.9.1",
55 | "webpack-merge": "^4.1.0"
56 | },
57 | "engines": {
58 | "node": ">= 6.0.0",
59 | "npm": ">= 3.0.0"
60 | },
61 | "browserslist": [
62 | "> 1%",
63 | "last 2 versions",
64 | "not ie <= 8"
65 | ]
66 | }
67 |
--------------------------------------------------------------------------------
/test/server/home.spec.js:
--------------------------------------------------------------------------------
1 | "use strict";
2 | var db = require("../../models");
3 | var should = require("should");
4 | var app = require("../../bin/www").server;
5 | var request = require("supertest").agent(app);
6 |
7 | jest.mock("../../lib/wx");
8 | const Wx = require("../../lib/wx");
9 |
// Resolve with the string "ok" after `time` milliseconds.
const sleep = (time) =>
  new Promise((resolve) => {
    setTimeout(() => resolve('ok'), time);
  });
17 |
afterAll(() => {
  console.log('finish');
  app.close() // close the HTTP server once every test has finished
  // process.exit();
})

beforeEach(() => {
  // console.log('begin');
})

// Smoke test: the index route reports the current environment name.
describe('test index', () => {
  it("should return the string 'test'", async () => {
    const response = await request
      .get('/api')
      .expect(200)
    should.exist(response.body);
    response.body.env.should.equal('test');
  })
});

// The wx helpers are jest-mocked (see jest.mock above), so these tests
// exercise only the route/controller logic, not the WeChat API.
describe('test getUserInfo', () => {
  it("should return development", async () => {

    Wx.getUserInfo.mockImplementation(params => {
      return {
        department: [{
          foo: 'bar'
        }]
      };
    });

    Wx.getDepartmentById.mockImplementation(params => {
      return {
        department: [{
          name: 'development'
        }]
      };
    });

    // Freeze Date.now so any time-based logic in the route is deterministic.
    jest.spyOn(Date, 'now').mockImplementation(() => 1528739176000)
    const response = await request
      .get('/api/get-user-info')
      .expect(200)

    should.exist(response.body);

    response.body.department.should.equal('development');
  })
});

describe('test auth', () => {

  it("should return a hash", async () => {

    Wx.getUserInfo.mockImplementation(() => {
      return 'mock token'
    });

    Wx.getDepartmentById.mockImplementation(params => {
      return 'mock ticket'
    });
    const response = await request
      .post('/api/auth')
      .expect(200)

    should.exist(response.body);
    // Expected signature for the mocked wx responses — NOTE(review): assumes
    // the server derives nonce/timestamp deterministically here; confirm.
    response.body.signature.should.equal('bdd93d6cfc90ec73793ea87b6413227f668ef705');
  });
});
--------------------------------------------------------------------------------
/client/config/index.js:
--------------------------------------------------------------------------------
1 | 'use strict'
2 | // Template version: 1.3.1
3 | // see http://vuejs-templates.github.io/webpack for documentation.
4 |
5 | const path = require('path')
6 |
// Build/dev configuration consumed by the webpack configs in ../build.
module.exports = {
  dev: {

    // Paths
    assetsSubDirectory: 'static',
    assetsPublicPath: '/',
    // Empty: no dev-server proxying configured; API requests are expected to
    // be routed elsewhere (nginx proxies /api in production) — see /nginx.
    proxyTable: {},

    // Various Dev Server settings
    host: '0.0.0.0', // can be overwritten by process.env.HOST
    port: 8084, // can be overwritten by process.env.PORT, if port is in use, a free one will be determined
    autoOpenBrowser: false,
    errorOverlay: true,
    notifyOnErrors: true,
    poll: false, // https://webpack.js.org/configuration/dev-server/#devserver-watchoptions-


    /**
     * Source Maps
     */

    // https://webpack.js.org/configuration/devtool/#development
    devtool: 'cheap-module-eval-source-map',

    // If you have problems debugging vue-files in devtools,
    // set this to false - it *may* help
    // https://vue-loader.vuejs.org/en/options.html#cachebusting
    cacheBusting: true,

    cssSourceMap: true
  },

  build: {
    // Template for index.html
    index: path.resolve(__dirname, '../dist/index.html'),

    // Paths
    assetsRoot: path.resolve(__dirname, '../dist'),
    assetsSubDirectory: 'static',
    assetsPublicPath: '/',

    /**
     * Source Maps
     */

    productionSourceMap: true,
    // https://webpack.js.org/configuration/devtool/#production
    devtool: '#source-map',

    // Gzip off by default as many popular static hosts such as
    // Surge or Netlify already gzip all static assets for you.
    // Before setting to `true`, make sure to:
    // npm install --save-dev compression-webpack-plugin
    productionGzip: false,
    productionGzipExtensions: ['js', 'css'],

    // Run the build command with an extra argument to
    // View the bundle analyzer report after build finishes:
    // `npm run build --report`
    // Set to `true` or `false` to always turn it on or off
    bundleAnalyzerReport: process.env.npm_config_report
  }
}

--------------------------------------------------------------------------------
/client/build/webpack.base.conf.js:
--------------------------------------------------------------------------------
1 | 'use strict'
2 | const path = require('path')
3 | const utils = require('./utils')
4 | const config = require('../config')
5 | const vueLoaderConfig = require('./vue-loader.conf')
6 |
/**
 * Resolve `dir` relative to the client project root (the parent of build/).
 */
function resolve (dir) {
  const projectRoot = path.join(__dirname, '..')
  return path.join(projectRoot, dir)
}
10 |
// Shared webpack configuration; webpack.dev.conf.js and webpack.prod.conf.js
// merge on top of this.
module.exports = {
  context: path.resolve(__dirname, '../'),
  entry: {
    app: './src/main.js'
  },
  output: {
    path: config.build.assetsRoot,
    filename: '[name].js',
    // NOTE(review): this is a truthy check — ANY non-empty NODE_ENV (including
    // 'development') selects the build publicPath. The vue template default
    // compares against 'production'; confirm this is intentional.
    publicPath: process.env.NODE_ENV
      ? config.build.assetsPublicPath
      : config.dev.assetsPublicPath
  },
  resolve: {
    extensions: ['.js', '.vue', '.json'],
    alias: {
      // Full build of Vue (runtime + compiler), needed for in-DOM templates.
      'vue$': 'vue/dist/vue.esm.js',
      '@': resolve('src'),
    }
  },
  module: {
    rules: [
      {
        test: /\.vue$/,
        loader: 'vue-loader',
        options: vueLoaderConfig
      },
      {
        test: /\.js$/,
        loader: 'babel-loader',
        include: [resolve('src'), resolve('test'), resolve('node_modules/webpack-dev-server/client')]
      },
      {
        test: /\.(png|jpe?g|gif|svg)(\?.*)?$/,
        loader: 'url-loader',
        options: {
          // Inline assets under 10 KB as data URIs; emit files otherwise.
          limit: 10000,
          name: utils.assetsPath('img/[name].[hash:7].[ext]')
        }
      },
      {
        test: /\.(mp4|webm|ogg|mp3|wav|flac|aac)(\?.*)?$/,
        loader: 'url-loader',
        options: {
          limit: 10000,
          name: utils.assetsPath('media/[name].[hash:7].[ext]')
        }
      },
      {
        test: /\.(woff2?|eot|ttf|otf)(\?.*)?$/,
        loader: 'url-loader',
        options: {
          limit: 10000,
          name: utils.assetsPath('fonts/[name].[hash:7].[ext]')
        }
      }
    ]
  },
  node: {
    // prevent webpack from injecting useless setImmediate polyfill because Vue
    // source contains it (although only uses it if it's native).
    setImmediate: false,
    // prevent webpack from injecting mocks to Node native modules
    // that does not make sense for the client
    dgram: 'empty',
    fs: 'empty',
    net: 'empty',
    tls: 'empty',
    child_process: 'empty'
  }
}

81 |
--------------------------------------------------------------------------------
/lib/wx.js:
--------------------------------------------------------------------------------
1 |
2 | const request = require('request-promise');
3 | const redis = require('./redis');
4 | const moment = require('moment');
5 | const env = process.env.NODE_ENV || 'development';
6 | const config = require(__dirname + '/../config/wx_config.js')[env];
7 |
8 |
/**
 * Perform a JSON HTTP request via request-promise and return the parsed body.
 *
 * @param {string} method - HTTP verb, e.g. 'GET' or 'POST'.
 * @param {string} uri - Absolute URL to call.
 * @param {Object|null} qs - Query-string parameters.
 * @param {Object|null} body - JSON request body.
 * @returns {Promise<Object>} the parsed JSON response body.
 */
var invoke = async function (method, uri, qs, body) {
  const options = {
    method,
    uri,
    qs,
    body,
    headers: {
      'Content-Type': 'application/json',
      'cache-control': 'no-cache'
    },
    json: true
  };
  return await request(options);
}
23 |
24 | //isUserList==true是获取通信录模块的token
25 | //isUserList==false是获取应用本身的token
26 | exports.getToken = async function(isUserList = false) {
27 |
28 | var key = config.redisKey
29 | var secret = config.secret
30 | if(isUserList){
31 | secret = config.contactSecret
32 | key = config.contactRedisKey
33 | }
34 |
35 | token = await redis.get(key)
36 | if (token) {
37 | return token;
38 | } else {
39 | var secret;
40 | var uri = `https://qyapi.weixin.qq.com/cgi-bin/gettoken?corpid=${config.corpID}&corpsecret=${secret}`;
41 | var newToken = await invoke('GET', uri, null, null);
42 | await redis.set(key, newToken.access_token);
43 | await redis.expire(key, 7200);
44 | return newToken.access_token;
45 | }
46 | }
47 |
48 | exports.getTicket = async function(token) {
49 | var uri = `https://qyapi.weixin.qq.com/cgi-bin/get_jsapi_ticket?access_token=${token}`;
50 | var ticket = await invoke('GET', uri, null, null);
51 | // console.log(ticket);
52 | return ticket.ticket;
53 | }
54 |
55 | exports.getUserInfo = async function(code){
56 | var token = await this.getToken();
57 | var uri = `https://qyapi.weixin.qq.com/cgi-bin/user/getuserinfo?access_token=${token}&code=${code}`;
58 | var info = await invoke('GET', uri, null, null);
59 | var ticket = info.user_ticket;
60 | var uri = `https://qyapi.weixin.qq.com/cgi-bin/user/getuserdetail?access_token=${token}`;
61 | var userInfo = await invoke('POST', uri, null, {
62 | user_ticket: ticket
63 | });
64 | return userInfo;
65 | }
66 |
67 | exports.getAllUsers = async ()=>{
68 | var token = await this.getToken(true);
69 | var uri = `https://qyapi.weixin.qq.com/cgi-bin/user/list?access_token=${token}&department_id=1&fetch_child=1&status=0`;
70 | var info = await invoke('GET', uri, null, null);
71 | return info;
72 | }
73 |
74 | exports.getDepartmentById = async ()=>{
75 | return {
76 | foo: 'bar'
77 | }
78 | }
--------------------------------------------------------------------------------
/client/src/components/HelloWorld.vue:
--------------------------------------------------------------------------------
1 |
2 |
3 |
当前环境: {{ env }}
4 |
Essential Links
5 |
48 |
Ecosystem
49 |
83 |
84 |
85 |
86 |
103 |
104 |
105 |
121 |
--------------------------------------------------------------------------------
/client/build/utils.js:
--------------------------------------------------------------------------------
1 | 'use strict'
2 | const path = require('path')
3 | const config = require('../config')
4 | const ExtractTextPlugin = require('extract-text-webpack-plugin')
5 | const packageConfig = require('../package.json')
6 |
7 | exports.assetsPath = function (_path) {
8 | const assetsSubDirectory = process.env.NODE_ENV
9 | ? config.build.assetsSubDirectory
10 | : config.dev.assetsSubDirectory
11 |
12 | return path.posix.join(assetsSubDirectory, _path)
13 | }
14 |
/**
 * Build the css loader chains used by vue-loader and by styleLoaders below.
 *
 * @param {Object} [options] - { sourceMap, usePostCSS, extract }.
 * @returns {Object} map of style extension -> loader array (dev) or
 *   ExtractTextPlugin chain (when options.extract is set).
 */
exports.cssLoaders = function (options) {
  options = options || {}

  const cssLoader = {
    loader: 'css-loader',
    options: {
      sourceMap: options.sourceMap
    }
  }

  const postcssLoader = {
    loader: 'postcss-loader',
    options: {
      sourceMap: options.sourceMap
    }
  }

  // generate loader string to be used with extract text plugin
  function generateLoaders (loader, loaderOptions) {
    const loaders = options.usePostCSS ? [cssLoader, postcssLoader] : [cssLoader]

    if (loader) {
      loaders.push({
        loader: loader + '-loader',
        options: Object.assign({}, loaderOptions, {
          sourceMap: options.sourceMap
        })
      })
    }

    // Extract CSS when that option is specified
    // (which is the case during production build)
    if (options.extract) {
      return ExtractTextPlugin.extract({
        use: loaders,
        fallback: 'vue-style-loader'
      })
    } else {
      return ['vue-style-loader'].concat(loaders)
    }
  }

  // https://vue-loader.vuejs.org/en/configurations/extract-css.html
  return {
    css: generateLoaders(),
    postcss: generateLoaders(),
    less: generateLoaders('less'),
    sass: generateLoaders('sass', { indentedSyntax: true }),
    scss: generateLoaders('sass'),
    stylus: generateLoaders('stylus'),
    styl: generateLoaders('stylus')
  }
}
68 |
69 | // Generate loaders for standalone style files (outside of .vue)
70 | exports.styleLoaders = function (options) {
71 | const output = []
72 | const loaders = exports.cssLoaders(options)
73 |
74 | for (const extension in loaders) {
75 | const loader = loaders[extension]
76 | output.push({
77 | test: new RegExp('\\.' + extension + '$'),
78 | use: loader
79 | })
80 | }
81 |
82 | return output
83 | }
84 |
/**
 * Build the onErrors callback for FriendlyErrorsPlugin: shows a desktop
 * notification (via node-notifier) for the first compile error only.
 */
exports.createNotifierCallback = () => {
  const notifier = require('node-notifier')

  return (severity, errors) => {
    // Only notify for hard errors; warnings stay in the terminal.
    if (severity !== 'error') return

    const error = errors[0]
    // Strip the webpack loader chain ("loader!loader!file") down to the file.
    const filename = error.file && error.file.split('!').pop()

    notifier.notify({
      title: packageConfig.name,
      message: severity + ': ' + error.name,
      subtitle: filename || '',
      icon: path.join(__dirname, 'logo.png')
    })
  }
}
102 |
--------------------------------------------------------------------------------
/client/build/webpack.dev.conf.js:
--------------------------------------------------------------------------------
1 | 'use strict'
2 | const utils = require('./utils')
3 | const webpack = require('webpack')
4 | const config = require('../config')
5 | const merge = require('webpack-merge')
6 | const path = require('path')
7 | const baseWebpackConfig = require('./webpack.base.conf')
8 | const CopyWebpackPlugin = require('copy-webpack-plugin')
9 | const HtmlWebpackPlugin = require('html-webpack-plugin')
10 | const FriendlyErrorsPlugin = require('friendly-errors-webpack-plugin')
11 | const portfinder = require('portfinder')
12 |
const HOST = process.env.HOST
const PORT = process.env.PORT && Number(process.env.PORT)

const devWebpackConfig = merge(baseWebpackConfig, {
  module: {
    rules: utils.styleLoaders({ sourceMap: config.dev.cssSourceMap, usePostCSS: true })
  },
  // cheap-module-eval-source-map is faster for development
  devtool: config.dev.devtool,

  // these devServer options should be customized in /config/index.js
  devServer: {
    clientLogLevel: 'warning',
    historyApiFallback: {
      rewrites: [
        { from: /.*/, to: path.posix.join(config.dev.assetsPublicPath, 'index.html') },
      ],
    },
    hot: true,
    contentBase: false, // since we use CopyWebpackPlugin.
    compress: true,
    host: HOST || config.dev.host,
    port: PORT || config.dev.port,
    open: config.dev.autoOpenBrowser,
    overlay: config.dev.errorOverlay
      ? { warnings: false, errors: true }
      : false,
    publicPath: config.dev.assetsPublicPath,
    proxy: config.dev.proxyTable,
    quiet: true, // necessary for FriendlyErrorsPlugin
    watchOptions: {
      poll: config.dev.poll,
    }
  },
  plugins: [
    new webpack.DefinePlugin({
      'process.env': require('../config/dev.env')
    }),
    new webpack.HotModuleReplacementPlugin(),
    new webpack.NamedModulesPlugin(), // HMR shows correct file names in console on update.
    new webpack.NoEmitOnErrorsPlugin(),
    // https://github.com/ampedandwired/html-webpack-plugin
    new HtmlWebpackPlugin({
      filename: 'index.html',
      template: 'index.html',
      inject: true
    }),
    // copy custom static assets
    new CopyWebpackPlugin([
      {
        from: path.resolve(__dirname, '../static'),
        to: config.dev.assetsSubDirectory,
        ignore: ['.*']
      }
    ])
  ]
})

// Export a Promise: portfinder probes for a free port asynchronously before
// the final config can be produced (wrapping its callback API in new Promise
// is the appropriate pattern here).
module.exports = new Promise((resolve, reject) => {
  portfinder.basePort = process.env.PORT || config.dev.port
  portfinder.getPort((err, port) => {
    if (err) {
      reject(err)
    } else {
      // publish the new Port, necessary for e2e tests
      process.env.PORT = port
      // add port to devServer config
      devWebpackConfig.devServer.port = port

      // Add FriendlyErrorsPlugin
      devWebpackConfig.plugins.push(new FriendlyErrorsPlugin({
        compilationSuccessInfo: {
          messages: [`Your application is running here: http://${devWebpackConfig.devServer.host}:${port}`],
        },
        onErrors: config.dev.notifyOnErrors
          ? utils.createNotifierCallback()
          : undefined
      }))

      resolve(devWebpackConfig)
    }
  })
})
96 |
--------------------------------------------------------------------------------
/client/build/webpack.prod.conf.js:
--------------------------------------------------------------------------------
1 | 'use strict'
2 | const path = require('path')
3 | const utils = require('./utils')
4 | const webpack = require('webpack')
5 | const config = require('../config')
6 | const merge = require('webpack-merge')
7 | const baseWebpackConfig = require('./webpack.base.conf')
8 | const CopyWebpackPlugin = require('copy-webpack-plugin')
9 | const HtmlWebpackPlugin = require('html-webpack-plugin')
10 | const ExtractTextPlugin = require('extract-text-webpack-plugin')
11 | const OptimizeCSSPlugin = require('optimize-css-assets-webpack-plugin')
12 | const UglifyJsPlugin = require('uglifyjs-webpack-plugin')
13 |
// const env = require('../config/prod.env')

const webpackConfig = merge(baseWebpackConfig, {
  module: {
    rules: utils.styleLoaders({
      sourceMap: config.build.productionSourceMap,
      extract: true,
      usePostCSS: true
    })
  },
  devtool: config.build.productionSourceMap ? config.build.devtool : false,
  output: {
    path: config.build.assetsRoot,
    filename: utils.assetsPath('js/[name].[chunkhash].js'),
    chunkFilename: utils.assetsPath('js/[id].[chunkhash].js')
  },
  plugins: [
    // http://vuejs.github.io/vue-loader/en/workflow/production.html
    // NOTE(review): NODE_ENV is injected straight from the build environment
    // rather than from config/prod.env (require above is commented out);
    // an unset NODE_ENV would inline the string "undefined" — confirm the
    // build always sets it.
    new webpack.DefinePlugin({
      'process.env': {
        NODE_ENV: `"${process.env.NODE_ENV}"`
      }
    }),
    new UglifyJsPlugin({
      uglifyOptions: {
        compress: {
          warnings: false
        }
      },
      sourceMap: config.build.productionSourceMap,
      parallel: true
    }),
    // extract css into its own file
    new ExtractTextPlugin({
      filename: utils.assetsPath('css/[name].[contenthash].css'),
      // Setting the following option to `false` will not extract CSS from codesplit chunks.
      // Their CSS will instead be inserted dynamically with style-loader when the codesplit chunk has been loaded by webpack.
      // It's currently set to `true` because we are seeing that sourcemaps are included in the codesplit bundle as well when it's `false`,
      // increasing file size: https://github.com/vuejs-templates/webpack/issues/1110
      allChunks: true,
    }),
    // Compress extracted CSS. We are using this plugin so that possible
    // duplicated CSS from different components can be deduped.
    new OptimizeCSSPlugin({
      cssProcessorOptions: config.build.productionSourceMap
        ? { safe: true, map: { inline: false } }
        : { safe: true }
    }),
    // generate dist index.html with correct asset hash for caching.
    // you can customize output by editing /index.html
    // see https://github.com/ampedandwired/html-webpack-plugin
    new HtmlWebpackPlugin({
      filename: config.build.index,
      template: 'index.html',
      inject: true,
      minify: {
        removeComments: true,
        collapseWhitespace: true,
        removeAttributeQuotes: true
        // more options:
        // https://github.com/kangax/html-minifier#options-quick-reference
      },
      // necessary to consistently work with multiple chunks via CommonsChunkPlugin
      chunksSortMode: 'dependency'
    }),
    // keep module.id stable when vendor modules does not change
    new webpack.HashedModuleIdsPlugin(),
    // enable scope hoisting
    new webpack.optimize.ModuleConcatenationPlugin(),
    // split vendor js into its own file
    new webpack.optimize.CommonsChunkPlugin({
      name: 'vendor',
      minChunks (module) {
        // any required modules inside node_modules are extracted to vendor
        return (
          module.resource &&
          /\.js$/.test(module.resource) &&
          module.resource.indexOf(
            path.join(__dirname, '../node_modules')
          ) === 0
        )
      }
    }),
    // extract webpack runtime and module manifest to its own file in order to
    // prevent vendor hash from being updated whenever app bundle is updated
    new webpack.optimize.CommonsChunkPlugin({
      name: 'manifest',
      minChunks: Infinity
    }),
    // This instance extracts shared chunks from code splitted chunks and bundles them
    // in a separate chunk, similar to the vendor chunk
    // see: https://webpack.js.org/plugins/commons-chunk-plugin/#extra-async-commons-chunk
    new webpack.optimize.CommonsChunkPlugin({
      name: 'app',
      async: 'vendor-async',
      children: true,
      minChunks: 3
    }),

    // copy custom static assets
    new CopyWebpackPlugin([
      {
        from: path.resolve(__dirname, '../static'),
        to: config.build.assetsSubDirectory,
        ignore: ['.*']
      }
    ])
  ]
})

// Optional gzip pre-compression of build output (config.build.productionGzip).
if (config.build.productionGzip) {
  const CompressionWebpackPlugin = require('compression-webpack-plugin')

  webpackConfig.plugins.push(
    new CompressionWebpackPlugin({
      asset: '[path].gz[query]',
      algorithm: 'gzip',
      test: new RegExp(
        '\\.(' +
        config.build.productionGzipExtensions.join('|') +
        ')$'
      ),
      threshold: 10240,
      minRatio: 0.8
    })
  )
}

// Optional bundle-size report (`npm run build --report`).
if (config.build.bundleAnalyzerReport) {
  const BundleAnalyzerPlugin = require('webpack-bundle-analyzer').BundleAnalyzerPlugin
  webpackConfig.plugins.push(new BundleAnalyzerPlugin())
}

module.exports = webpackConfig
148 |
--------------------------------------------------------------------------------
/redis.conf:
--------------------------------------------------------------------------------
1 | # Redis configuration file example
2 |
3 | # Note on units: when memory size is needed, it is possible to specify
4 | # it in the usual form of 1k 5GB 4M and so forth:
5 | #
6 | # 1k => 1000 bytes
7 | # 1kb => 1024 bytes
8 | # 1m => 1000000 bytes
9 | # 1mb => 1024*1024 bytes
10 | # 1g => 1000000000 bytes
11 | # 1gb => 1024*1024*1024 bytes
12 | #
13 | # units are case insensitive so 1GB 1Gb 1gB are all the same.
14 |
15 | ################################## INCLUDES ###################################
16 |
17 | # Include one or more other config files here. This is useful if you
18 | # have a standard template that goes to all Redis server but also need
19 | # to customize a few per-server settings. Include files can include
20 | # other files, so use this wisely.
21 | #
22 | # Notice option "include" won't be rewritten by command "CONFIG REWRITE"
23 | # from admin or Redis Sentinel. Since Redis always uses the last processed
24 | # line as value of a configuration directive, you'd better put includes
25 | # at the beginning of this file to avoid overwriting config change at runtime.
26 | #
27 | # If instead you are interested in using includes to override configuration
28 | # options, it is better to use include as the last line.
29 | #
30 | # include /path/to/local.conf
31 | # include /path/to/other.conf
32 |
33 | ################################ GENERAL #####################################
34 |
35 | # By default Redis does not run as a daemon. Use 'yes' if you need it.
36 | # Note that Redis will write a pid file in /var/run/redis.pid when daemonized.
37 | daemonize no
38 |
39 | # When running daemonized, Redis writes a pid file in /var/run/redis.pid by
40 | # default. You can specify a custom pid file location here.
41 | pidfile /var/run/redis/redis-server.pid
42 |
43 | # Accept connections on the specified port, default is 6379.
44 | # If port 0 is specified Redis will not listen on a TCP socket.
45 | port 6379
46 |
47 | # By default Redis listens for connections from all the network interfaces
48 | # available on the server. It is possible to listen to just one or multiple
49 | # interfaces using the "bind" configuration directive, followed by one or
50 | # more IP addresses.
51 | #
52 | # Examples:
53 | #
54 | # bind 192.168.1.100 10.0.0.1
55 | bind 127.0.0.1
56 |
57 | # Specify the path for the unix socket that will be used to listen for
58 | # incoming connections. There is no default, so Redis will not listen
59 | # on a unix socket when not specified.
60 | #
61 | # unixsocket /var/run/redis/redis.sock
62 | # unixsocketperm 755
63 |
64 | # Close the connection after a client is idle for N seconds (0 to disable)
65 | timeout 0
66 |
67 | # TCP keepalive.
68 | #
69 | # If non-zero, use SO_KEEPALIVE to send TCP ACKs to clients in absence
70 | # of communication. This is useful for two reasons:
71 | #
72 | # 1) Detect dead peers.
73 | # 2) Take the connection alive from the point of view of network
74 | # equipment in the middle.
75 | #
76 | # On Linux, the specified value (in seconds) is the period used to send ACKs.
77 | # Note that to close the connection the double of the time is needed.
78 | # On other kernels the period depends on the kernel configuration.
79 | #
80 | # A reasonable value for this option is 60 seconds.
81 | tcp-keepalive 0
82 |
83 | # Specify the server verbosity level.
84 | # This can be one of:
85 | # debug (a lot of information, useful for development/testing)
86 | # verbose (many rarely useful info, but not a mess like the debug level)
87 | # notice (moderately verbose, what you want in production probably)
88 | # warning (only very important / critical messages are logged)
89 | loglevel notice
90 |
91 | # Specify the log file name. Also the empty string can be used to force
92 | # Redis to log on the standard output. Note that if you use standard
93 | # output for logging but daemonize, logs will be sent to /dev/null
94 | logfile /var/log/redis/redis-server.log
95 |
96 | # To enable logging to the system logger, just set 'syslog-enabled' to yes,
97 | # and optionally update the other syslog parameters to suit your needs.
98 | # syslog-enabled no
99 |
100 | # Specify the syslog identity.
101 | # syslog-ident redis
102 |
103 | # Specify the syslog facility. Must be USER or between LOCAL0-LOCAL7.
104 | # syslog-facility local0
105 |
106 | # Set the number of databases. The default database is DB 0, you can select
107 | # a different one on a per-connection basis using SELECT <dbid> where
108 | # dbid is a number between 0 and 'databases'-1
109 | databases 16
110 |
111 | ################################ SNAPSHOTTING ################################
112 | #
113 | # Save the DB on disk:
114 | #
115 | #   save <seconds> <changes>
116 | #
117 | # Will save the DB if both the given number of seconds and the given
118 | # number of write operations against the DB occurred.
119 | #
120 | # In the example below the behaviour will be to save:
121 | # after 900 sec (15 min) if at least 1 key changed
122 | # after 300 sec (5 min) if at least 10 keys changed
123 | # after 60 sec if at least 10000 keys changed
124 | #
125 | # Note: you can disable saving at all commenting all the "save" lines.
126 | #
127 | # It is also possible to remove all the previously configured save
128 | # points by adding a save directive with a single empty string argument
129 | # like in the following example:
130 | #
131 | # save ""
132 |
133 | save 900 1
134 | save 300 10
135 | save 60 10000
136 |
137 | # By default Redis will stop accepting writes if RDB snapshots are enabled
138 | # (at least one save point) and the latest background save failed.
139 | # This will make the user aware (in a hard way) that data is not persisting
140 | # on disk properly, otherwise chances are that no one will notice and some
141 | # disaster will happen.
142 | #
143 | # If the background saving process will start working again Redis will
144 | # automatically allow writes again.
145 | #
146 | # However if you have setup your proper monitoring of the Redis server
147 | # and persistence, you may want to disable this feature so that Redis will
148 | # continue to work as usual even if there are problems with disk,
149 | # permissions, and so forth.
150 | stop-writes-on-bgsave-error yes
151 |
152 | # Compress string objects using LZF when dumping .rdb databases?
153 | # For default that's set to 'yes' as it's almost always a win.
154 | # If you want to save some CPU in the saving child set it to 'no' but
155 | # the dataset will likely be bigger if you have compressible values or keys.
156 | rdbcompression yes
157 |
158 | # Since version 5 of RDB a CRC64 checksum is placed at the end of the file.
159 | # This makes the format more resistant to corruption but there is a performance
160 | # hit to pay (around 10%) when saving and loading RDB files, so you can disable it
161 | # for maximum performances.
162 | #
163 | # RDB files created with checksum disabled have a checksum of zero that will
164 | # tell the loading code to skip the check.
165 | rdbchecksum yes
166 |
167 | # The filename where to dump the DB
168 | dbfilename dump.rdb
169 |
170 | # The working directory.
171 | #
172 | # The DB will be written inside this directory, with the filename specified
173 | # above using the 'dbfilename' configuration directive.
174 | #
175 | # The Append Only File will also be created inside this directory.
176 | #
177 | # Note that you must specify a directory here, not a file name.
178 | dir /var/lib/redis
179 |
180 | ################################# REPLICATION #################################
181 |
182 | # Master-Slave replication. Use slaveof to make a Redis instance a copy of
183 | # another Redis server. Note that the configuration is local to the slave
184 | # so for example it is possible to configure the slave to save the DB with a
185 | # different interval, or to listen to another port, and so on.
186 | #
187 | # slaveof <masterip> <masterport>
188 |
189 | # If the master is password protected (using the "requirepass" configuration
190 | # directive below) it is possible to tell the slave to authenticate before
191 | # starting the replication synchronization process, otherwise the master will
192 | # refuse the slave request.
193 | #
194 | # masterauth <master-password>
195 |
196 | # When a slave loses its connection with the master, or when the replication
197 | # is still in progress, the slave can act in two different ways:
198 | #
199 | # 1) if slave-serve-stale-data is set to 'yes' (the default) the slave will
200 | # still reply to client requests, possibly with out of date data, or the
201 | # data set may just be empty if this is the first synchronization.
202 | #
203 | # 2) if slave-serve-stale-data is set to 'no' the slave will reply with
204 | # an error "SYNC with master in progress" to all the kind of commands
205 | # but to INFO and SLAVEOF.
206 | #
207 | slave-serve-stale-data yes
208 |
209 | # You can configure a slave instance to accept writes or not. Writing against
210 | # a slave instance may be useful to store some ephemeral data (because data
211 | # written on a slave will be easily deleted after resync with the master) but
212 | # may also cause problems if clients are writing to it because of a
213 | # misconfiguration.
214 | #
215 | # Since Redis 2.6 by default slaves are read-only.
216 | #
217 | # Note: read only slaves are not designed to be exposed to untrusted clients
218 | # on the internet. It's just a protection layer against misuse of the instance.
219 | # Still a read only slave exports by default all the administrative commands
220 | # such as CONFIG, DEBUG, and so forth. To a limited extent you can improve
221 | # security of read only slaves using 'rename-command' to shadow all the
222 | # administrative / dangerous commands.
223 | slave-read-only yes
224 |
225 | # Slaves send PINGs to server in a predefined interval. It's possible to change
226 | # this interval with the repl_ping_slave_period option. The default value is 10
227 | # seconds.
228 | #
229 | # repl-ping-slave-period 10
230 |
231 | # The following option sets the replication timeout for:
232 | #
233 | # 1) Bulk transfer I/O during SYNC, from the point of view of slave.
234 | # 2) Master timeout from the point of view of slaves (data, pings).
235 | # 3) Slave timeout from the point of view of masters (REPLCONF ACK pings).
236 | #
237 | # It is important to make sure that this value is greater than the value
238 | # specified for repl-ping-slave-period otherwise a timeout will be detected
239 | # every time there is low traffic between the master and the slave.
240 | #
241 | # repl-timeout 60
242 |
243 | # Disable TCP_NODELAY on the slave socket after SYNC?
244 | #
245 | # If you select "yes" Redis will use a smaller number of TCP packets and
246 | # less bandwidth to send data to slaves. But this can add a delay for
247 | # the data to appear on the slave side, up to 40 milliseconds with
248 | # Linux kernels using a default configuration.
249 | #
250 | # If you select "no" the delay for data to appear on the slave side will
251 | # be reduced but more bandwidth will be used for replication.
252 | #
253 | # By default we optimize for low latency, but in very high traffic conditions
254 | # or when the master and slaves are many hops away, turning this to "yes" may
255 | # be a good idea.
256 | repl-disable-tcp-nodelay no
257 |
258 | # Set the replication backlog size. The backlog is a buffer that accumulates
259 | # slave data when slaves are disconnected for some time, so that when a slave
260 | # wants to reconnect again, often a full resync is not needed, but a partial
261 | # resync is enough, just passing the portion of data the slave missed while
262 | # disconnected.
263 | #
264 | # The bigger the replication backlog, the longer the time the slave can be
265 | # disconnected and later be able to perform a partial resynchronization.
266 | #
267 | # The backlog is only allocated once there is at least a slave connected.
268 | #
269 | # repl-backlog-size 1mb
270 |
271 | # After a master has no longer connected slaves for some time, the backlog
272 | # will be freed. The following option configures the amount of seconds that
273 | # need to elapse, starting from the time the last slave disconnected, for
274 | # the backlog buffer to be freed.
275 | #
276 | # A value of 0 means to never release the backlog.
277 | #
278 | # repl-backlog-ttl 3600
279 |
280 | # The slave priority is an integer number published by Redis in the INFO output.
281 | # It is used by Redis Sentinel in order to select a slave to promote into a
282 | # master if the master is no longer working correctly.
283 | #
284 | # A slave with a low priority number is considered better for promotion, so
285 | # for instance if there are three slaves with priority 10, 100, 25 Sentinel will
286 | # pick the one with priority 10, that is the lowest.
287 | #
288 | # However a special priority of 0 marks the slave as not able to perform the
289 | # role of master, so a slave with priority of 0 will never be selected by
290 | # Redis Sentinel for promotion.
291 | #
292 | # By default the priority is 100.
293 | slave-priority 100
294 |
295 | # It is possible for a master to stop accepting writes if there are less than
296 | # N slaves connected, having a lag less or equal than M seconds.
297 | #
298 | # The N slaves need to be in "online" state.
299 | #
300 | # The lag in seconds, that must be <= the specified value, is calculated from
301 | # the last ping received from the slave, that is usually sent every second.
302 | #
303 | # This option does not GUARANTEE that N replicas will accept the write, but
304 | # will limit the window of exposure for lost writes in case not enough slaves
305 | # are available, to the specified number of seconds.
306 | #
307 | # For example to require at least 3 slaves with a lag <= 10 seconds use:
308 | #
309 | # min-slaves-to-write 3
310 | # min-slaves-max-lag 10
311 | #
312 | # Setting one or the other to 0 disables the feature.
313 | #
314 | # By default min-slaves-to-write is set to 0 (feature disabled) and
315 | # min-slaves-max-lag is set to 10.
316 |
317 | ################################## SECURITY ###################################
318 |
319 | # Require clients to issue AUTH before processing any other
320 | # commands. This might be useful in environments in which you do not trust
321 | # others with access to the host running redis-server.
322 | #
323 | # This should stay commented out for backward compatibility and because most
324 | # people do not need auth (e.g. they run their own servers).
325 | #
326 | # Warning: since Redis is pretty fast an outside user can try up to
327 | # 150k passwords per second against a good box. This means that you should
328 | # use a very strong password otherwise it will be very easy to break.
329 | #
330 | requirepass !pemywN%qW%$E3miO6OvKuj@*BR&3sX1
331 |
332 | # Command renaming.
333 | #
334 | # It is possible to change the name of dangerous commands in a shared
335 | # environment. For instance the CONFIG command may be renamed into something
336 | # hard to guess so that it will still be available for internal-use tools
337 | # but not available for general clients.
338 | #
339 | # Example:
340 | #
341 | # rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52
342 | #
343 | # It is also possible to completely kill a command by renaming it into
344 | # an empty string:
345 | #
346 | # rename-command CONFIG ""
347 | #
348 | # Please note that changing the name of commands that are logged into the
349 | # AOF file or transmitted to slaves may cause problems.
350 |
351 | ################################### LIMITS ####################################
352 |
353 | # Set the max number of connected clients at the same time. By default
354 | # this limit is set to 10000 clients, however if the Redis server is not
355 | # able to configure the process file limit to allow for the specified limit
356 | # the max number of allowed clients is set to the current file limit
357 | # minus 32 (as Redis reserves a few file descriptors for internal uses).
358 | #
359 | # Once the limit is reached Redis will close all the new connections sending
360 | # an error 'max number of clients reached'.
361 | #
362 | # maxclients 10000
363 |
364 | # Don't use more memory than the specified amount of bytes.
365 | # When the memory limit is reached Redis will try to remove keys
366 | # according to the eviction policy selected (see maxmemory-policy).
367 | #
368 | # If Redis can't remove keys according to the policy, or if the policy is
369 | # set to 'noeviction', Redis will start to reply with errors to commands
370 | # that would use more memory, like SET, LPUSH, and so on, and will continue
371 | # to reply to read-only commands like GET.
372 | #
373 | # This option is usually useful when using Redis as an LRU cache, or to set
374 | # a hard memory limit for an instance (using the 'noeviction' policy).
375 | #
376 | # WARNING: If you have slaves attached to an instance with maxmemory on,
377 | # the size of the output buffers needed to feed the slaves are subtracted
378 | # from the used memory count, so that network problems / resyncs will
379 | # not trigger a loop where keys are evicted, and in turn the output
380 | # buffer of slaves is full with DELs of keys evicted triggering the deletion
381 | # of more keys, and so forth until the database is completely emptied.
382 | #
383 | # In short... if you have slaves attached it is suggested that you set a lower
384 | # limit for maxmemory so that there is some free RAM on the system for slave
385 | # output buffers (but this is not needed if the policy is 'noeviction').
386 | #
387 | # maxmemory <bytes>
388 |
389 | # MAXMEMORY POLICY: how Redis will select what to remove when maxmemory
390 | # is reached. You can select among five behaviors:
391 | #
392 | # volatile-lru -> remove the key with an expire set using an LRU algorithm
393 | # allkeys-lru -> remove any key according to the LRU algorithm
394 | # volatile-random -> remove a random key with an expire set
395 | # allkeys-random -> remove a random key, any key
396 | # volatile-ttl -> remove the key with the nearest expire time (minor TTL)
397 | # noeviction -> don't expire at all, just return an error on write operations
398 | #
399 | # Note: with any of the above policies, Redis will return an error on write
400 | # operations, when there are not suitable keys for eviction.
401 | #
402 | # At the date of writing this commands are: set setnx setex append
403 | # incr decr rpush lpush rpushx lpushx linsert lset rpoplpush sadd
404 | # sinter sinterstore sunion sunionstore sdiff sdiffstore zadd zincrby
405 | # zunionstore zinterstore hset hsetnx hmset hincrby incrby decrby
406 | # getset mset msetnx exec sort
407 | #
408 | # The default is:
409 | #
410 | # maxmemory-policy volatile-lru
411 |
412 | # LRU and minimal TTL algorithms are not precise algorithms but approximated
413 | # algorithms (in order to save memory), so you can select as well the sample
414 | # size to check. For instance for default Redis will check three keys and
415 | # pick the one that was used less recently, you can change the sample size
416 | # using the following configuration directive.
417 | #
418 | # maxmemory-samples 3
419 |
420 | ############################## APPEND ONLY MODE ###############################
421 |
422 | # By default Redis asynchronously dumps the dataset on disk. This mode is
423 | # good enough in many applications, but an issue with the Redis process or
424 | # a power outage may result into a few minutes of writes lost (depending on
425 | # the configured save points).
426 | #
427 | # The Append Only File is an alternative persistence mode that provides
428 | # much better durability. For instance using the default data fsync policy
429 | # (see later in the config file) Redis can lose just one second of writes in a
430 | # dramatic event like a server power outage, or a single write if something
431 | # wrong with the Redis process itself happens, but the operating system is
432 | # still running correctly.
433 | #
434 | # AOF and RDB persistence can be enabled at the same time without problems.
435 | # If the AOF is enabled on startup Redis will load the AOF, that is the file
436 | # with the better durability guarantees.
437 | #
438 | # Please check http://redis.io/topics/persistence for more information.
439 |
440 | appendonly no
441 |
442 | # The name of the append only file (default: "appendonly.aof")
443 |
444 | appendfilename "appendonly.aof"
445 |
446 | # The fsync() call tells the Operating System to actually write data on disk
447 | # instead of waiting for more data in the output buffer. Some OS will really flush
448 | # data on disk, some other OS will just try to do it ASAP.
449 | #
450 | # Redis supports three different modes:
451 | #
452 | # no: don't fsync, just let the OS flush the data when it wants. Faster.
453 | # always: fsync after every write to the append only log. Slow, Safest.
454 | # everysec: fsync only one time every second. Compromise.
455 | #
456 | # The default is "everysec", as that's usually the right compromise between
457 | # speed and data safety. It's up to you to understand if you can relax this to
458 | # "no" that will let the operating system flush the output buffer when
459 | # it wants, for better performances (but if you can live with the idea of
460 | # some data loss consider the default persistence mode that's snapshotting),
461 | # or on the contrary, use "always" that's very slow but a bit safer than
462 | # everysec.
463 | #
464 | # More details please check the following article:
465 | # http://antirez.com/post/redis-persistence-demystified.html
466 | #
467 | # If unsure, use "everysec".
468 |
469 | # appendfsync always
470 | appendfsync everysec
471 | # appendfsync no
472 |
473 | # When the AOF fsync policy is set to always or everysec, and a background
474 | # saving process (a background save or AOF log background rewriting) is
475 | # performing a lot of I/O against the disk, in some Linux configurations
476 | # Redis may block too long on the fsync() call. Note that there is no fix for
477 | # this currently, as even performing fsync in a different thread will block
478 | # our synchronous write(2) call.
479 | #
480 | # In order to mitigate this problem it's possible to use the following option
481 | # that will prevent fsync() from being called in the main process while a
482 | # BGSAVE or BGREWRITEAOF is in progress.
483 | #
484 | # This means that while another child is saving, the durability of Redis is
485 | # the same as "appendfsync none". In practical terms, this means that it is
486 | # possible to lose up to 30 seconds of log in the worst scenario (with the
487 | # default Linux settings).
488 | #
489 | # If you have latency problems turn this to "yes". Otherwise leave it as
490 | # "no" that is the safest pick from the point of view of durability.
491 |
492 | no-appendfsync-on-rewrite no
493 |
494 | # Automatic rewrite of the append only file.
495 | # Redis is able to automatically rewrite the log file implicitly calling
496 | # BGREWRITEAOF when the AOF log size grows by the specified percentage.
497 | #
498 | # This is how it works: Redis remembers the size of the AOF file after the
499 | # latest rewrite (if no rewrite has happened since the restart, the size of
500 | # the AOF at startup is used).
501 | #
502 | # This base size is compared to the current size. If the current size is
503 | # bigger than the specified percentage, the rewrite is triggered. Also
504 | # you need to specify a minimal size for the AOF file to be rewritten, this
505 | # is useful to avoid rewriting the AOF file even if the percentage increase
506 | # is reached but it is still pretty small.
507 | #
508 | # Specify a percentage of zero in order to disable the automatic AOF
509 | # rewrite feature.
510 |
511 | auto-aof-rewrite-percentage 100
512 | auto-aof-rewrite-min-size 64mb
513 |
514 | ################################ LUA SCRIPTING ###############################
515 |
516 | # Max execution time of a Lua script in milliseconds.
517 | #
518 | # If the maximum execution time is reached Redis will log that a script is
519 | # still in execution after the maximum allowed time and will start to
520 | # reply to queries with an error.
521 | #
522 | # When a long running script exceeds the maximum execution time only the
523 | # SCRIPT KILL and SHUTDOWN NOSAVE commands are available. The first can be
524 | # used to stop a script that has not yet called any write commands. The second
525 | # is the only way to shut down the server in the case a write command was
526 | # already issued by the script but the user doesn't want to wait for the natural
527 | # termination of the script.
528 | #
529 | # Set it to 0 or a negative value for unlimited execution without warnings.
530 | lua-time-limit 5000
531 |
532 | ################################## SLOW LOG ###################################
533 |
534 | # The Redis Slow Log is a system to log queries that exceeded a specified
535 | # execution time. The execution time does not include the I/O operations
536 | # like talking with the client, sending the reply and so forth,
537 | # but just the time needed to actually execute the command (this is the only
538 | # stage of command execution where the thread is blocked and can not serve
539 | # other requests in the meantime).
540 | #
541 | # You can configure the slow log with two parameters: one tells Redis
542 | # what is the execution time, in microseconds, to exceed in order for the
543 | # command to get logged, and the other parameter is the length of the
544 | # slow log. When a new command is logged the oldest one is removed from the
545 | # queue of logged commands.
546 |
547 | # The following time is expressed in microseconds, so 1000000 is equivalent
548 | # to one second. Note that a negative number disables the slow log, while
549 | # a value of zero forces the logging of every command.
550 | slowlog-log-slower-than 10000
551 |
552 | # There is no limit to this length. Just be aware that it will consume memory.
553 | # You can reclaim memory used by the slow log with SLOWLOG RESET.
554 | slowlog-max-len 128
555 |
556 | ############################# Event notification ##############################
557 |
558 | # Redis can notify Pub/Sub clients about events happening in the key space.
559 | # This feature is documented at http://redis.io/topics/keyspace-events
560 | #
561 | # For instance if keyspace events notification is enabled, and a client
562 | # performs a DEL operation on key "foo" stored in the Database 0, two
563 | # messages will be published via Pub/Sub:
564 | #
565 | # PUBLISH __keyspace@0__:foo del
566 | # PUBLISH __keyevent@0__:del foo
567 | #
568 | # It is possible to select the events that Redis will notify among a set
569 | # of classes. Every class is identified by a single character:
570 | #
571 | # K Keyspace events, published with __keyspace@<db>__ prefix.
572 | # E Keyevent events, published with __keyevent@<db>__ prefix.
573 | # g Generic commands (non-type specific) like DEL, EXPIRE, RENAME, ...
574 | # $ String commands
575 | # l List commands
576 | # s Set commands
577 | # h Hash commands
578 | # z Sorted set commands
579 | # x Expired events (events generated every time a key expires)
580 | # e Evicted events (events generated when a key is evicted for maxmemory)
581 | # A Alias for g$lshzxe, so that the "AKE" string means all the events.
582 | #
583 | # The "notify-keyspace-events" takes as argument a string that is composed
584 | # by zero or multiple characters. The empty string means that notifications
585 | # are disabled at all.
586 | #
587 | # Example: to enable list and generic events, from the point of view of the
588 | # event name, use:
589 | #
590 | # notify-keyspace-events Elg
591 | #
592 | # Example 2: to get the stream of the expired keys subscribing to channel
593 | # name __keyevent@0__:expired use:
594 | #
595 | # notify-keyspace-events Ex
596 | #
597 | # By default all notifications are disabled because most users don't need
598 | # this feature and the feature has some overhead. Note that if you don't
599 | # specify at least one of K or E, no events will be delivered.
600 | notify-keyspace-events ""
601 |
602 | ############################### ADVANCED CONFIG ###############################
603 |
604 | # Hashes are encoded using a memory efficient data structure when they have a
605 | # small number of entries, and the biggest entry does not exceed a given
606 | # threshold. These thresholds can be configured using the following directives.
607 | hash-max-ziplist-entries 512
608 | hash-max-ziplist-value 64
609 |
610 | # Similarly to hashes, small lists are also encoded in a special way in order
611 | # to save a lot of space. The special representation is only used when
612 | # you are under the following limits:
613 | list-max-ziplist-entries 512
614 | list-max-ziplist-value 64
615 |
616 | # Sets have a special encoding in just one case: when a set is composed
617 | # of just strings that happens to be integers in radix 10 in the range
618 | # of 64 bit signed integers.
619 | # The following configuration setting sets the limit in the size of the
620 | # set in order to use this special memory saving encoding.
621 | set-max-intset-entries 512
622 |
623 | # Similarly to hashes and lists, sorted sets are also specially encoded in
624 | # order to save a lot of space. This encoding is only used when the length and
625 | # elements of a sorted set are below the following limits:
626 | zset-max-ziplist-entries 128
627 | zset-max-ziplist-value 64
628 |
629 | # Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in
630 | # order to help rehashing the main Redis hash table (the one mapping top-level
631 | # keys to values). The hash table implementation Redis uses (see dict.c)
632 | # performs a lazy rehashing: the more operation you run into a hash table
633 | # that is rehashing, the more rehashing "steps" are performed, so if the
634 | # server is idle the rehashing is never complete and some more memory is used
635 | # by the hash table.
636 | #
637 | # The default is to use this millisecond 10 times every second in order to
638 | # actively rehash the main dictionaries, freeing memory when possible.
639 | #
640 | # If unsure:
641 | # use "activerehashing no" if you have hard latency requirements and it is
642 | # not a good thing in your environment that Redis can reply from time to time
643 | # to queries with 2 milliseconds delay.
644 | #
645 | # use "activerehashing yes" if you don't have such hard requirements but
646 | # want to free memory asap when possible.
647 | activerehashing yes
648 |
649 | # The client output buffer limits can be used to force disconnection of clients
650 | # that are not reading data from the server fast enough for some reason (a
651 | # common reason is that a Pub/Sub client can't consume messages as fast as the
652 | # publisher can produce them).
653 | #
654 | # The limit can be set differently for the three different classes of clients:
655 | #
656 | # normal -> normal clients
657 | # slave -> slave clients and MONITOR clients
658 | # pubsub -> clients subscribed to at least one pubsub channel or pattern
659 | #
660 | # The syntax of every client-output-buffer-limit directive is the following:
661 | #
662 | # client-output-buffer-limit <class> <hard limit> <soft limit> <soft seconds>
663 | #
664 | # A client is immediately disconnected once the hard limit is reached, or if
665 | # the soft limit is reached and remains reached for the specified number of
666 | # seconds (continuously).
667 | # So for instance if the hard limit is 32 megabytes and the soft limit is
668 | # 16 megabytes / 10 seconds, the client will get disconnected immediately
669 | # if the size of the output buffers reach 32 megabytes, but will also get
670 | # disconnected if the client reaches 16 megabytes and continuously overcomes
671 | # the limit for 10 seconds.
672 | #
673 | # By default normal clients are not limited because they don't receive data
674 | # without asking (in a push way), but just after a request, so only
675 | # asynchronous clients may create a scenario where data is requested faster
676 | # than it can read.
677 | #
678 | # Instead there is a default limit for pubsub and slave clients, since
679 | # subscribers and slaves receive data in a push fashion.
680 | #
681 | # Both the hard or the soft limit can be disabled by setting them to zero.
682 | client-output-buffer-limit normal 0 0 0
683 | client-output-buffer-limit slave 256mb 64mb 60
684 | client-output-buffer-limit pubsub 32mb 8mb 60
685 |
686 | # Redis calls an internal function to perform many background tasks, like
687 | # closing connections of clients in timeout, purging expired keys that are
688 | # never requested, and so forth.
689 | #
690 | # Not all tasks are performed with the same frequency, but Redis checks for
691 | # tasks to perform according to the specified "hz" value.
692 | #
693 | # By default "hz" is set to 10. Raising the value will use more CPU when
694 | # Redis is idle, but at the same time will make Redis more responsive when
695 | # there are many keys expiring at the same time, and timeouts may be
696 | # handled with more precision.
697 | #
698 | # The range is between 1 and 500, however a value over 100 is usually not
699 | # a good idea. Most users should use the default of 10 and raise this up to
700 | # 100 only in environments where very low latency is required.
701 | hz 10
702 |
703 | # When a child rewrites the AOF file, if the following option is enabled
704 | # the file will be fsync-ed every 32 MB of data generated. This is useful
705 | # in order to commit the file to the disk more incrementally and avoid
706 | # big latency spikes.
707 | aof-rewrite-incremental-fsync yes
708 |
709 |
--------------------------------------------------------------------------------