├── .gitattributes
├── README.md
├── continuous_evaluation_py23
├── build_paddle.xsh
├── models
│ └── README.md
├── main.xsh
├── README.md
├── tools
│ ├── week_report.sh
│ ├── send_email.sh
│ ├── wiki_api.py
│ ├── teamcity_api.py
│ ├── gen_report.py
│ └── html.py
├── init_python.sh
├── config.py
├── utils.py
├── build_paddle.py
├── _config.py
├── repo.py
├── db.py
├── persistence.py
├── analysis_kpis.py
├── eva.py
├── web
│ ├── main.py
│ ├── api.py
│ └── view.py
├── Dockerfile
├── kpi.py
└── main.py
├── .gitmodules
├── models
└── README.md
├── tools
├── week_report.sh
├── send_email.sh
├── wiki_api.py
├── teamcity_api.py
├── gen_report.py
└── html.py
├── .pre-commit-config.yaml
├── .travis.yml
├── repo.xsh
├── config.py
├── utils.xsh
├── _config.py
├── db.py
├── persistence.py
├── analysis_kpis.py
├── web
├── main.py
├── api.py
└── view.py
├── eva.xsh
├── Dockerfile
├── kpi.py
└── main.xsh
/.gitattributes:
--------------------------------------------------------------------------------
1 | *.xsh linguist-language=Python
2 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Paddle Continuous Evaluate Framework
2 |
--------------------------------------------------------------------------------
/continuous_evaluation_py23/build_paddle.xsh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | python build_paddle.py
4 |
--------------------------------------------------------------------------------
/.gitmodules:
--------------------------------------------------------------------------------
1 | [submodule "web/pypage"]
2 | path = web/pypage
3 | url = https://github.com/Superjomn/pypage.git
4 |
--------------------------------------------------------------------------------
/continuous_evaluation_py23/models/README.md:
--------------------------------------------------------------------------------
1 | This directory contains models that will be evaluated continuously.
2 |
--------------------------------------------------------------------------------
/continuous_evaluation_py23/main.xsh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | args=""
4 | if [ $# -gt 0 ]; then
5 | args=$*
6 | fi
7 |
8 | python main.py ${args}
9 |
--------------------------------------------------------------------------------
/models/README.md:
--------------------------------------------------------------------------------
1 | This directory contains models that will be evaluated continuously.
2 | See https://github.com/PaddlePaddle/paddle-ce-latest-kpis
3 |
--------------------------------------------------------------------------------
/continuous_evaluation_py23/README.md:
--------------------------------------------------------------------------------
1 | # Paddle Continuous Evaluate Framework
2 | ## Howtos
3 |
4 | ### Contribute
5 | - Run `pre-commit run -a` before your PR, this will help to format code automatically
6 |
--------------------------------------------------------------------------------
/tools/week_report.sh:
--------------------------------------------------------------------------------
1 | docker exec -it teamcity-agent-week bash -c "source ~/.bashrc; cd /workspace/week/tools; python wiki_api.py && python teamcity_api.py && python3 gen_report.py >> week.log 2>&1 "
2 | sh send_email.sh >> week.log 2>&1
3 |
--------------------------------------------------------------------------------
/continuous_evaluation_py23/tools/week_report.sh:
--------------------------------------------------------------------------------
1 | docker exec -it teamcity-agent-week bash -c "source ~/.bashrc; cd /workspace/week/tools; python wiki_api.py && python teamcity_api.py && python3 gen_report.py >> week.log 2>&1 "
2 | sh send_email.sh >> week.log 2>&1
3 |
--------------------------------------------------------------------------------
/tools/send_email.sh:
--------------------------------------------------------------------------------
1 | #array=(guochaorong@baidu.com)
2 | array=(paddle-dev@baidu.com)
3 | for mail in ${array[@]}
4 | do
5 | cat index.html | formail -I "Content-type:text/html;charset=gb2312" -I "Subject: PaddlePaddle CE weekly report." -I "To:"$mail | /usr/sbin/sendmail -t guochaorong123@163.com
6 | done
7 |
--------------------------------------------------------------------------------
/continuous_evaluation_py23/tools/send_email.sh:
--------------------------------------------------------------------------------
1 | #array=(guochaorong@baidu.com)
2 | array=(paddle-dev@baidu.com)
3 | for mail in ${array[@]}
4 | do
5 | cat index.html | formail -I "Content-type:text/html;charset=gb2312" -I "Subject: PaddlePaddle CE weekly report." -I "To:"$mail | /usr/sbin/sendmail -t guochaorong123@163.com
6 | done
7 |
--------------------------------------------------------------------------------
/continuous_evaluation_py23/init_python.sh:
--------------------------------------------------------------------------------
1 | export LD_LIBRARY_PATH=/opt/_internal/cpython-3.7.0/lib/:${LD_LIBRARY_PATH}
2 | export PATH=/opt/_internal/cpython-3.7.0/bin/:${PATH}
3 | export PYTHON_FLAGS='-DPYTHON_EXECUTABLE:FILEPATH=/opt/_internal/cpython-3.7.0/bin/python3.7
4 | -DPYTHON_INCLUDE_DIR:PATH=/opt/_internal/cpython-3.7.0/include/python3.7m
5 | -DPYTHON_LIBRARIES:FILEPATH=/opt/_internal/cpython-3.7.0/lib/libpython3.so'
6 |
--------------------------------------------------------------------------------
/.pre-commit-config.yaml:
--------------------------------------------------------------------------------
1 | repos:
2 | - repo: https://github.com/PaddlePaddle/mirrors-yapf.git
3 | sha: 0d79c0c469bab64f7229c9aca2b1186ef47f0e37
4 | hooks:
5 | - id: yapf
6 | files: (.*\.(py|bzl)|BUILD|.*\.BUILD|WORKSPACE)$
7 | - repo: https://github.com/pre-commit/pre-commit-hooks
8 | sha: 5bf6c09bfa1297d3692cadd621ef95f1284e33c0
9 | hooks:
10 | - id: check-added-large-files
11 | - id: check-merge-conflict
12 | - id: check-symlinks
13 | - id: end-of-file-fixer
14 |
--------------------------------------------------------------------------------
/.travis.yml:
--------------------------------------------------------------------------------
1 | language: python
2 | cache:
3 | - pip
4 | - ccache
5 | # - yarn
6 | # - npm
7 | sudo: required
8 | dist: trusty
9 |
10 | os:
11 | - linux
12 |
13 | env:
14 | - JOB=test
15 |
16 | addons:
17 | apt:
18 | sources:
19 | - ubuntu-toolchain-r-test
20 | packages:
21 | # - gcc-5
22 | # - g++-5
23 | - git
24 | - python3
25 | - python3-pip
26 | - ccache
27 |
28 | install:
29 | - sudo pip3 install -r requirements.txt
30 |
31 | before_install:
32 |
33 | script:
34 | - if [[ "$JOB" == "test" ]]; then /bin/bash ./tests.xsh; fi
35 |
36 | notifications:
37 | email:
38 | on_success: change
39 | on_failure: always
40 |
--------------------------------------------------------------------------------
/repo.xsh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env xonsh
2 | '''
3 | some utils for clone repo, commit.
4 | '''
5 | import sys; sys.path.insert(0, '')
6 | from utils import PathRecover
7 | import _config
8 |
9 |
10 | def clone(url, dst):
11 | '''
12 | url: url of a git repo.
13 | dst: a abstract path in local file system.
14 | '''
15 | git clone @(url) @(dst)
16 |
17 | def get_commit(local_repo_path, short=False):
18 | with PathRecover():
19 | cd @(local_repo_path)
20 | flags = '%h' if short else '%H'
21 | if short:
22 | commit = $(git log -1 --pretty=format:%h).strip()
23 | else:
24 | commit = $(git log -1 --pretty=format:%H).strip()
25 | return commit
26 |
27 | def get_commit_date(local_repo_path):
28 | ''' get UNIX timestamp '''
29 | with PathRecover():
30 | cd @(local_repo_path)
31 | date = $(git log -1 --pretty=format:%ct)
32 | return date
33 |
--------------------------------------------------------------------------------
/config.py:
--------------------------------------------------------------------------------
1 | import os
2 | import logging
3 | import shutil
4 |
5 | workspace = os.path.dirname(os.path.realpath(__file__)) # pwd
6 | pjoin = os.path.join
7 |
8 | # the directory structure
9 | # Paddle/
10 | # modelce/
11 | # tasks
12 | # DEBUG
13 | relative_path = os.environ.get('relative_path', '..')
14 | paddle_path = pjoin(workspace, relative_path)
15 | #paddle_path = '/chunwei/Paddle'
16 |
17 | baseline_repo_url = os.environ.get('repo_url', 'git@github.com:PaddlePaddle/paddle-ce-latest-kpis.git')
18 |
19 | baseline_path = pjoin(workspace, 'tasks')
20 |
21 | tmp_root = pjoin(workspace, "tmp")
22 |
23 | # if the latest kpi is better than the best kpi by more than kpi_update_threshold, update the best kpi.
24 | kpi_update_threshold = 0.3
25 |
26 | # mongodb config
27 | db_name = "ce"
28 | # for test, use following config
29 | # db_host = 'ce.paddlepaddle.org'
30 | # db_port = 8006
31 |
32 |
33 | db_host = os.environ.get('db_host', '127.0.0.1')
34 | db_port = os.environ.get('db_port', 27017)
35 | table_name = os.environ.get('table_name', 'logs')
36 |
--------------------------------------------------------------------------------
/continuous_evaluation_py23/config.py:
--------------------------------------------------------------------------------
1 | import os
2 | import logging
3 | import shutil
4 |
5 | workspace = os.path.dirname(os.path.realpath(__file__)) # pwd
6 | pjoin = os.path.join
7 |
8 | # the directory structure
9 | # Paddle/
10 | # modelce/
11 | # tasks
12 | # DEBUG
13 | relative_path = os.environ.get('relative_path', '..')
14 | paddle_path = pjoin(workspace, relative_path)
15 | #paddle_path = '/chunwei/Paddle'
16 |
17 | baseline_repo_url = os.environ.get('repo_url', 'git@github.com:PaddlePaddle/paddle-ce-latest-kpis.git')
18 |
19 | baseline_path = pjoin(workspace, 'tasks')
20 |
21 | tmp_root = pjoin(workspace, "tmp")
22 |
23 | # if the latest kpi is better than best kpi by 1%, update the best kpi.
24 | kpi_update_threshold = 0.3
25 |
26 | # mongodb config
27 | db_name = "ce"
28 | # for test, use following config
29 | # db_host = 'ce.paddlepaddle.org'
30 | # db_port = 8006
31 |
32 |
33 | db_host = os.environ.get('db_host', '127.0.0.1')
34 | db_port = os.environ.get('db_port', 27017)
35 | table_name = os.environ.get('table_name', 'logs')
36 |
--------------------------------------------------------------------------------
/utils.xsh:
--------------------------------------------------------------------------------
1 | import logging
2 |
3 |
class log:
    """Minimal logging facade: static helpers over one shared logger."""

    @staticmethod
    def logger():
        """Return the shared logger, attaching a StreamHandler once."""
        mylogger = logging.getLogger(__name__)
        mylogger.setLevel(logging.INFO)
        # Guard against attaching duplicate handlers on repeated calls.
        if not mylogger.handlers:
            ch = logging.StreamHandler()
            mylogger.addHandler(ch)
        return mylogger

    @staticmethod
    def info(*args):
        log.logger().info(' '.join(str(s) for s in args))

    @staticmethod
    def warn(*args):
        log.logger().warning(' '.join(str(s) for s in args))

    # BUG FIX: error() and debug() were missing @staticmethod; under
    # Python 2 calling log.error(...) on the class raised an
    # "unbound method" TypeError, inconsistent with info()/warn().
    @staticmethod
    def error(*args):
        log.logger().error(' '.join(str(s) for s in args))

    @staticmethod
    def debug(*args):
        log.logger().debug(' '.join(str(s) for s in args))
27 |
28 |
29 | class PathRecover(object):
30 | ''' will jump back to the original path. '''
31 | def __enter__(self):
32 | self.pre_path = $(pwd).strip()
33 |
34 | def __exit__(self, type, value, trace):
35 | if $(pwd).strip() != self.pre_path:
36 | cd @(self.pre_path)
37 |
--------------------------------------------------------------------------------
/continuous_evaluation_py23/utils.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import os
3 |
4 |
class log:
    """Minimal logging facade: static helpers over one shared logger."""

    @staticmethod
    def logger():
        """Return the shared logger, attaching a StreamHandler once."""
        mylogger = logging.getLogger(__name__)
        mylogger.setLevel(logging.INFO)
        # Guard against attaching duplicate handlers on repeated calls.
        if not mylogger.handlers:
            ch = logging.StreamHandler()
            mylogger.addHandler(ch)
        return mylogger

    @staticmethod
    def info(*args):
        log.logger().info(' '.join(str(s) for s in args))

    @staticmethod
    def warn(*args):
        log.logger().warning(' '.join(str(s) for s in args))

    # BUG FIX: error() and debug() were missing @staticmethod; under
    # Python 2 (this is the py23 tree) calling log.error(...) on the
    # class raised an "unbound method" TypeError, inconsistent with
    # info()/warn().
    @staticmethod
    def error(*args):
        log.logger().error(' '.join(str(s) for s in args))

    @staticmethod
    def debug(*args):
        log.logger().debug(' '.join(str(s) for s in args))
28 |
29 |
class PathRecover(object):
    ''' Context manager that restores the original working directory
    on exit. '''

    def __enter__(self):
        # BUG FIX: the original recorded os.path.dirname(__file__) --
        # the directory of this module, NOT the current working
        # directory -- and __exit__ recomputed the same module dir, so
        # the comparison was always equal and the caller's cwd was
        # never restored.  Record the actual cwd instead.
        self.pre_path = os.getcwd()
        return self

    def __exit__(self, type, value, trace):
        # Only chdir back if something inside the block moved us.
        if os.getcwd() != self.pre_path:
            os.chdir(self.pre_path)
40 |
--------------------------------------------------------------------------------
/continuous_evaluation_py23/build_paddle.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python
# Build PaddlePaddle from source and install the resulting wheel.
#
# Flow: wipe any previous build/ tree, run paddle_build.sh with the CE
# build flags, then pip-install the wheel it produced.

# NOTE(review): these two module-level flags are xonsh environment
# switches; in plain Python they have no effect -- presumably leftovers
# from the original build_paddle.xsh version of this script.
RAISE_SUBPROC_ERROR = True
XONSH_SHOW_TRACEBACK = True

import sys; sys.path.insert(0, '')
import _config
import subprocess
import os
import shutil


# Work inside the Paddle source tree configured in _config.
os.chdir(_config.paddle_path)
build = "build"
if os.path.exists(build):
    shutil.rmtree(build)
os.makedirs(build)
os.chdir(build)
# Remove stale wheel outputs from a previous build.
if os.path.exists("python/dist"):
    shutil.rmtree("python/dist")
# NOTE(review): this creates "python/dist_zy", which nothing below
# consumes -- the pip install at the bottom reads "python/dist".
# Looks like a leftover/typo; confirm whether "python/dist" was meant.
os.makedirs("python/dist_zy")
if os.path.exists("python/build"):
    shutil.rmtree("python/build")

#WITH_TESTING = os.environ.get('WITH_TESTING', 'OFF')

# Invoke Paddle's own build script with the CE configuration; the
# options are passed as environment variables prefixed on the command.
subprocess.call("WITH_TESTING=ON "
        "WITH_GOLANG=OFF "
        "CMAKE_BUILD_TYPE=Release "
        "WITH_GPU=ON "
        "WITH_STYLE_CHECK=OFF "
        "WITH_FLUID_ONLY=ON "
        "WITH_MKL=ON "
        "WITH_MKLDNN=ON "
        "WITH_DISTRIBUTE=ON "
        "WITH_ANAKIN=OFF "
        "WITH_INFERENCE_API_TEST=OFF "
        "paddle/scripts/paddle_build.sh build",
        shell=True,
        cwd=_config.paddle_path
        )


# Install the freshly built wheel into the current Python environment.
os.chdir(_config.paddle_path)
os.chdir(build)
cmd = "pip install --upgrade python/dist/*.whl"
os.system(cmd)
47 |
--------------------------------------------------------------------------------
/_config.py:
--------------------------------------------------------------------------------
1 | import os
2 | import logging
3 | import shutil
4 |
5 | workspace = os.path.dirname(os.path.realpath(__file__)) # pwd
6 | pjoin = os.path.join
7 |
8 | # the directory structure
9 | # Paddle/
10 | # modelce/
11 | # tasks
12 | # DEBUG
13 | relative_path = os.environ.get('relative_path', '..')
14 | paddle_path = pjoin(workspace, relative_path)
15 | #paddle_path = '/chunwei/Paddle'
16 |
17 | baseline_repo_url = os.environ.get('repo_url', 'git@github.com:PaddlePaddle/paddle-ce-latest-kpis.git')
18 |
19 | baseline_path = pjoin(workspace, 'tasks')
20 |
21 | tmp_root = pjoin(workspace, "tmp")
22 |
23 | # if the latest kpi is better than the best kpi by more than kpi_update_threshold, update the best kpi.
24 | kpi_update_threshold = 0.3
25 |
26 | # mongodb config
27 | #db_name = "ce"
28 | db_name = os.environ.get('db_name', 'ce')
29 | # for test, use following config
30 | # db_host = 'ce.paddlepaddle.org'
31 | # db_port = 8006
32 |
33 | db_host = os.environ.get('db_host', '127.0.0.1')
34 | db_port = os.environ.get('db_port', 27017)
35 | table_name = os.environ.get('table_name', 'logs')
36 |
37 | develop_evaluate = os.environ.get('develop_evaluate', 'False')
38 |
39 | develop_db_name = os.environ.get('develop_db_name', 'ce_develop')
40 | develop_db_host = os.environ.get('develop_db_host', '127.0.0.1')
41 | develop_db_port = os.environ.get('develop_db_port', 27017)
42 | develop_table_name = os.environ.get('develop_table_name', 'develop_logs')
43 |
--------------------------------------------------------------------------------
/continuous_evaluation_py23/_config.py:
--------------------------------------------------------------------------------
1 | import os
2 | import logging
3 | import shutil
4 |
5 | workspace = os.path.dirname(os.path.realpath(__file__)) # pwd
6 | pjoin = os.path.join
7 |
8 | # the directory structure
9 | # Paddle/
10 | # modelce/
11 | # tasks
12 | # DEBUG
13 | relative_path = os.environ.get('relative_path', '..')
14 | paddle_path = pjoin(workspace, relative_path)
15 | #paddle_path = '/chunwei/Paddle'
16 |
17 | baseline_repo_url = os.environ.get('repo_url', 'git@github.com:PaddlePaddle/paddle-ce-latest-kpis.git')
18 |
19 | baseline_path = pjoin(workspace, 'tasks')
20 |
21 | tmp_root = pjoin(workspace, "tmp")
22 |
23 | # if the latest kpi is better than best kpi by 1%, update the best kpi.
24 | kpi_update_threshold = 0.3
25 |
26 | # mongodb config
27 | #db_name = "ce"
28 | db_name = os.environ.get('db_name', 'ce')
29 | # for test, use following config
30 | # db_host = 'ce.paddlepaddle.org'
31 | # db_port = 8006
32 |
33 | db_host = os.environ.get('db_host', '127.0.0.1')
34 | db_port = os.environ.get('db_port', 27017)
35 | table_name = os.environ.get('table_name', 'logs')
36 |
37 | develop_evaluate = os.environ.get('develop_evaluate', 'False')
38 |
39 | develop_db_name = os.environ.get('develop_db_name', 'ce_develop')
40 | develop_db_host = os.environ.get('develop_db_host', '127.0.0.1')
41 | develop_db_port = os.environ.get('develop_db_port', 27017)
42 | develop_table_name = os.environ.get('develop_table_name', 'develop_logs')
43 |
44 | #system linux or windows
45 | system = os.environ.get('system', 'linux')
46 |
--------------------------------------------------------------------------------
/continuous_evaluation_py23/repo.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | '''
3 | some utils for clone repo, commit.
4 | '''
5 | import sys; sys.path.insert(0, '')
6 | from utils import PathRecover
7 | import _config
8 | import os
9 | import subprocess
10 |
11 |
def clone(url, dst):
    '''
    Clone a git repository.

    url: str, url of a git repo.
    dst: str, destination path in the local file system.
    '''
    # BUG FIX: the command used to be the literal string
    # "git clone url dst" -- the url/dst arguments were never
    # interpolated, so the intended repository was never cloned.
    # Use an argument list to avoid shell-quoting issues as well.
    subprocess.call(["git", "clone", url, dst])
19 |
20 |
def get_commit(local_repo_path, short=False):
    '''
    Return the latest commit hash of a local git repository.

    local_repo_path: str, path to the repository.
    short: bool, if True return the abbreviated hash.

    Returns the commit hash as a string, or [] when the git command
    fails (the empty list is kept for backward compatibility with
    existing callers that check the falsy sentinel).
    '''
    with PathRecover():
        os.chdir(local_repo_path)
        # %h -> abbreviated hash, %H -> full hash.  The original
        # computed this flag but never used it, duplicating both
        # branches instead.
        fmt = '%h' if short else '%H'
        cmd = "git log -1 --pretty=format:" + fmt
        try:
            # Run the command once; the original ran it twice
            # (os.system for the status check, check_output for the
            # text), printing the result to stdout as a side effect.
            output = subprocess.check_output(cmd, shell=True)
        except subprocess.CalledProcessError:
            return []
        return output.decode().strip()
43 |
44 |
def get_commit_date(local_repo_path):
    ''' Return the UNIX timestamp (as a string) of the latest commit.

    local_repo_path: str, path to the repository.

    Returns "" when the git command fails, matching the original
    error behaviour.
    '''
    with PathRecover():
        os.chdir(local_repo_path)
        cmd = "git log -1 --pretty=format:%ct"
        try:
            # Single invocation; the original ran the command twice
            # (os.system + check_output).
            output = subprocess.check_output(cmd, shell=True)
        except subprocess.CalledProcessError:
            return ""
        return output.decode()
57 |
--------------------------------------------------------------------------------
/db.py:
--------------------------------------------------------------------------------
1 | import bson
2 | from pymongo import MongoClient
3 |
4 |
class MongoDB(object):
    """Thin convenience wrapper around a pymongo database handle."""

    def __init__(self, dbname, host='localhost', port=27017):
        # port may arrive as a string (read from the environment in
        # _config), so coerce it to int before handing it to MongoClient.
        self.client = MongoClient(host, int(port))
        self.db = getattr(self.client, dbname)

    def table(self, table):
        ''' table might be a string or a Mongo table object. '''
        if isinstance(table, str):
            table = getattr(self.db, table)
        return table

    # def insert_one(self, table, key, json):
    #     key['_value'] = json
    #     self.table(table).insert_one(key)
    def insert_one(self, table, record):
        # Insert a single record (dict) into the given collection.
        self.table(table).insert_one(record)

    def remove(self, table, cond):
        # NOTE(review): Collection.remove() is deprecated in pymongo 3
        # and removed in pymongo 4; delete_many() is the replacement --
        # confirm the pinned pymongo version before upgrading.
        self.table(table).remove(cond)

    def find_one(self, table, cond):
        '''
        Find one record.

        cond: dic
            something like {'author': 'Mike'}
        '''
        return self.table(table).find_one(cond)

    def find_sections(self, table, cond, sections, key, limit=-1):
        # Projection query: `sections` selects fields, `key` is the
        # sort specification; limit == -1 means "no limit".
        if limit == -1:
            return self.table(table).find(cond, sections).sort(key)
        else:
            return self.table(table).find(cond, sections).sort(key).limit(limit)

    def find(self, table, cond):
        '''
        Find records (returns a pymongo cursor).

        cond: dic
            something like {'author': 'Mike'}
        '''
        return self.table(table).find(cond)

    def finds(self, table, cond):
        '''
        Find records, materialized into a list.

        cond: dic
            something like {'author': 'Mike'}
        '''
        return [r for r in self.table(table).find(cond)]
58 |
59 |
60 | if __name__ == '__main__':
61 | import _config
62 | from pprint import pprint
63 |
64 | db = MongoDB(_config.db_name)
65 | records = db.finds(_config.table_name, {'type': 'kpi'})
66 | pprint(records)
67 |
--------------------------------------------------------------------------------
/tools/wiki_api.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python
# -*- coding: utf-8 -*-
########################################################################
#
# Copyright (c) 2018 Baidu.com, Inc. All Rights Reserved
#
########################################################################
"""
File: wiki_api.py
Author: guochaorong(guochaorong@baidu.com)
Date: 2018/08/19 13:45:30

Scrape the current month's CE duty-report wiki page and dump the
duty records to wiki.txt.  Python 2 only: relies on urllib2 and
sys.setdefaultencoding.
"""
import numpy as np

import urllib
import urllib2
import re
from bs4 import BeautifulSoup
from datetime import datetime

import sys
reload(sys)
#for UnicodeEncodeError
sys.setdefaultencoding("utf8")


def SaveFile(content, filename):
    """Append *content* as one line to wikiData/<filename>."""
    f = open("wikiData/" + filename, "a")
    f.write(str(content) + "\n")
    f.close()


def SpideWiki():
    """Spide the daily report for this week.

    Returns up to five duty records as a newline-separated string,
    or None when fetching/parsing fails.
    """
    user_agent = 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'
    headers = {'User-Agent': user_agent}
    record_info = ''
    mm_str = datetime.now().strftime("%Y-%m")
    try:
        url = "https://github.com/PaddlePaddle/continuous_evaluation/wiki/%s" % mm_str
        request = urllib2.Request(url, headers=headers)
        response = urllib2.urlopen(request)
        wikiHtml = response.read()
        soup = BeautifulSoup(
            str(wikiHtml), 'html.parser', from_encoding='utf-8')
        div = soup.find(name='div', id='wiki-body')
        ps = div.find_all(
            name='p', limit=100, recursive=True)  #only direct children
        for p in ps:
            pText = p.get_text()
            record_info += (pText.encode('utf-8').strip())
        # Split on the duty marker; `re` is already imported at the top
        # of the file (the body previously re-imported it redundantly).
        result = re.split("值班人:", record_info)
        infos = ''
        for x in result[1:6]:
            # print() works as a parenthesized expression on Python 2
            # and as a function on Python 3.
            print(x)
            # BUG FIX: the separator literal was broken across two
            # source lines (a syntax error); join records with '\n'.
            infos += x + '\n'
        return infos
    except urllib2.URLError as e:
        if hasattr(e, "code"):
            print(e.code)
        if hasattr(e, "reason"):
            print(e.reason)


# BUG FIX: SpideWiki() returns None on failure; writing None raised a
# TypeError.  Fall back to an empty string.
records = SpideWiki()
with open("wiki.txt", 'w') as f:
    f.write(records or '')
69 |
--------------------------------------------------------------------------------
/continuous_evaluation_py23/db.py:
--------------------------------------------------------------------------------
1 | import bson
2 | from pymongo import MongoClient
3 |
4 |
5 | class MongoDB(object):
6 | def __init__(self, dbname, host='localhost', port=27017):
7 | self.client = MongoClient(host, int(port))
8 | self.db = getattr(self.client, dbname)
9 |
10 | def all_tables(self):
11 | '''
12 | find all collection
13 | '''
14 | collist = self.db.list_collection_names()
15 | return collist
16 |
17 | def table(self, table):
18 | ''' table might be a string or a Mongo table object. '''
19 | if isinstance(table, str):
20 | table = getattr(self.db, table)
21 | return table
22 |
23 | # def insert_one(self, table, key, json):
24 | # key['_value'] = json
25 | # self.table(table).insert_one(key)
26 | def insert_one(self, table, record):
27 | self.table(table).insert_one(record)
28 |
29 | def remove(self, table, cond):
30 | self.table(table).remove(cond)
31 |
32 | def find_one(self, table, cond):
33 | '''
34 | Find one record.
35 |
36 | cond: dic
37 | something like {'author': 'Mike'}
38 | '''
39 | return self.table(table).find_one(cond)
40 |
41 | def find_sections(self, table, cond, sections, key, limit=-1):
42 |
43 | if limit == -1:
44 | return self.table(table).find(cond, sections).sort(key)
45 | else:
46 | return self.table(table).find(cond, sections).sort(key).limit(limit)
47 |
48 | def find(self, table, cond):
49 | '''
50 | Find records.
51 |
52 | cond: dic
53 | something like {'author': 'Mike'}
54 | '''
55 | return self.table(table).find(cond)
56 |
57 | def finds(self, table, cond):
58 | '''
59 | Find records.
60 |
61 | cond: dic
62 | something like {'author': 'Mike'}
63 | '''
64 | return [r for r in self.table(table).find(cond)]
65 |
66 |
67 | if __name__ == '__main__':
68 | import _config
69 | from pprint import pprint
70 |
71 | db = MongoDB(_config.db_name)
72 | records = db.finds(_config.table_name, {'type': 'kpi'})
73 | pprint(records)
74 |
--------------------------------------------------------------------------------
/continuous_evaluation_py23/tools/wiki_api.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python
# -*- coding: utf-8 -*-
########################################################################
#
# Copyright (c) 2018 Baidu.com, Inc. All Rights Reserved
#
########################################################################
"""
File: wiki_api.py
Author: guochaorong(guochaorong@baidu.com)
Date: 2018/08/19 13:45:30

Scrape the current month's CE duty-report wiki page and dump the
duty records to wiki.txt.  Python 2 only: relies on urllib2 and
sys.setdefaultencoding.
"""
import numpy as np

import urllib
import urllib2
import re
from bs4 import BeautifulSoup
from datetime import datetime

import sys
reload(sys)
#for UnicodeEncodeError
sys.setdefaultencoding("utf8")


def SaveFile(content, filename):
    """Append *content* as one line to wikiData/<filename>."""
    f = open("wikiData/" + filename, "a")
    f.write(str(content) + "\n")
    f.close()


def SpideWiki():
    """Spide the daily report for this week.

    Returns up to five duty records as a newline-separated string,
    or None when fetching/parsing fails.
    """
    user_agent = 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'
    headers = {'User-Agent': user_agent}
    record_info = ''
    mm_str = datetime.now().strftime("%Y-%m")
    try:
        url = "https://github.com/PaddlePaddle/continuous_evaluation/wiki/%s" % mm_str
        request = urllib2.Request(url, headers=headers)
        response = urllib2.urlopen(request)
        wikiHtml = response.read()
        soup = BeautifulSoup(
            str(wikiHtml), 'html.parser', from_encoding='utf-8')
        div = soup.find(name='div', id='wiki-body')
        ps = div.find_all(
            name='p', limit=100, recursive=True)  #only direct children
        for p in ps:
            pText = p.get_text()
            record_info += (pText.encode('utf-8').strip())
        # Split on the duty marker; `re` is already imported at the top
        # of the file (the body previously re-imported it redundantly).
        result = re.split("值班人:", record_info)
        infos = ''
        for x in result[1:6]:
            # print() works as a parenthesized expression on Python 2
            # and as a function on Python 3.
            print(x)
            # BUG FIX: the separator literal was broken across two
            # source lines (a syntax error); join records with '\n'.
            infos += x + '\n'
        return infos
    except urllib2.URLError as e:
        if hasattr(e, "code"):
            print(e.code)
        if hasattr(e, "reason"):
            print(e.reason)


# BUG FIX: SpideWiki() returns None on failure; writing None raised a
# TypeError.  Fall back to an empty string.
records = SpideWiki()
with open("wiki.txt", 'w') as f:
    f.write(records or '')
69 |
--------------------------------------------------------------------------------
/persistence.py:
--------------------------------------------------------------------------------
1 | '''
2 | Use a mongodb to persist the status of this framework.
3 | '''
4 | from db import MongoDB
5 | import _config
6 | import json
7 |
8 | db = MongoDB(_config.db_name, host=_config.db_host, port=_config.db_port)
9 |
10 | develop_db = MongoDB(_config.develop_db_name, host=_config.develop_db_host, port=_config.develop_db_port)
11 |
12 |
def add_evaluation_record(commitid, date, task, passed, infos, kpis, kpi_values, kpi_types,
                          kpi_objs, detail_infos, develop_infos):
    '''
    persist the evaluation infomation of a task to the database.

    commitid: str
    date: UNIX timestamp
    task: str
    passed: bool
    infos: list of string
    kpis: the kpis in a task, name -> kpivalues
    kpi_values: values whose elements expose .tolist() (numpy arrays,
        presumably -- confirm against callers)
    kpi_types: list of str, type name per kpi
    kpi_objs: objects of KPI.
    detail_infos: detailed evaluation messages, stored verbatim
    develop_infos: develop-branch comparison messages, stored verbatim
    '''
    # delete old task record for this commit, so re-running an
    # evaluation replaces rather than duplicates the record
    db.remove(_config.table_name, {
        'commitid': commitid,
        'type': 'kpi',
        'task': task,
    })

    # insert new record
    record = {
        'commitid': commitid,
        'date': date,
        'task': task,
        'type': 'kpi',
        'passed': passed,
        'infos': infos,
        'detail_infos': detail_infos,
        'develop_infos': develop_infos,
        'kpis-keys': kpis,
        # serialize numpy arrays to a JSON string for storage
        'kpis-values':
        json.dumps(list(value.tolist() for value in kpi_values)),
        'kpi-types': kpi_types,
        'kpi-activeds': [kpi.actived for kpi in kpi_objs],
        'kpi-unit-reprs': [kpi.unit_repr for kpi in kpi_objs],
        'kpi-descs': [kpi.desc for kpi in kpi_objs],
    }
    db.insert_one(_config.table_name, record)
52 |
53 |
def get_kpis_from_db(tasks):
    '''
    Fetch the most recent KPI record for each task from the develop DB.

    tasks: list of str, task names to look up.

    Returns a dict mapping task name ->
    {"kpis-keys": ..., "kpis-values": ...} taken from the newest
    (sorted by date, descending, limit 1) record of that task; tasks
    with no record are simply absent from the result.
    '''
    sections = {"_id": 0, "kpis-values": 1, "kpis-keys": 1, "date": 1, "task": 1}
    key = [("date", -1)]
    kpis = {}
    for task in tasks:
        cond = {"task": task}
        res = develop_db.find_sections(_config.develop_table_name, cond, sections, key, limit=1)
        for i in res:
            kpis[i["task"]] = {"kpis-keys": i["kpis-keys"], "kpis-values": i["kpis-values"]}
    return kpis
66 |
67 |
68 | if __name__ == "__main__":
69 | #tasks = ["model_ce_image_classification", "mnist"]
70 | tasks = ["model_ce_language_model"]
71 | tasks = ['resnet50_net_CPU', 'seq2seq', 'language_model', 'object_detection', 'lstm', 'mnist', 'fail_models', 'image_classification', 'vgg16', 'transformer', 'resnet50_net_GPU', 'resnet50']
72 | tasks = ['mnist']
73 | res = get_kpis_from_db(tasks)
74 | for k, v in res.items():
75 | print(k, v)
76 |
77 |
78 |
--------------------------------------------------------------------------------
/continuous_evaluation_py23/persistence.py:
--------------------------------------------------------------------------------
1 | '''
2 | Use a mongodb to persist the status of this framework.
3 | '''
4 | from db import MongoDB
5 | import _config
6 | import json
7 |
8 | db = MongoDB(_config.db_name, host=_config.db_host, port=_config.db_port)
9 |
10 | develop_db = MongoDB(_config.develop_db_name, host=_config.develop_db_host, port=_config.develop_db_port)
11 |
12 |
13 | def add_evaluation_record(commitid, date, task, passed, infos, kpis, kpi_values, kpi_types,
14 | kpi_objs, detail_infos, develop_infos):
15 | '''
16 | persist the evaluation infomation of a task to the database.
17 |
18 | commitid: str
19 | date: UNIX timestamp
20 | task: str
21 | passed: bool
22 | infos: list of string
23 | kpis: the kpis in a task, name -> kpivalues
24 | kpi_objs: objects of KPI.
25 | '''
26 | # delete old task record for this commit
27 | db.remove(_config.table_name, {
28 | 'commitid': commitid,
29 | 'type': 'kpi',
30 | 'task': task,
31 | })
32 |
33 | # insert new record
34 | record = {
35 | 'commitid': commitid,
36 | 'date': date,
37 | 'task': task,
38 | 'type': 'kpi',
39 | 'passed': passed,
40 | 'infos': infos,
41 | 'detail_infos': detail_infos,
42 | 'develop_infos': develop_infos,
43 | 'kpis-keys': kpis,
44 | 'kpis-values':
45 | json.dumps(list(value.tolist() for value in kpi_values)),
46 | 'kpi-types': kpi_types,
47 | 'kpi-activeds': [kpi.actived for kpi in kpi_objs],
48 | 'kpi-unit-reprs': [kpi.unit_repr for kpi in kpi_objs],
49 | 'kpi-descs': [kpi.desc for kpi in kpi_objs],
50 | }
51 | db.insert_one(_config.table_name, record)
52 |
53 |
def get_kpis_from_db(tasks):
    '''
    Fetch the most recent kpi record of every task from the develop db.

    tasks: list of task names.
    Returns a dict: task -> {"kpis-keys": ..., "kpis-values": ...}.
    '''
    projection = {"_id": 0, "kpis-values": 1, "kpis-keys": 1, "date": 1, "task": 1}
    newest_first = [("date", -1)]
    kpis = {}
    for task in tasks:
        records = develop_db.find_sections(
            _config.develop_table_name, {"task": task}, projection,
            newest_first, limit=1)
        for record in records:
            kpis[record["task"]] = {
                "kpis-keys": record["kpis-keys"],
                "kpis-values": record["kpis-values"],
            }
    return kpis
66 |
67 |
if __name__ == "__main__":
    # Ad-hoc smoke test: fetch the latest kpis of one task and dump them.
    # The original reassigned ``tasks`` three times; only the last value was
    # ever used, so the dead stores are kept here as reference comments:
    #   ["model_ce_image_classification", "mnist"]
    #   ["model_ce_language_model"]
    #   ['resnet50_net_CPU', 'seq2seq', 'language_model', 'object_detection',
    #    'lstm', 'mnist', 'fail_models', 'image_classification', 'vgg16',
    #    'transformer', 'resnet50_net_GPU', 'resnet50']
    tasks = ['mnist']
    res = get_kpis_from_db(tasks)
    for k, v in res.items():
        print(k, v)
76 |
77 |
78 |
--------------------------------------------------------------------------------
/analysis_kpis.py:
--------------------------------------------------------------------------------
1 | #!/bin/env python
2 | # -*- coding: utf-8 -*-
3 | #encoding=utf-8 vi:ts=4:sw=4:expandtab:ft=python
4 | """
5 | analysis the benchmark model kpi
6 | """
7 | import numpy as np
8 | from utils import log
9 |
class AnalysisKpiData(object):
    """
    Aggregate benchmark kpi readings from several runs and analyse them.

    kpis_status: dict, kpi name -> actived flag (bool).
    kpis_list: list of dicts (one per run), kpi name -> sequence of values;
        only the last value of each run is analysed.
    """

    def __init__(self, kpis_status, kpis_list):
        self.kpis_status = kpis_status
        self.kpis_list = kpis_list
        self.analysis_result = {}
        # Maximum tolerated relative deviation (std / mean) per kpi.
        self.diff_thre = 0.05

    def analysis_data(self):
        """
        Collect the final value of every kpi from each run and compute
        min / max / mean / median / var / std / change_rate statistics.
        """
        # setdefault also tolerates kpis that do not appear in the first
        # run; the original pre-seeded only the first run's keys and raised
        # KeyError for any kpi introduced in a later run.
        for kpis in self.kpis_list:
            for kpi_name, values in kpis.items():
                stats = self.analysis_result.setdefault(kpi_name, {})
                stats.setdefault('kpi_data', []).append(values[-1])
        for stats in self.analysis_result.values():
            np_data = np.array(stats['kpi_data'])
            mean = np_data.mean()
            std = np_data.std()
            stats['min'] = np_data.min()
            stats['max'] = np_data.max()
            stats['mean'] = mean
            stats['median'] = np.median(np_data)
            stats['var'] = np_data.var()
            stats['std'] = std
            # Relative dispersion; NOTE(review): a zero mean still yields
            # inf/nan here, exactly as before.
            stats['change_rate'] = std / mean

    def print_result(self):
        """
        Log the statistics of every actived kpi and raise if any of them
        fluctuates more than self.diff_thre.
        """
        suc = True
        for kpi_name, stats in self.analysis_result.items():
            # .get keeps kpis missing from kpis_status inactive instead of
            # raising (they could not be analysed at all before).
            is_actived = self.kpis_status.get(kpi_name, False)
            log.info('kpi: %s, actived: %s' % (kpi_name, is_actived))
            if is_actived:
                if stats['change_rate'] > self.diff_thre:
                    suc = False
                    log.warn("NOTE kpi: %s change_rate too bigger!" % kpi_name)
                log.info('min:%s max:%s mean:%s median:%s std:%s change_rate:%s' %
                         (stats['min'], stats['max'], stats['mean'],
                          stats['median'], stats['std'], stats['change_rate']))
        if not suc:
            raise Exception("some kpi's change_rate has bigger then thre")
66 |
--------------------------------------------------------------------------------
/continuous_evaluation_py23/analysis_kpis.py:
--------------------------------------------------------------------------------
1 | #!/bin/env python
2 | # -*- coding: utf-8 -*-
3 | #encoding=utf-8 vi:ts=4:sw=4:expandtab:ft=python
4 | """
5 | analysis the benchmark model kpi
6 | """
7 | import numpy as np
8 | from utils import log
9 |
class AnalysisKpiData(object):
    """
    Aggregate benchmark kpi readings from several runs and analyse them.

    kpis_status: dict, kpi name -> actived flag (bool).
    kpis_list: list of dicts (one per run), kpi name -> sequence of values;
        only the last value of each run is analysed.
    """

    def __init__(self, kpis_status, kpis_list):
        self.kpis_status = kpis_status
        self.kpis_list = kpis_list
        self.analysis_result = {}
        # Maximum tolerated relative deviation (std / mean) per kpi.
        self.diff_thre = 0.05

    def analysis_data(self):
        """
        Collect the final value of every kpi from each run and compute
        min / max / mean / median / var / std / change_rate statistics.
        """
        # setdefault also tolerates kpis that do not appear in the first
        # run; the original pre-seeded only the first run's keys and raised
        # KeyError for any kpi introduced in a later run.
        for kpis in self.kpis_list:
            for kpi_name, values in kpis.items():
                stats = self.analysis_result.setdefault(kpi_name, {})
                stats.setdefault('kpi_data', []).append(values[-1])
        for stats in self.analysis_result.values():
            np_data = np.array(stats['kpi_data'])
            mean = np_data.mean()
            std = np_data.std()
            stats['min'] = np_data.min()
            stats['max'] = np_data.max()
            stats['mean'] = mean
            stats['median'] = np.median(np_data)
            stats['var'] = np_data.var()
            stats['std'] = std
            # Relative dispersion; NOTE(review): a zero mean still yields
            # inf/nan here, exactly as before.
            stats['change_rate'] = std / mean

    def print_result(self):
        """
        Log the statistics of every actived kpi and raise if any of them
        fluctuates more than self.diff_thre.
        """
        suc = True
        for kpi_name, stats in self.analysis_result.items():
            # .get keeps kpis missing from kpis_status inactive instead of
            # raising (they could not be analysed at all before).
            is_actived = self.kpis_status.get(kpi_name, False)
            log.info('kpi: %s, actived: %s' % (kpi_name, is_actived))
            if is_actived:
                if stats['change_rate'] > self.diff_thre:
                    suc = False
                    log.warn("NOTE kpi: %s change_rate too bigger!" % kpi_name)
                log.info('min:%s max:%s mean:%s median:%s std:%s change_rate:%s' %
                         (stats['min'], stats['max'], stats['mean'],
                          stats['median'], stats['std'], stats['change_rate']))
        if not suc:
            raise Exception("some kpi's change_rate has bigger then thre")
66 |
--------------------------------------------------------------------------------
/tools/teamcity_api.py:
--------------------------------------------------------------------------------
1 | import re
2 | import sys
3 | import base64
4 | import httplib
5 | from http import Http
6 | #from bs4 import BeautifulSoup
7 | from xml.dom.minidom import parseString
8 | import time
9 |
# Shared REST client for the TeamCity server that runs the CE builds.
http_conn = Http("http://ce.paddlepaddle.org:8080")

# Retry budget for each REST call.
max_retry = 3
# Aggregated build info keyed by VCS revision; filled by show_build().
build_dict = {}
14 |
15 |
def show_build(build_id):
    """GET detailed build info of the given build id.

    Stores the build's id, web url, status and start date into the
    module-level build_dict, keyed by the VCS revision.
    Raises Exception when the REST call still fails after max_retry tries.
    """
    auth = base64.b64encode('guest' + ':' + 'guest')
    headers = {"Authorization": "Basic " + auth}
    url = "/app/rest/builds/%s" % build_id
    for cnt in range(max_retry):
        try:
            status, resp, _ = http_conn.access("GET", url, headers=headers)
            if status == 200:
                break
        except Exception as e:
            # Bug fix: the message used to say "list builds fail", which
            # belongs to list_build(); report the right operation.
            if cnt == max_retry - 1:
                raise Exception("show build fail, error: %s " % e)
            else:
                time.sleep(1)
    else:
        # Every retry returned a non-200 status: resp is not a valid build
        # document, so fail explicitly instead of feeding it to the parser.
        raise Exception("show build fail, status: %s" % status)

    b = parseString(resp)
    builds = b.getElementsByTagName('build')
    for build in builds:
        status = build.getElementsByTagName('statusText')[0]
        date = build.getElementsByTagName('startDate')[0]
        revisions = build.getElementsByTagName('revisions')
        res = ''
        for revision in revisions:
            mm = revision.getElementsByTagName('revision')[0]
            res = mm.getAttribute('version')

        build_dict[res] = {'id': build.getAttribute('id'),
                           'weburl': build.getAttribute('webUrl'),
                           'status': status.childNodes[0].data,
                           'date': date.childNodes[0].data,
                           'version': res}
49 |
50 |
def list_build():
    """GET the ids of the most recent builds (abstract info only).

    Returns a list of build id strings.
    Raises Exception when the REST call still fails after all retries.
    """
    build_list = []
    auth = base64.b64encode('guest' + ':' + 'guest')
    headers = {"Authorization": "Basic " + auth}
    #assume no more then 60 builds one week.
    url = "/app/rest/builds?buildType=PaddleCe_CEBuild&count=60"
    # Listing builds must succeed, so allow twice the usual retry budget.
    retries = 2 * max_retry
    for cnt in range(retries):
        try:
            status, resp, _ = http_conn.access("GET", url, headers=headers)
            if status == 200:
                break
        except Exception as e:
            # Bug fix: the last-attempt check compared against max_retry - 1
            # although the loop runs 2 * max_retry times, so the extra
            # retries were dead code.  The message also said "show builds".
            if cnt == retries - 1:
                raise Exception("list builds fail, error: %s " % e)
            else:
                time.sleep(1)

    b = parseString(resp)
    builds = b.getElementsByTagName('build')
    for build in builds:
        build_list.append(build.getAttribute('id'))
    return build_list
75 |
76 |
if __name__ == '__main__':
    builds = list_build()
    # Use the function form of print for consistency with the rest of the
    # file (and so the script also parses on Python 3).
    print(len(builds))

    for build in builds:
        try:
            show_build(build)
            # Be gentle with the server between consecutive detail calls.
            time.sleep(0.5)
        except Exception as e:
            print("build failed, %s" % e)
            sys.exit(1)
    print(build_dict)

    import json
    with open('teamcity.json', 'w') as json_file:
        json_file.write(json.dumps(build_dict))
93 |
--------------------------------------------------------------------------------
/continuous_evaluation_py23/tools/teamcity_api.py:
--------------------------------------------------------------------------------
1 | import re
2 | import sys
3 | import base64
4 | import httplib
5 | from http import Http
6 | #from bs4 import BeautifulSoup
7 | from xml.dom.minidom import parseString
8 | import time
9 |
# Shared REST client for the TeamCity server that runs the CE builds.
http_conn = Http("http://ce.paddlepaddle.org:8080")

# Retry budget for each REST call.
max_retry = 3
# Aggregated build info keyed by VCS revision; filled by show_build().
build_dict = {}
14 |
15 |
def show_build(build_id):
    """GET detailed build info of the given build id.

    Stores the build's id, web url, status and start date into the
    module-level build_dict, keyed by the VCS revision.
    Raises Exception when the REST call still fails after max_retry tries.
    """
    # py2/py3 compat fix: b64encode needs bytes on Python 3 and the header
    # needs text back; 'guest:guest' encodes to the same value as before.
    auth = base64.b64encode(b'guest:guest').decode('ascii')
    headers = {"Authorization": "Basic " + auth}
    url = "/app/rest/builds/%s" % build_id
    for cnt in range(max_retry):
        try:
            status, resp, _ = http_conn.access("GET", url, headers=headers)
            if status == 200:
                break
        except Exception as e:
            # Bug fix: the message used to say "list builds fail", which
            # belongs to list_build(); report the right operation.
            if cnt == max_retry - 1:
                raise Exception("show build fail, error: %s " % e)
            else:
                time.sleep(1)
    else:
        # Every retry returned a non-200 status: resp is not a valid build
        # document, so fail explicitly instead of feeding it to the parser.
        raise Exception("show build fail, status: %s" % status)

    b = parseString(resp)
    builds = b.getElementsByTagName('build')
    for build in builds:
        status = build.getElementsByTagName('statusText')[0]
        date = build.getElementsByTagName('startDate')[0]
        revisions = build.getElementsByTagName('revisions')
        res = ''
        for revision in revisions:
            mm = revision.getElementsByTagName('revision')[0]
            res = mm.getAttribute('version')

        build_dict[res] = {'id': build.getAttribute('id'),
                           'weburl': build.getAttribute('webUrl'),
                           'status': status.childNodes[0].data,
                           'date': date.childNodes[0].data,
                           'version': res}
49 |
50 |
def list_build():
    """GET the ids of the most recent builds (abstract info only).

    Returns a list of build id strings.
    Raises Exception when the REST call still fails after all retries.
    """
    build_list = []
    # py2/py3 compat fix: b64encode needs bytes on Python 3 and the header
    # needs text back; 'guest:guest' encodes to the same value as before.
    auth = base64.b64encode(b'guest:guest').decode('ascii')
    headers = {"Authorization": "Basic " + auth}
    #assume no more then 60 builds one week.
    url = "/app/rest/builds?buildType=PaddleCe_CEBuild&count=60"
    # Listing builds must succeed, so allow twice the usual retry budget.
    retries = 2 * max_retry
    for cnt in range(retries):
        try:
            status, resp, _ = http_conn.access("GET", url, headers=headers)
            if status == 200:
                break
        except Exception as e:
            # Bug fix: the last-attempt check compared against max_retry - 1
            # although the loop runs 2 * max_retry times, so the extra
            # retries were dead code.  The message also said "show builds".
            if cnt == retries - 1:
                raise Exception("list builds fail, error: %s " % e)
            else:
                time.sleep(1)

    b = parseString(resp)
    builds = b.getElementsByTagName('build')
    for build in builds:
        build_list.append(build.getAttribute('id'))
    return build_list
75 |
76 |
if __name__ == '__main__':
    builds = list_build()
    # Bug fix: ``print len(builds)`` is Python-2-only statement syntax and
    # is a SyntaxError under Python 3; this file lives in the py23 tree.
    print(len(builds))

    for build in builds:
        try:
            show_build(build)
            # Be gentle with the server between consecutive detail calls.
            time.sleep(0.5)
        except Exception as e:
            print("build failed, %s" % e)
            sys.exit(1)
    print(build_dict)

    import json
    with open('teamcity.json', 'w') as json_file:
        json_file.write(json.dumps(build_dict))
93 |
--------------------------------------------------------------------------------
/web/main.py:
--------------------------------------------------------------------------------
1 | import os
2 | import sys
3 | from flask import Flask, request, redirect, send_from_directory, render_template_string
4 | from flask.ext.cache import Cache
5 | sys.path.append('..')
6 | from db import MongoDB
7 | from datetime import datetime, timedelta
8 | import _config
9 | import json
10 | import pprint
11 | from kpi import Kpi
12 | from view import *
13 | from api import *
14 | import pyecharts
15 |
# Resolve static/template directories relative to the script location.
SERVER_PATH = os.path.abspath(os.path.dirname(sys.argv[0]))
STATIC_DIR = os.path.join(SERVER_PATH, "static")
TEMPLATE_DIR = os.path.join(SERVER_PATH, "template")

app = Flask(
    "modelce", static_url_path=STATIC_DIR, template_folder=TEMPLATE_DIR)
# Bug fix: Flask-Cache's constructor takes a ``config`` keyword; passing
# ``_config=`` raised a TypeError (the py23 copy of this file already uses
# ``config=``).
cache = Cache(
    app, config={'CACHE_TYPE': 'filesystem',
                 'CACHE_DIR': './_cache'})
db = MongoDB(_config.db_name, _config.db_host, _config.db_port)
26 |
27 |
@app.route('/')
@cache.cached(timeout=120)
def index():
    '''
    Show the status, the contents:

    a list of commitids and their status(passed or not, the info)
    '''
    page, snips = build_index_page()
    commits = CommitRecord.get_all()
    latest_commit = commits[-1].commit
    # Compute the snippet logic once and reuse it; the original evaluated
    # snips[0].logic() a second time just for the debug print below.
    commit_logic = snips[0].logic()
    logics = merge_logics(commit_logic, snips[1].logic(latest_commit))
    print('commits', commit_logic)
    return render_template_string(page, **logics)
42 |
43 |
@app.route('/commit/details', methods=["GET"])
#@cache.cached(timeout=5)
def commit_details():
    """Render the detail page for the commit given in the query string."""
    commit_id = request.args.get('commit')
    page, snips = build_commit_detail_page()
    detail_logic = snips[0].logic(commit_id)
    return render_template_string(page, **detail_logic)
53 |
54 |
@app.route('/commit/compare', methods=["GET"])
#@cache.cached(timeout=5)
def commit_compare():
    # Compare two commits; defaults to (latest commit, latest passing
    # commit) when no explicit pair is supplied via the query string.
    if 'cur' not in request.args:
        commits = CommitRecord.get_all()
        latest_commit = commits[-1]
        success_commits = [v for v in filter(lambda r: r.passed, commits)]
        # NOTE(review): the negative indexes assume at least one (or two,
        # when the latest commit itself passed) successful commits exist;
        # otherwise this raises IndexError -- confirm against real data.
        latest_success_commit = success_commits[
            -1] if not latest_commit.passed else success_commits[-2]
        cur = latest_commit.commit
        base = latest_success_commit.commit
    else:
        cur = request.args.get('cur')
        base = request.args.get('base')

    page, (select_snip, result_snip) = build_compare_page()
    logics = merge_logics(select_snip.logic(), result_snip.logic(cur, base))
    return render_template_string(page, **logics)
73 |
#@cache.cached(timeout=120)
@app.route('/commit/draw_scalar', methods=["GET"])
def draw_scalar():
    """Render the scalar-curve page for the task named in the query string."""
    task = request.args['task']
    page, (scalar_snip,) = build_scalar_page(task)
    return render_template_string(page, **merge_logics(scalar_snip.logic()))
82 |
83 |
if __name__ == '__main__':
    import argparse

    # --host/--port select the interface the web UI binds to.
    parser = argparse.ArgumentParser(description='CE Web')
    parser.add_argument('--port', type=int, default=80, required=False,
                        help='web service port')
    parser.add_argument('--host', type=str, default='0.0.0.0', required=False,
                        help='web service host')
    options = parser.parse_args()
    app.run(debug=True, host=options.host, port=options.port, threaded=True)
95 |
--------------------------------------------------------------------------------
/eva.xsh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env xonsh
2 | $RAISE_SUBPROC_ERROR = True
3 | $XONSH_SHOW_TRACEBACK = True
4 |
5 | import sys; sys.path.insert(0, '')
6 | import subprocess
7 | import _config
8 | from _config import pjoin
9 | from utils import PathRecover, log
10 | import os
11 | import argparse
12 | from analysis_kpis import AnalysisKpiData
13 |
# Export the workspace root both as a xonsh env var and a plain process
# env var so child scripts can locate the CE checkout.
$ceroot=_config.workspace
os.environ['ceroot'] = _config.workspace
16 |
def parse_args():
    """Parse command line flags: --task_dir (models to run) and --times."""
    parser = argparse.ArgumentParser("model benchmark")
    parser.add_argument(
        '--task_dir', type=str, help='The model dir.')
    parser.add_argument(
        '--times', type=int, default=5, help='The run times')
    return parser.parse_args()
27 |
def get_changed_tasks(args):
    '''
    Decide which model tasks to evaluate.

    Returns the tasks named in --task_dir when given; otherwise diffs the
    baseline repo against master and collects every top-level task dir
    that contains changed files.
    '''
    tasks = []
    print (args.task_dir, args.times)
    if args.task_dir:
        tasks = args.task_dir.split()
        return tasks
    cd @(_config.baseline_path)
    out = $(git diff master | grep "diff --git")
    out = out.strip()
    for item in out.split('\n'):
        # "diff --git a/<task>/<file> b/<task>/<file>": field 3 is the b/
        # path and its second component is the task directory name.
        task = item.split()[3].split('/')[1]
        if task not in tasks:
            tasks.append(task)
    log.warn("changed tasks: %s" % tasks)
    return tasks
43 |
44 |
def main():
    """Run every changed task and report which models failed."""
    args = parse_args()
    tasks = get_changed_tasks(args)
    times = args.times
    failed = []
    for task in tasks:
        try:
            kpis_status, kpis_list = run_task(task, times)
            print(kpis_list)
            analyser = AnalysisKpiData(kpis_status, kpis_list)
            analyser.analysis_data()
            analyser.print_result()
        except Exception as e:
            print(e)
            failed.append(task)
    if not failed:
        print("all changed models success!")
    else:
        log.warn("failed models:", failed)
        sys.exit(1)
67 |
68 |
def run_task(task_name, times):
    '''
    Run the model task ``times`` times and collect its kpi readings.

    task_name: directory name of the task under the baseline repo.
    times: requested repetitions (forced to 1 when no kpi is actived).
    Returns (kpis_status, kpis_list): kpi name -> actived flag, and one
    {kpi name: data} dict per run.
    '''
    task_dir = pjoin(_config.baseline_path, task_name)
    log.warn('run model', task_name)
    cd @(_config.workspace)
    env = {}

    # Kpi definitions live in the task's continuous_evaluation module,
    # with _ce as the fallback module name.
    try:
        exec('from tasks.%s.continuous_evaluation import tracking_kpis'
             % task_name, env)
        log.info("import from continuous_evaluation suc.")
    except Exception as e:
        log.warn("import failed. %s" % e)
        exec('from tasks.%s._ce import tracking_kpis' % task_name, env)
        log.info("import from _ce suc")
    tracking_kpis = env['tracking_kpis']

    kpis_status = get_kpis_status(tracking_kpis)

    # A single run is enough when no kpi is actived (nothing to compare).
    need_mul_times = False
    for actived in kpis_status.values():
        if actived:
            need_mul_times = True
            break
    if not need_mul_times:
        times = 1

    kpis_list = []
    for i in range(times):
        with PathRecover():
            cd @(task_dir)
            ./run.xsh

            cd @(_config.workspace)

            kpis = {}
            for kpi in tracking_kpis:
                kpi.root = task_dir
                kpis[kpi.name] = kpi.cur_data
            kpis_list.append(kpis)
    return kpis_status, kpis_list
112 |
113 |
def get_kpis_status(tracking_kpis):
    """Map each tracked kpi's name to its actived flag."""
    kpis_status = {kpi.name: kpi.actived for kpi in tracking_kpis}
    print(kpis_status)
    return kpis_status
120 |
121 |
main()  # xonsh executes this script top to bottom; kick off the evaluation
123 |
--------------------------------------------------------------------------------
/continuous_evaluation_py23/eva.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
# Leftovers from the xonsh version of this script (eva.xsh); plain Python
# ignores them, they are kept so the two files stay easy to diff.
RAISE_SUBPROC_ERROR = True
XONSH_SHOW_TRACEBACK = True
4 |
5 | import sys; sys.path.insert(0, '')
6 | import subprocess
7 | import _config
8 | from _config import pjoin
9 | from utils import PathRecover, log
10 | import os
11 | import argparse
12 | from analysis_kpis import AnalysisKpiData
13 |
# Expose the workspace root to child scripts through the environment.
ceroot=_config.workspace
os.environ['ceroot'] = _config.workspace
16 |
17 |
def parse_args():
    """Parse command line flags: --task_dir (models to run) and --times."""
    parser = argparse.ArgumentParser("model benchmark")
    parser.add_argument(
        '--task_dir', type=str, help='The model dir.')
    parser.add_argument(
        '--times', type=int, default=5, help='The run times')
    return parser.parse_args()
28 |
29 |
def get_changed_tasks(args):
    '''
    Decide which model tasks to evaluate.

    Returns the tasks named in --task_dir when given; otherwise diffs the
    baseline repo against master and collects every top-level task dir
    that contains changed files.
    '''
    tasks = []
    print (args.task_dir, args.times)
    if args.task_dir:
        tasks = args.task_dir.split()
        return tasks
    os.chdir(_config.baseline_path)
    cmd = 'git diff master | grep "diff --git"'
    # Bug fix: the original called commands.getstatusoutput, but ``commands``
    # was never imported (and no longer exists on Python 3);
    # subprocess.getstatusoutput is the drop-in replacement and subprocess
    # is already imported at the top of this file.
    (status, out) = subprocess.getstatusoutput(cmd)
    out = out.strip()
    for item in out.split('\n'):
        # "diff --git a/<task>/<file> b/<task>/<file>": field 3 is the b/
        # path and its second component is the task directory name.
        task = item.split()[3].split('/')[1]
        if task not in tasks:
            tasks.append(task)
    log.warn("changed tasks: %s" % tasks)
    return tasks
46 |
47 |
def main():
    """Run every changed task and report which models failed."""
    args = parse_args()
    tasks = get_changed_tasks(args)
    times = args.times
    failed = []
    for task in tasks:
        try:
            kpis_status, kpis_list = run_task(task, times)
            print(kpis_list)
            analyser = AnalysisKpiData(kpis_status, kpis_list)
            analyser.analysis_data()
            analyser.print_result()
        except Exception as e:
            print(e)
            failed.append(task)
    if not failed:
        print("all changed models success!")
    else:
        log.warn("failed models:", failed)
        sys.exit(1)
70 |
71 |
def run_task(task_name, times):
    '''
    Run the model task ``times`` times and collect its kpi readings.

    task_name: directory name of the task under the baseline repo.
    times: requested repetitions (forced to 1 when no kpi is actived).
    Returns (kpis_status, kpis_list): kpi name -> actived flag, and one
    {kpi name: data} dict per run.
    '''
    task_dir = pjoin(_config.baseline_path, task_name)
    log.warn('run model', task_name)
    os.chdir(_config.workspace)
    env = {}

    # Kpi definitions live in the task's continuous_evaluation module,
    # with _ce as the fallback module name.
    try:
        exec('from tasks.%s.continuous_evaluation import tracking_kpis'
             % task_name, env)
        log.info("import from continuous_evaluation suc.")
    except Exception as e:
        log.warn("import failed. %s" % e)
        exec('from tasks.%s._ce import tracking_kpis' % task_name, env)
        log.info("import from _ce suc")
    tracking_kpis = env['tracking_kpis']

    kpis_status = get_kpis_status(tracking_kpis)

    # A single run is enough when no kpi is actived (nothing to compare).
    need_mul_times = False
    for actived in kpis_status.values():
        if actived:
            need_mul_times = True
            break
    if not need_mul_times:
        times = 1

    kpis_list = []
    for i in range(times):
        with PathRecover():
            os.chdir(task_dir)
            cmd = "./run.xsh"
            # NOTE(review): the exit status of os.system is ignored, so a
            # failing run still has its kpis read below -- confirm whether
            # that is intended.
            os.system(cmd)

            os.chdir(_config.workspace)

            kpis = {}
            for kpi in tracking_kpis:
                kpi.root = task_dir
                kpis[kpi.name] = kpi.cur_data
            kpis_list.append(kpis)
    return kpis_status, kpis_list
116 |
117 |
def get_kpis_status(tracking_kpis):
    """Map each tracked kpi's name to its actived flag."""
    kpis_status = {kpi.name: kpi.actived for kpi in tracking_kpis}
    print(kpis_status)
    return kpis_status
124 |
125 |
if __name__ == '__main__':
    # Guarding the entry point lets the module be imported (e.g. by tests)
    # without triggering a full evaluation run.
    main()
127 |
--------------------------------------------------------------------------------
/continuous_evaluation_py23/web/main.py:
--------------------------------------------------------------------------------
1 | import os
2 | import sys
3 | from flask import Flask, request, redirect, send_from_directory, render_template_string
4 | from flask_cache import Cache
5 | sys.path.append('..')
6 | from db import MongoDB
7 | from datetime import datetime, timedelta
8 | import _config
9 | import json
10 | import pprint
11 | from kpi import Kpi
12 | from view import *
13 | from api import *
14 | import pyecharts
15 |
# Resolve static/template directories relative to the script location.
SERVER_PATH = os.path.abspath(os.path.dirname(sys.argv[0]))
STATIC_DIR = os.path.join(SERVER_PATH, "static")
TEMPLATE_DIR = os.path.join(SERVER_PATH, "template")

app = Flask(
    "modelce", static_url_path=STATIC_DIR, template_folder=TEMPLATE_DIR)
# Filesystem-backed response cache used by the cached routes below.
cache = Cache(
    app, config={'CACHE_TYPE': 'filesystem',
                 'CACHE_DIR': './_cache'})
db = MongoDB(_config.db_name, _config.db_host, _config.db_port)
26 |
27 |
@app.route('/')
@cache.cached(timeout=120)
def index():
    '''
    build index page
    '''
    page, snips = build_index_page()
    # NOTE(review): ``tables`` is fetched but never used below -- confirm
    # whether this call is needed for a side effect or is dead code.
    tables = CommitRecord.get_all_tables()
    logics = merge_logics(snips[0].logic())
    return render_template_string(page, **logics)
38 |
@app.route('/main', methods=["GET"])
#@cache.cached(timeout=120)
def main():
    '''
    Show the status, the contents:

    a list of commitids and their status(passed or not, the info)
    '''
    table = request.args.get('table')
    page, snips = build_main_page(table)
    records = CommitRecord.get_all(table)
    newest = records[-1].commit
    merged = merge_logics(snips[0].logic(table), snips[1].logic(table, newest))
    return render_template_string(page, **merged)
54 |
55 |
@app.route('/commit/details', methods=["GET"])
#@cache.cached(timeout=5)
def commit_details():
    """Render the detail page for one commit of one result table."""
    table = request.args.get('table')
    commit_id = request.args.get('commit')
    page, snips = build_commit_detail_page(table)
    detail_logic = snips[0].logic(table, commit_id)
    return render_template_string(page, **detail_logic)
66 |
67 |
@app.route('/commit/compare', methods=["GET"])
#@cache.cached(timeout=5)
def commit_compare():
    # Compare two commits; defaults to (latest commit, latest passing
    # commit) when no explicit pair is supplied via the query string.
    if 'cur' not in request.args:
        # NOTE(review): unlike the other routes in this file, get_all() is
        # called without a table name here -- confirm the default is valid.
        commits = CommitRecord.get_all()
        latest_commit = commits[-1]
        success_commits = [v for v in filter(lambda r: r.passed, commits)]
        # NOTE(review): the negative indexes assume enough successful
        # commits exist; otherwise this raises IndexError.
        latest_success_commit = success_commits[
            -1] if not latest_commit.passed else success_commits[-2]
        cur = latest_commit.commit
        base = latest_success_commit.commit
    else:
        cur = request.args.get('cur')
        base = request.args.get('base')

    page, (select_snip, result_snip) = build_compare_page()
    logics = merge_logics(select_snip.logic(), result_snip.logic(cur, base))
    return render_template_string(page, **logics)
86 |
#@cache.cached(timeout=120)
@app.route('/commit/draw_scalar', methods=["GET"])
def draw_scalar():
    """Render the scalar-curve page for one task of one result table."""
    table = request.args['table']
    task = request.args['task']
    page, (scalar_snip,) = build_scalar_page(task)
    return render_template_string(page, **merge_logics(scalar_snip.logic(table)))
96 |
97 |
if __name__ == '__main__':
    import argparse

    # --host/--port select the interface the web UI binds to.
    parser = argparse.ArgumentParser(description='CE Web')
    parser.add_argument('--port', type=int, default=80, required=False,
                        help='web service port')
    parser.add_argument('--host', type=str, default='0.0.0.0', required=False,
                        help='web service host')
    options = parser.parse_args()
    app.run(debug=True, host=options.host, port=options.port, threaded=True)
109 |
--------------------------------------------------------------------------------
/tools/gen_report.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | #coding: utf-8
3 | ################################################################################
4 | #
5 | # Copyright (c) 2017 Baidu.com, Inc. All Rights Reserved
6 | #
7 | ################################################################################
8 | """
9 | This class is used to run remote command
10 |
11 | Authors: guochaorong(guochaorong@baidu.com)
12 | Date: 2018/07/11
13 | """
14 |
15 | import sys
16 | sys.path.append('pypage')
17 | sys.path.append('..')
18 | import _config
19 | import json
20 | import time
21 | from db import MongoDB
22 | from datetime import datetime, timedelta
23 | from kpi import Kpi
24 |
import sys
# Shared MongoDB handle for this report script.
db = MongoDB(_config.db_name)
from web.api import CommitRecord
from html import Html
from datetime import datetime

# Report window in days, and today's date -- used to tell "solved"
# failures from ones still failing today.
period = 7
today = datetime.now().strftime("%Y%m%d")
33 |
34 |
def gettimestamp(dd_str):
    """Convert a 'YYYY-MM-DD' date string to the UNIX timestamp of its
    local midnight."""
    parsed = time.strptime(dd_str + " 00:00:00", "%Y-%m-%d %H:%M:%S")
    return int(time.mktime(parsed))
39 |
40 |
def get_target_commits(records, days=None):
    """Return the records whose date falls within the report window.

    records: objects with a ``date`` datetime attribute.
    days: window length in days; defaults to the module-level ``period``.
    """
    window = period if days is None else days
    # Hoisted out of the loop: the original recomputed datetime.now() for
    # every record.
    begin = datetime.now() - timedelta(days=window)
    print(begin)
    need_commit = []
    for record in records:
        print(record.date)
        if record.date > begin:
            need_commit.append(record)
    return need_commit
51 |
52 |
def get_all_tasks(need_commit):
    """Collect per-task failure statistics over the given commits."""
    print(need_commit)
    all_tasks = {}
    for commit in need_commit:
        print(commit.commit)
        print(commit.date)
        for task, values in CommitRecord.get_tasks(commit.commit).items():
            if task not in all_tasks:
                all_tasks[task] = {'times': 0, 'commits': [], 'date': []}
            if not values['passed']:
                entry = all_tasks[task]
                entry['times'] += 1
                entry['date'].append(commit.date.strftime("%Y-%m-%d"))
                entry['commits'].append(commit.commit)
    print(all_tasks)
    return all_tasks
71 |
72 |
def gen_html(all_tasks, build_dict, sums):
    """gen html for summary and model infos section

    all_tasks: per-task failure stats from get_all_tasks().
    build_dict: revision -> TeamCity build info (loaded from teamcity.json).
    sums: total number of commits in the report window.
    """
    hh = Html()
    # wiki.txt holds the duty text that goes into the report header.
    with open('wiki.txt', 'r') as f:
        duty = f.read()
    failed_commit = get_failed_commit(all_tasks)
    suc = sums - len(failed_commit)
    hh.html_create(period, duty, sums, suc)

    for task, values in all_tasks.items():
        print(task, values)
        res = ''
        if values['times'] == 0:
            res += '''result: \tpass\t
'''
        else:
            # Link each failing commit to its TeamCity build page.
            cm_str = ''
            for cm in values['commits']:
                if cm in build_dict.keys():
                    cm_str += build_dict[cm]['weburl'] + ' '
            # A failure whose most recent occurrence is not today is
            # treated as already solved.
            status = "unsolved"
            if values['date'][-1] != today:
                status = "solved"
            res += '''result: failed
\tjobs: %s
status: %s\t
''' \
            %(cm_str, status)
        hh.html_add_param(task, res, task)
98 |
99 |
def get_failed_commit(all_tasks):
    """Return the unique failing commit ids across all tasks.

    First-seen order is preserved.  A seen-set replaces the original
    linear membership test on the result list (O(n^2) -> O(n)); the
    unused dict keys are no longer iterated either.
    """
    failed_commit = []
    seen = set()
    for values in all_tasks.values():
        for cm in values['commits']:
            if cm not in seen:
                seen.add(cm)
                failed_commit.append(cm)
    return failed_commit
108 |
109 |
if __name__ == '__main__':
    import traceback
    try:
        # Build info is prepared beforehand by tools/teamcity_api.py.
        with open('teamcity.json') as json_file:
            build_dict = json.load(json_file)
        records = CommitRecord.get_all()
        week_commits = get_target_commits(records)
        task_stats = get_all_tasks(week_commits)
        gen_html(task_stats, build_dict, len(week_commits))
    except Exception as e:
        print(e)
        print(traceback.format_exc())
123 |
--------------------------------------------------------------------------------
/continuous_evaluation_py23/tools/gen_report.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | #coding: utf-8
3 | ################################################################################
4 | #
5 | # Copyright (c) 2017 Baidu.com, Inc. All Rights Reserved
6 | #
7 | ################################################################################
8 | """
9 | This class is used to run remote command
10 |
11 | Authors: guochaorong(guochaorong@baidu.com)
12 | Date: 2018/07/11
13 | """
14 |
15 | import sys
16 | sys.path.append('pypage')
17 | sys.path.append('..')
18 | import _config
19 | import json
20 | import time
21 | from db import MongoDB
22 | from datetime import datetime, timedelta
23 | from kpi import Kpi
24 |
import sys
# Shared MongoDB handle for this report script.
db = MongoDB(_config.db_name)
from web.api import CommitRecord
from html import Html
from datetime import datetime

# Report window in days, and today's date -- used to tell "solved"
# failures from ones still failing today.
period = 7
today = datetime.now().strftime("%Y%m%d")
33 |
34 |
def gettimestamp(dd_str):
    """Convert a 'YYYY-MM-DD' date string to the UNIX timestamp of its
    local midnight."""
    parsed = time.strptime(dd_str + " 00:00:00", "%Y-%m-%d %H:%M:%S")
    return int(time.mktime(parsed))
39 |
40 |
def get_target_commits(records, days=None):
    """Return the records whose date falls within the report window.

    records: objects with a ``date`` datetime attribute.
    days: window length in days; defaults to the module-level ``period``.
    """
    window = period if days is None else days
    # Hoisted out of the loop: the original recomputed datetime.now() for
    # every record.
    begin = datetime.now() - timedelta(days=window)
    print(begin)
    need_commit = []
    for record in records:
        print(record.date)
        if record.date > begin:
            need_commit.append(record)
    return need_commit
51 |
52 |
def get_all_tasks(need_commit):
    """Collect per-task failure statistics over the given commits."""
    print(need_commit)
    all_tasks = {}
    for commit in need_commit:
        print(commit.commit)
        print(commit.date)
        for task, values in CommitRecord.get_tasks(commit.commit).items():
            if task not in all_tasks:
                all_tasks[task] = {'times': 0, 'commits': [], 'date': []}
            if not values['passed']:
                entry = all_tasks[task]
                entry['times'] += 1
                entry['date'].append(commit.date.strftime("%Y-%m-%d"))
                entry['commits'].append(commit.commit)
    print(all_tasks)
    return all_tasks
71 |
72 |
def gen_html(all_tasks, build_dict, sums):
    """gen html for summary and model infos section

    all_tasks: per-task failure stats from get_all_tasks().
    build_dict: revision -> TeamCity build info (loaded from teamcity.json).
    sums: total number of commits in the report window.
    """
    hh = Html()
    # wiki.txt holds the duty text that goes into the report header.
    with open('wiki.txt', 'r') as f:
        duty = f.read()
    failed_commit = get_failed_commit(all_tasks)
    suc = sums - len(failed_commit)
    hh.html_create(period, duty, sums, suc)

    for task, values in all_tasks.items():
        print(task, values)
        res = ''
        if values['times'] == 0:
            res += '''result: \tpass\t
'''
        else:
            # Link each failing commit to its TeamCity build page.
            cm_str = ''
            for cm in values['commits']:
                if cm in build_dict.keys():
                    cm_str += build_dict[cm]['weburl'] + ' '
            # A failure whose most recent occurrence is not today is
            # treated as already solved.
            status = "unsolved"
            if values['date'][-1] != today:
                status = "solved"
            res += '''result: failed
\tjobs: %s
status: %s\t
''' \
            %(cm_str, status)
        hh.html_add_param(task, res, task)
98 |
99 |
def get_failed_commit(all_tasks):
    """Return the distinct commits that had at least one failed task this week."""
    seen = set()
    failed = []
    for values in all_tasks.values():
        for cm in values['commits']:
            # Keep first-seen order; the set only speeds up membership tests.
            if cm not in seen:
                seen.add(cm)
                failed.append(cm)
    return failed
108 |
109 |
if __name__ == '__main__':
    import traceback
    try:
        # teamcity.json maps commit ids to teamcity build metadata (weburl).
        with open('teamcity.json') as json_file:
            build_dict = json.load(json_file)

        records = CommitRecord.get_all()
        need_commits = get_target_commits(records)
        all_tasks = get_all_tasks(need_commits)
        gen_html(all_tasks, build_dict, len(need_commits))
    except Exception as e:
        # Report generation is best-effort: log the failure rather than
        # aborting the surrounding CI job.
        print(e)
        print(traceback.format_exc())
123 |
--------------------------------------------------------------------------------
/continuous_evaluation_py23/Dockerfile:
--------------------------------------------------------------------------------
1 | # A image for building paddle binaries
2 | # Use cuda devel base image for both cpu and gpu environment
3 |
4 | # When you modify it, please be aware of cudnn-runtime version
5 | # and libcudnn.so.x in paddle/scripts/docker/build.sh
6 | # Because we use cuda9, otherwise bounch of this can be replaced with
7 | # paddlepaddle/paddle:latest-dev
8 | FROM nvidia/cuda:9.0-cudnn7-devel-ubuntu16.04
9 | MAINTAINER PaddlePaddle Authors
10 |
11 | ARG UBUNTU_MIRROR
12 | RUN /bin/bash -c 'if [[ -n ${UBUNTU_MIRROR} ]]; then sed -i 's#http://archive.ubuntu.com/ubuntu#${UBUNTU_MIRROR}#g' /etc/apt/sources.list; fi'
13 |
14 | # ENV variables
15 | ARG WITH_GPU
16 | ARG WITH_AVX
17 | ARG WITH_DOC
18 |
19 | ENV WOBOQ OFF
20 | ENV WITH_GPU=${WITH_GPU:-ON}
21 | ENV WITH_AVX=${WITH_AVX:-ON}
22 | ENV WITH_DOC=${WITH_DOC:-OFF}
23 |
24 | ENV HOME /root
25 | # Add bash enhancements
26 | COPY ./paddle/scripts/docker/root/ /root/
27 |
28 | RUN apt-get update && \
29 | apt-get install -y --allow-downgrades \
30 | git python-pip python-dev openssh-server bison \
31 | libnccl2 libnccl-dev \
32 | wget unzip unrar tar xz-utils bzip2 gzip coreutils ntp \
33 | curl sed grep graphviz libjpeg-dev zlib1g-dev \
34 | python-matplotlib gcc-4.8 g++-4.8 \
35 | automake locales clang-format swig doxygen cmake \
36 | liblapack-dev liblapacke-dev \
37 | clang-3.8 llvm-3.8 libclang-3.8-dev \
38 | net-tools libtool && \
39 | apt-get clean -y
40 |
41 | # Install Go and glide
42 | RUN wget -qO- https://storage.googleapis.com/golang/go1.8.1.linux-amd64.tar.gz | \
43 | tar -xz -C /usr/local && \
44 | mkdir /root/gopath && \
45 | mkdir /root/gopath/bin && \
46 | mkdir /root/gopath/src
47 | ENV GOROOT=/usr/local/go GOPATH=/root/gopath
48 | # should not be in the same line with GOROOT definition, otherwise docker build could not find GOROOT.
49 | ENV PATH=${PATH}:${GOROOT}/bin:${GOPATH}/bin
50 | # install glide
51 | RUN curl -s -q https://glide.sh/get | sh
52 |
53 | # Install TensorRT
54 | # following TensorRT.tar.gz is not the default official one, we do two miny changes:
55 | # 1. Remove the unnecessary files to make the library small. TensorRT.tar.gz only contains include and lib now,
56 | # and its size is only one-third of the official one.
57 | # 2. Manually add ~IPluginFactory() in IPluginFactory class of NvInfer.h, otherwise, it couldn't work in paddle.
58 | # See https://github.com/PaddlePaddle/Paddle/issues/10129 for details.
59 | RUN wget -qO- http://paddlepaddledeps.bj.bcebos.com/TensorRT-4.0.0.3.Ubuntu-16.04.4.x86_64-gnu.cuda-8.0.cudnn7.0.tar.gz | \
60 | tar -xz -C /usr/local && \
61 | cp -rf /usr/local/TensorRT/include /usr && \
62 | cp -rf /usr/local/TensorRT/lib /usr
63 |
64 | # git credential to skip password typing
65 | RUN git config --global credential.helper store
66 |
67 | # Fix locales to en_US.UTF-8
68 | RUN localedef -i en_US -f UTF-8 en_US.UTF-8
69 |
70 | # FIXME: due to temporary ipykernel dependency issue, specify ipykernel jupyter
71 | # version util jupyter fixes this issue.
72 |
73 | # specify sphinx version as 1.5.6 and remove -U option for [pip install -U
74 | # sphinx-rtd-theme] since -U option will cause sphinx being updated to newest
75 | # version(1.7.1 for now), which causes building documentation failed.
76 | RUN pip install --upgrade pip==9.0.3 && \
77 | pip install -U wheel && \
78 | pip install -U docopt PyYAML sphinx==1.5.6 && \
79 | pip install sphinx-rtd-theme==0.1.9 recommonmark
80 |
81 | RUN pip install pre-commit 'ipython==5.3.0' && \
82 | pip install 'ipykernel==4.6.0' 'jupyter==1.0.0' && \
83 | pip install opencv-python
84 |
85 | COPY ./python/requirements.txt /root/
86 | RUN pip install -r /root/requirements.txt
87 |
88 | # To fix https://github.com/PaddlePaddle/Paddle/issues/1954, we use
89 | # the solution in https://urllib3.readthedocs.io/en/latest/user-guide.html#ssl-py2
90 | RUN apt-get install -y libssl-dev libffi-dev
91 | RUN pip install certifi urllib3[secure]
92 |
93 |
# Install woboq_codebrowser to /woboq
# FIX(review): the original chained cd/cmake/make with bare line
# continuations and no '&&', which collapsed them into one bogus `cd`
# command, so the code browser was never actually configured or built.
RUN git clone https://github.com/woboq/woboq_codebrowser /woboq && \
    (cd /woboq && \
     cmake -DLLVM_CONFIG_EXECUTABLE=/usr/bin/llvm-config-3.8 \
           -DCMAKE_BUILD_TYPE=Release . && \
     make)
100 |
101 |
102 | # Configure OpenSSH server. c.f. https://docs.docker.com/engine/examples/running_ssh_service
103 | RUN mkdir /var/run/sshd
104 | RUN echo 'root:root' | chpasswd
105 | RUN sed -ri 's/^PermitRootLogin\s+.*/PermitRootLogin yes/' /etc/ssh/sshd_config
106 | RUN sed -ri 's/UsePAM yes/#UsePAM yes/g' /etc/ssh/sshd_config
107 | EXPOSE 22
108 |
109 | # build the app and host it
110 | RUN apt-get update && apt-get install -y python3 python3-pip mongodb openjdk-8-jdk ccache
111 | RUN pip3 install flask flask-cache pymongo xonsh numpy
112 |
113 | RUN apt-get update && apt install -y uwsgi nginx supervisor software-properties-common python-software-properties
114 |
115 | ENV NGINX_MAX_UPLOAD 0
116 |
117 | # By default, Nginx listens on port 80.
118 | # To modify this, change LISTEN_PORT environment variable.
119 | # (in a Dockerfile or with an option for `docker run`)
120 | ENV LISTEN_PORT 80
121 |
122 | # Which uWSGI .ini file should be used, to make it customizable
123 | ENV UWSGI_INI /app/uwsgi.ini
124 |
125 | # URL under which static (not modified by Python) files will be requested
126 | # They will be served by Nginx directly, without being handled by uWSGI
127 | ENV STATIC_URL /static
128 | # Absolute path in where the static files wil be
129 | ENV STATIC_PATH /app/static
130 |
131 | # If STATIC_INDEX is 1, serve / with /static/index.html directly (or the static URL configured)
132 | # ENV STATIC_INDEX 1
133 | ENV STATIC_INDEX 0
134 |
135 | # Add demo app
136 | RUN git clone --recursive https://github.com/PaddlePaddle/continuous_evaluation
137 |
138 | # Make /app/* available to be imported by Python globally to better support several use cases like Alembic migrations.
139 | ENV PYTHONPATH=/app
140 | EXPOSE 80
141 |
# development image default do build work
WORKDIR /continuous_evaluation/web/
# FIX(review): `CMD = [...]` is not valid exec-form syntax -- the `=` would
# be treated as the command to execute. CMD takes the JSON array directly.
CMD ["python3", "main.py"]
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
1 | # A image for building paddle binaries
2 | # Use cuda devel base image for both cpu and gpu environment
3 |
4 | # When you modify it, please be aware of cudnn-runtime version
5 | # and libcudnn.so.x in paddle/scripts/docker/build.sh
6 | # Because we use cuda9, otherwise bounch of this can be replaced with
7 | # paddlepaddle/paddle:latest-dev
8 | FROM nvidia/cuda:9.0-cudnn7-devel-ubuntu16.04
9 | MAINTAINER PaddlePaddle Authors
10 |
11 | ARG UBUNTU_MIRROR
12 | RUN /bin/bash -c 'if [[ -n ${UBUNTU_MIRROR} ]]; then sed -i 's#http://archive.ubuntu.com/ubuntu#${UBUNTU_MIRROR}#g' /etc/apt/sources.list; fi'
13 |
14 | # ENV variables
15 | ARG WITH_GPU
16 | ARG WITH_AVX
17 | ARG WITH_DOC
18 |
19 | ENV WOBOQ OFF
20 | ENV WITH_GPU=${WITH_GPU:-ON}
21 | ENV WITH_AVX=${WITH_AVX:-ON}
22 | ENV WITH_DOC=${WITH_DOC:-OFF}
23 |
24 | ENV HOME /root
25 | # Add bash enhancements
26 | COPY ./paddle/scripts/docker/root/ /root/
27 |
28 | RUN apt-get update && \
29 | apt-get install -y --allow-downgrades \
30 | git python-pip python-dev openssh-server bison \
31 | libnccl2 libnccl-dev \
32 | wget unzip unrar tar xz-utils bzip2 gzip coreutils ntp \
33 | curl sed grep graphviz libjpeg-dev zlib1g-dev \
34 | python-matplotlib gcc-4.8 g++-4.8 \
35 | automake locales clang-format swig doxygen cmake \
36 | liblapack-dev liblapacke-dev \
37 | clang-3.8 llvm-3.8 libclang-3.8-dev \
38 | net-tools libtool && \
39 | apt-get clean -y
40 |
41 | # Install Go and glide
42 | RUN wget -qO- https://storage.googleapis.com/golang/go1.8.1.linux-amd64.tar.gz | \
43 | tar -xz -C /usr/local && \
44 | mkdir /root/gopath && \
45 | mkdir /root/gopath/bin && \
46 | mkdir /root/gopath/src
47 | ENV GOROOT=/usr/local/go GOPATH=/root/gopath
48 | # should not be in the same line with GOROOT definition, otherwise docker build could not find GOROOT.
49 | ENV PATH=${PATH}:${GOROOT}/bin:${GOPATH}/bin
50 | # install glide
51 | RUN curl -s -q https://glide.sh/get | sh
52 |
53 | # Install TensorRT
54 | # following TensorRT.tar.gz is not the default official one, we do two miny changes:
55 | # 1. Remove the unnecessary files to make the library small. TensorRT.tar.gz only contains include and lib now,
56 | # and its size is only one-third of the official one.
57 | # 2. Manually add ~IPluginFactory() in IPluginFactory class of NvInfer.h, otherwise, it couldn't work in paddle.
58 | # See https://github.com/PaddlePaddle/Paddle/issues/10129 for details.
59 | RUN wget -qO- http://paddlepaddledeps.bj.bcebos.com/TensorRT-4.0.0.3.Ubuntu-16.04.4.x86_64-gnu.cuda-8.0.cudnn7.0.tar.gz | \
60 | tar -xz -C /usr/local && \
61 | cp -rf /usr/local/TensorRT/include /usr && \
62 | cp -rf /usr/local/TensorRT/lib /usr
63 |
64 | # git credential to skip password typing
65 | RUN git config --global credential.helper store
66 |
67 | # Fix locales to en_US.UTF-8
68 | RUN localedef -i en_US -f UTF-8 en_US.UTF-8
69 |
70 | # FIXME: due to temporary ipykernel dependency issue, specify ipykernel jupyter
71 | # version util jupyter fixes this issue.
72 |
73 | # specify sphinx version as 1.5.6 and remove -U option for [pip install -U
74 | # sphinx-rtd-theme] since -U option will cause sphinx being updated to newest
75 | # version(1.7.1 for now), which causes building documentation failed.
76 | RUN pip install --upgrade pip==9.0.3 && \
77 | pip install -U wheel && \
78 | pip install -U docopt PyYAML sphinx==1.5.6 && \
79 | pip install sphinx-rtd-theme==0.1.9 recommonmark
80 |
81 | RUN pip install pre-commit 'ipython==5.3.0' && \
82 | pip install 'ipykernel==4.6.0' 'jupyter==1.0.0' && \
83 | pip install opencv-python
84 |
85 | COPY ./python/requirements.txt /root/
86 | RUN pip install -r /root/requirements.txt
87 |
88 | # To fix https://github.com/PaddlePaddle/Paddle/issues/1954, we use
89 | # the solution in https://urllib3.readthedocs.io/en/latest/user-guide.html#ssl-py2
90 | RUN apt-get install -y libssl-dev libffi-dev
91 | RUN pip install certifi urllib3[secure]
92 |
93 |
# Install woboq_codebrowser to /woboq
# FIX(review): the original chained cd/cmake/make with bare line
# continuations and no '&&', which collapsed them into one bogus `cd`
# command, so the code browser was never actually configured or built.
RUN git clone https://github.com/woboq/woboq_codebrowser /woboq && \
    (cd /woboq && \
     cmake -DLLVM_CONFIG_EXECUTABLE=/usr/bin/llvm-config-3.8 \
           -DCMAKE_BUILD_TYPE=Release . && \
     make)
100 | RUN pip install sklearn;
101 | RUN pip install pandas;
102 | RUN pip install wget;
103 |
104 | # Configure OpenSSH server. c.f. https://docs.docker.com/engine/examples/running_ssh_service
105 | RUN mkdir /var/run/sshd
106 | RUN echo 'root:root' | chpasswd
107 | RUN sed -ri 's/^PermitRootLogin\s+.*/PermitRootLogin yes/' /etc/ssh/sshd_config
108 | RUN sed -ri 's/UsePAM yes/#UsePAM yes/g' /etc/ssh/sshd_config
109 | EXPOSE 22
110 |
111 | # build the app and host it
112 | RUN apt-get update && apt-get install -y python3 python3-pip mongodb openjdk-8-jdk ccache
113 | RUN pip3 install flask flask-cache pymongo xonsh numpy
114 |
115 | RUN apt-get update && apt install -y uwsgi nginx supervisor software-properties-common python-software-properties
116 |
117 | ENV NGINX_MAX_UPLOAD 0
118 |
119 | # By default, Nginx listens on port 80.
120 | # To modify this, change LISTEN_PORT environment variable.
121 | # (in a Dockerfile or with an option for `docker run`)
122 | ENV LISTEN_PORT 80
123 |
124 | # Which uWSGI .ini file should be used, to make it customizable
125 | ENV UWSGI_INI /app/uwsgi.ini
126 |
127 | # URL under which static (not modified by Python) files will be requested
128 | # They will be served by Nginx directly, without being handled by uWSGI
129 | ENV STATIC_URL /static
130 | # Absolute path in where the static files wil be
131 | ENV STATIC_PATH /app/static
132 |
133 | # If STATIC_INDEX is 1, serve / with /static/index.html directly (or the static URL configured)
134 | # ENV STATIC_INDEX 1
135 | ENV STATIC_INDEX 0
136 |
137 | # Add demo app
138 | RUN git clone --recursive https://github.com/PaddlePaddle/continuous_evaluation
139 |
140 | # Make /app/* available to be imported by Python globally to better support several use cases like Alembic migrations.
141 | ENV PYTHONPATH=/app
142 | EXPOSE 80
143 |
# development image default do build work
WORKDIR /continuous_evaluation/web/
# FIX(review): `CMD = [...]` is not valid exec-form syntax -- the `=` would
# be treated as the command to execute. CMD takes the JSON array directly.
CMD ["python3", "main.py"]
147 |
--------------------------------------------------------------------------------
/tools/html.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | #coding: utf-8
3 | ################################################################################
4 | #
5 | # Copyright (c) 2017 Baidu.com, Inc. All Rights Reserved
6 | #
7 | ################################################################################
8 | """
9 | This class is used to run remote command
10 |
11 | Authors: guochaorong(guochaorong@baidu.com)
12 | Date: 2018/07/11
13 | """
14 |
15 | #from common import config
16 | import time
17 | from datetime import datetime, timedelta
18 |
19 | #CONF = config.CommonConf()
20 |
21 |
class Html(object):
    """Builds an HTML report file by repeatedly rewriting it with string insertion.

    NOTE(review): the HTML tag markup inside the template strings and the
    ``content.find("")`` anchor searches below appears to have been stripped
    by a text conversion of this file.  An empty ``find("")`` always returns
    0, so every insertion currently lands at the very start of the file --
    restore the original tag anchors before trusting this class's output.
    """

    def __init__(self, html_name_only="index.html"):
        # File that every method below re-reads and rewrites in place.
        self.html_name = html_name_only
        pass

    def html_create_logs(self):
        """Create the skeleton page for the log listing (header row only)."""
        f = open(self.html_name, 'w')
        message = """




| timestamp |
file no |
class name |
log type |
log detail |



"""
        f.write(message)
        f.close()

    def html_create(self, period, duty, sums=0, suc=0):
        """Create the weekly-report skeleton page.

        Args:
            period: reporting window length in days (start = now - period).
            duty: free text with the on-duty engineer's problem records.
            sums: total number of full-model runs in the window.
            suc: number of successful runs; failures rendered as sums - suc.
        """
        # Timestamps are shifted by +8h to display China Standard Time.
        begin = (datetime.now() + timedelta(hours=8) - timedelta(days=period)
                 ).strftime('%Y-%m-%d %H:%M:%S')
        dd_str = (
            datetime.now() + timedelta(hours=8)).strftime('%Y-%m-%d %H:%M:%S')
        f = open(self.html_name, 'w')
        message = """



CE weekly report:


开始时间
%s
结束时间
%s


本周summary:
一共执行全量模型:%s次,成功:%s次, 失败:%s次


本周值班主要问题:

值班人问题记录
%s
所有模型执行情况:




| models |
result of model's kpis |
detail msg |



""" % (begin, dd_str, sums, suc, sums - suc, duty)
        f.write(message)
        f.close()

    def html_add_script(self):
        """Insert a script block before the anchor position."""
        file = open(self.html_name, 'r')
        content = file.read()

        # NOTE(review): empty search string always yields pos 0 -- the anchor
        # markup was presumably lost; confirm against the original file.
        pos = content.find("")
        if pos != -1:
            contentadd = """

            """
            content = content[:pos] + contentadd + content[pos:]
            file = open(self.html_name, "w")
            file.write(content)
        file.close()

    def html_add_logs(self, asctime, filenum, classname, logtype, info):
        """Append one log row (timestamp, file no, class, type, detail)."""
        file = open(self.html_name, 'r')
        content = file.read()
        contentadd = """
| """ + asctime + """ |
""" + filenum + """ |
""" + classname + """ |
""" + logtype + """ |
""" + info + """ |

"""
        print(contentadd)
        pos = content.find("")
        if pos != -1:
            content = content[:pos] + contentadd + content[pos:]
            file = open(self.html_name, "w")
            file.write(content)
        file.close()

    def html_add_param(self, case, result, log):
        """Append one model row: task name, kpi result text, detail link.

        NOTE(review): the template below contains one ``%s`` placeholder but
        the format tuple supplies two values -- a second placeholder (likely
        an href attribute) was probably lost with the stripped markup; as
        shown this would raise at runtime. Verify against the original file.
        """
        file = open(self.html_name, 'r')
        content = file.read()
        contentadd = """
| """ + case + """ |
""" + str(result) + """ |
%s |

""" % (log, log)
        pos = content.find("")
        if pos != -1:
            content = content[:pos] + contentadd + content[pos:]
            file = open(self.html_name, "w")
            file.write(content)
        file.close()

    def html_add_scene(self, scene="create and delete blb"):
        """Append a scene-name separator row."""
        file = open(self.html_name, 'r')
        content = file.read()
        contentadd = """
| scene name :""" + scene + """ |

"""
        pos = content.find("")
        if pos != -1:
            content = content[:pos] + contentadd + content[pos:]
            file = open(self.html_name, "w")
            file.write(content)
        file.close()

    def html_add_describe(self):
        """Append a region/date description row.

        NOTE(review): ``region`` is undefined because the CONF lookup was
        commented out -- calling this method raises NameError.  Restore the
        config lookup (or take region as a parameter) before using it.
        """
        #region = CONF.get_conf("region", "sandbox")
        now = time.strftime('%y-%m-%d %H:%M:%S', time.localtime(time.time()))

        file = open(self.html_name, 'r')
        content = file.read()
        contentadd = """
| region :""" + region + """ |
data :""" + now + """ |

"""
        pos = content.find("")
        if pos != -1:
            content = content[:pos] + contentadd + content[pos:]
            file = open(self.html_name, "w")
            file.write(content)
        file.close()

    def html_Statistics(self):
        """Count 'OK'/'Fail' markers in the page and append a summary row."""
        ok_count = 0
        fail_count = 0
        with open(self.html_name, 'r') as f:
            alllines = f.readlines()
            for line in alllines:
                if 'OK' in line:
                    ok_count += 1
                if 'Fail' in line:
                    fail_count += 1
        # Guard against division by zero when no markers were found.
        if ((ok_count + fail_count) != 0):
            count = float(ok_count) / (ok_count + fail_count)
        else:
            count = 0
        contentadd = """
| ok_count:""" + str(ok_count) + """ |
fail_count:""" + str(fail_count) + """ |
percent:""" + str('%.3f' % count) + """ |

"""
        file = open(self.html_name, 'r')
        content = file.read()
        pos = content.find("")
        if pos != -1:
            content = content[:pos] + contentadd + content[pos:]
            file = open(self.html_name, "w")
            file.write(content)
        file.close()
215 |
--------------------------------------------------------------------------------
/continuous_evaluation_py23/tools/html.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | #coding: utf-8
3 | ################################################################################
4 | #
5 | # Copyright (c) 2017 Baidu.com, Inc. All Rights Reserved
6 | #
7 | ################################################################################
8 | """
9 | This class is used to run remote command
10 |
11 | Authors: guochaorong(guochaorong@baidu.com)
12 | Date: 2018/07/11
13 | """
14 |
15 | #from common import config
16 | import time
17 | from datetime import datetime, timedelta
18 |
19 | #CONF = config.CommonConf()
20 |
21 |
class Html(object):
    """Builds an HTML report file by repeatedly rewriting it with string insertion.

    NOTE(review): the HTML tag markup inside the template strings and the
    ``content.find("")`` anchor searches below appears to have been stripped
    by a text conversion of this file.  An empty ``find("")`` always returns
    0, so every insertion currently lands at the very start of the file --
    restore the original tag anchors before trusting this class's output.
    """

    def __init__(self, html_name_only="index.html"):
        # File that every method below re-reads and rewrites in place.
        self.html_name = html_name_only
        pass

    def html_create_logs(self):
        """Create the skeleton page for the log listing (header row only)."""
        f = open(self.html_name, 'w')
        message = """




| timestamp |
file no |
class name |
log type |
log detail |



"""
        f.write(message)
        f.close()

    def html_create(self, period, duty, sums=0, suc=0):
        """Create the weekly-report skeleton page.

        Args:
            period: reporting window length in days (start = now - period).
            duty: free text with the on-duty engineer's problem records.
            sums: total number of full-model runs in the window.
            suc: number of successful runs; failures rendered as sums - suc.
        """
        # Timestamps are shifted by +8h to display China Standard Time.
        begin = (datetime.now() + timedelta(hours=8) - timedelta(days=period)
                 ).strftime('%Y-%m-%d %H:%M:%S')
        dd_str = (
            datetime.now() + timedelta(hours=8)).strftime('%Y-%m-%d %H:%M:%S')
        f = open(self.html_name, 'w')
        message = """



CE weekly report:


开始时间
%s
结束时间
%s


本周summary:
一共执行全量模型:%s次,成功:%s次, 失败:%s次


本周值班主要问题:

值班人问题记录
%s
所有模型执行情况:




| models |
result of model's kpis |
detail msg |



""" % (begin, dd_str, sums, suc, sums - suc, duty)
        f.write(message)
        f.close()

    def html_add_script(self):
        """Insert a script block before the anchor position."""
        file = open(self.html_name, 'r')
        content = file.read()

        # NOTE(review): empty search string always yields pos 0 -- the anchor
        # markup was presumably lost; confirm against the original file.
        pos = content.find("")
        if pos != -1:
            contentadd = """

            """
            content = content[:pos] + contentadd + content[pos:]
            file = open(self.html_name, "w")
            file.write(content)
        file.close()

    def html_add_logs(self, asctime, filenum, classname, logtype, info):
        """Append one log row (timestamp, file no, class, type, detail)."""
        file = open(self.html_name, 'r')
        content = file.read()
        contentadd = """
| """ + asctime + """ |
""" + filenum + """ |
""" + classname + """ |
""" + logtype + """ |
""" + info + """ |

"""
        print(contentadd)
        pos = content.find("")
        if pos != -1:
            content = content[:pos] + contentadd + content[pos:]
            file = open(self.html_name, "w")
            file.write(content)
        file.close()

    def html_add_param(self, case, result, log):
        """Append one model row: task name, kpi result text, detail link.

        NOTE(review): the template below contains one ``%s`` placeholder but
        the format tuple supplies two values -- a second placeholder (likely
        an href attribute) was probably lost with the stripped markup; as
        shown this would raise at runtime. Verify against the original file.
        """
        file = open(self.html_name, 'r')
        content = file.read()
        contentadd = """
| """ + case + """ |
""" + str(result) + """ |
%s |

""" % (log, log)
        pos = content.find("")
        if pos != -1:
            content = content[:pos] + contentadd + content[pos:]
            file = open(self.html_name, "w")
            file.write(content)
        file.close()

    def html_add_scene(self, scene="create and delete blb"):
        """Append a scene-name separator row."""
        file = open(self.html_name, 'r')
        content = file.read()
        contentadd = """
| scene name :""" + scene + """ |

"""
        pos = content.find("")
        if pos != -1:
            content = content[:pos] + contentadd + content[pos:]
            file = open(self.html_name, "w")
            file.write(content)
        file.close()

    def html_add_describe(self):
        """Append a region/date description row.

        NOTE(review): ``region`` is undefined because the CONF lookup was
        commented out -- calling this method raises NameError.  Restore the
        config lookup (or take region as a parameter) before using it.
        """
        #region = CONF.get_conf("region", "sandbox")
        now = time.strftime('%y-%m-%d %H:%M:%S', time.localtime(time.time()))

        file = open(self.html_name, 'r')
        content = file.read()
        contentadd = """
| region :""" + region + """ |
data :""" + now + """ |

"""
        pos = content.find("")
        if pos != -1:
            content = content[:pos] + contentadd + content[pos:]
            file = open(self.html_name, "w")
            file.write(content)
        file.close()

    def html_Statistics(self):
        """Count 'OK'/'Fail' markers in the page and append a summary row."""
        ok_count = 0
        fail_count = 0
        with open(self.html_name, 'r') as f:
            alllines = f.readlines()
            for line in alllines:
                if 'OK' in line:
                    ok_count += 1
                if 'Fail' in line:
                    fail_count += 1
        # Guard against division by zero when no markers were found.
        if ((ok_count + fail_count) != 0):
            count = float(ok_count) / (ok_count + fail_count)
        else:
            count = 0
        contentadd = """
| ok_count:""" + str(ok_count) + """ |
fail_count:""" + str(fail_count) + """ |
percent:""" + str('%.3f' % count) + """ |

"""
        file = open(self.html_name, 'r')
        content = file.read()
        pos = content.find("")
        if pos != -1:
            content = content[:pos] + contentadd + content[pos:]
            file = open(self.html_name, "w")
            file.write(content)
        file.close()
215 |
--------------------------------------------------------------------------------
/web/api.py:
--------------------------------------------------------------------------------
1 | __all__ = [
2 | "CommitRecord",
3 | "TaskRecord",
4 | "KpiRecord",
5 | ]
6 |
7 | import sys
8 | sys.path.append('pypage')
9 | sys.path.append('..')
10 | import _config
11 | import json
12 | from db import MongoDB
13 | from datetime import datetime, timedelta
14 | from kpi import Kpi
15 |
16 | db = MongoDB(_config.db_name, _config.db_host, _config.db_port)
17 |
18 |
class objdict(dict):
    """A dict whose keys are also readable/writable as attributes."""

    def __setattr__(self, key, value):
        # Store attributes as ordinary dict items.
        self[key] = value

    def __getattr__(self, item):
        # FIX: translate a missing key into AttributeError so that hasattr()
        # and getattr(obj, name, default) behave correctly; the original
        # leaked KeyError, which those builtins do not catch.
        try:
            return self[item]
        except KeyError:
            raise AttributeError(item)
25 |
26 |
class CommitRecord:
    """One evaluated commit and its task results, backed by the module db."""

    def __init__(self, commit=''):
        # Full commit hash this record describes.
        self.commit = commit
        self.short_commit = ""
        self.date = None  # datetime
        self.info = ""

    @staticmethod
    def get_all():
        ''' Get all commit records, and sort by latest to oldest.
        returns: list of CommitRecord
        '''
        # sort by 'date' in ascending order
        commits = db.find_sections(_config.table_name,
                {'type': 'kpi'}, {'commitid': 1, "_id": 0}, "date")
        commit_ids = []
        # Deduplicate commit ids while preserving the db sort order.
        for commit in commits:
            if commit['commitid'] not in commit_ids:
                commit_ids.append(commit['commitid'])

        records = []
        for commit in commit_ids:
            commitobj = CommitRecord(commit)
            tasks = commitobj.__get_db_record()
            commitobj.commit = commit
            # NOTE(review): this sets 'shortcommit' while __init__ declares
            # 'short_commit' -- the two attribute names never meet.
            commitobj.shortcommit = commit[:7]
            # Stored date is a unix timestamp; +8h shifts UTC to CST.
            commitobj.date = datetime.utcfromtimestamp(int(tasks[0]['date'])) + \
                timedelta(hours=8)

            # A commit passes only when every one of its tasks passed.
            commitobj.passed = tasks_success(tasks)
            records.append(commitobj)
        return records

    @staticmethod
    def get_tasks(commit):
        ''' Get the task details belong to a commit.
        returns: dict of TaskRecord
        keys: task name,
        values: TaskRecord '''
        record = CommitRecord(commit)
        tasks = record.__get_db_record()
        print (tasks)
        res = objdict()
        for task in tasks:
            taskobj = TaskRecord(commit, task['task'], task['infos'],
                    task['passed'])
            taskobj.kpis = taskobj.get_kpis()
            res[taskobj.name] = taskobj
        return res

    def __get_db_record(self):
        ''' get the corresponding tasks from database.
        '''
        # All kpi documents for this commit, from the configured collection.
        return db.finds(_config.table_name,
                {'type': 'kpi',
                 'commitid': self.commit})
83 |
84 |
class TaskRecord(objdict):
    """One model/task run of a commit; attribute access comes from objdict."""

    def __init__(self, commit, name, infos, passed):
        self.name = name
        # 'task' mirrors 'name' to match the db field naming.
        self.task = name
        # dict of KpiRecord
        self.kpis = []
        self.infos = infos
        self.passed = passed
        self.commitid = commit

    def get_kpis(self):
        '''Get kpis details belong to a task.
        returns dict of KpiRecord
        keys: kpi name,
        values: KpiRecord'''
        task_info = self.__get_db_record()
        kpi_infos = {}
        for kpi in task_info['kpis-keys']:
            kpiobj = KpiRecord(kpi)
            kpi_infos[kpi] = kpiobj.get_kpi_info(task_info)
        return kpi_infos

    def __get_db_record(self):
        ''' get the corresponding kpis from database'''
        return db.find_one(_config.table_name, {'type': 'kpi', \
                'commitid': self.commitid, 'task': self.name})
111 |
class KpiRecord:
    """Per-KPI values and metadata extracted from a task's db document."""

    def __init__(self, name):
        self.name = name
        # list of list of float
        self.values = []
        self.type = ""
        self.avg = 0
        self.activeds = False
        self.unit = ""
        self.desc = ""

    def get_kpi_info(self, task_info):
        '''Get the kpi infos according to the kpi name'''
        # Locate this kpi's index; 'i' is deliberately reused after the loop.
        # NOTE(review): if the name is absent, 'i' silently points at the
        # last kpi -- confirm callers always pass a valid kpi name.
        for i in range(len(task_info['kpis-keys'])):
            if self.name == task_info['kpis-keys'][i]:
                break
        def safe_get_fields(field):
            # Optional fields may be missing on older db records.
            if field in task_info:
                return task_info[field]
            return None
        #To keep the kpi datas in order, we should process the data one by one.
        kpi_vals = json.loads(task_info['kpis-values'])
        self.values = kpi_vals[i]
        self.type = task_info['kpi-types'][i]
        # Kpi.dic maps a kpi type name to its class; cal_kpi aggregates values.
        self.avg = '%.4f' % Kpi.dic.get(self.type).cal_kpi(data=kpi_vals[i])
        infos = parse_infos(task_info['infos'])
        self.info = infos[self.name]

        activeds = safe_get_fields('kpi-activeds')
        self.activeds = activeds[i] if activeds else True

        unit_reprs = safe_get_fields('kpi-unit-reprs')
        descs = safe_get_fields('kpi-descs')

        self.unit = "(%s)" % unit_reprs[i] if unit_reprs else ""
        self.desc = descs[i] if descs else ""

        # Backfill default desc/unit for well-known kpi name patterns.
        self.set_infos()

        return (self.values, self.type, self.avg, self.info, self.activeds,
                self.unit, self.desc)

    def set_infos(self):
        """Backfill human-readable desc/unit based on the kpi name."""
        #key = ['acc', 'cost', 'loss', 'speed', 'memory', 'duration', 'ppl']
        types = ['train', 'test']
        for t in types:
            if '_acc' in self.name and t in self.name:
                if not self.desc:
                    self.desc = '%s accuracy, 0 to 1' % t
                if self.unit == "(None)":
                    self.unit = '(100%)'
            elif 'cost' in self.name and t in self.name:
                if not self.desc:
                    self.desc = '%s loss function value' % t
                if self.unit == "(None)":
                    self.unit = '(100%)'
            elif 'speed' in self.name and t in self.name:
                if not self.desc:
                    self.desc = '%s speed ' % t
                if self.unit == "(None)":
                    self.unit = '(images/s)'
            elif 'gpu_memory' in self.name:
                if not self.desc:
                    self.desc = 'gpu memory usage'
                if self.unit == "(None)":
                    self.unit = '(MiB)'
            elif 'duration' in self.name:
                if not self.desc:
                    self.desc = 'time takes for exec'
                if self.unit == "(None)":
                    self.unit = '(s)'
            elif 'ppl' in self.name and t in self.name:
                if not self.desc:
                    self.desc = 'the ppl of %s ' % t
186 |
187 |
class objdict(dict):
    """Attribute-style access to dict keys.

    NOTE(review): this is a duplicate of the objdict defined near the top of
    this module; the second definition simply rebinds the name, so one of
    the two can be deleted.
    """

    def __setattr__(self, key, value):
        self[key] = value

    def __getattr__(self, item):
        return self[item]
194 |
195 |
def parse_infos(infos):
    '''
    input format: [kpi0] xxxx [kpi1] xxx

    return dic of (kpi, info)
    '''
    parsed = {}
    for raw in infos:
        # The kpi name sits between the first '[' and the next ']';
        # the info text starts one space after the closing bracket.
        start = raw.find('[') + 1
        end = raw.find(']', start)
        parsed[raw[start:end]] = raw[end + 2:]
    return parsed
210 |
211 |
def tasks_success(tasks):
    """Return True only when every task record in `tasks` passed."""
    return all(task['passed'] for task in tasks)
216 |
--------------------------------------------------------------------------------
/continuous_evaluation_py23/web/api.py:
--------------------------------------------------------------------------------
1 | __all__ = [
2 | "CommitRecord",
3 | "TaskRecord",
4 | "KpiRecord",
5 | ]
6 |
7 | import sys
8 | sys.path.append('pypage')
9 | sys.path.append('..')
10 | import _config
11 | import json
12 | from db import MongoDB
13 | from datetime import datetime, timedelta
14 | from kpi import Kpi
15 |
16 | db = MongoDB(_config.db_name, _config.db_host, _config.db_port)
17 |
18 |
class objdict(dict):
    """A dict whose keys are also readable/writable as attributes."""

    def __setattr__(self, key, value):
        # Store attributes as ordinary dict items.
        self[key] = value

    def __getattr__(self, item):
        # FIX: translate a missing key into AttributeError so that hasattr()
        # and getattr(obj, name, default) behave correctly; the original
        # leaked KeyError, which those builtins do not catch.
        try:
            return self[item]
        except KeyError:
            raise AttributeError(item)
25 |
26 |
class CommitRecord:
    """One evaluated commit; ``table_name`` selects the model's collection."""

    def __init__(self, commit=''):
        # Full commit hash this record describes.
        self.commit = commit
        self.short_commit = ""
        self.date = None  # datetime
        self.info = ""

    @staticmethod
    def get_all(table_name):
        ''' Get all commit records, and sort by latest to oldest.
        returns: list of CommitRecord
        '''
        # sort by 'date' in ascending order
        commits = db.find_sections(table_name,
                {'type': 'kpi'}, {'commitid': 1, "_id": 0}, "date")
        commit_ids = []
        # Deduplicate commit ids while preserving the db sort order.
        for commit in commits:
            if commit['commitid'] not in commit_ids:
                commit_ids.append(commit['commitid'])

        records = []
        for commit in commit_ids:
            commitobj = CommitRecord(commit)
            tasks = commitobj.__get_db_record(table_name)
            commitobj.commit = commit
            # NOTE(review): this sets 'shortcommit' while __init__ declares
            # 'short_commit' -- the two attribute names never meet.
            commitobj.shortcommit = commit[:7]
            # Stored date is a unix timestamp; +8h shifts UTC to CST.
            commitobj.date = datetime.utcfromtimestamp(int(tasks[0]['date'])) + \
                timedelta(hours=8)

            # A commit passes only when every one of its tasks passed.
            commitobj.passed = tasks_success(tasks)
            records.append(commitobj)
        return records

    @staticmethod
    def get_tasks(table_name, commit):
        ''' Get the task details belong to a commit.
        returns: dict of TaskRecord
        keys: task name,
        values: TaskRecord '''
        record = CommitRecord(commit)
        tasks = record.__get_db_record(table_name)
        #print (tasks)
        res = objdict()
        for task in tasks:
            taskobj = TaskRecord(commit, task['task'], task['infos'],
                    task['passed'])
            taskobj.kpis = taskobj.get_kpis(table_name)
            res[taskobj.name] = taskobj
        return res

    def __get_db_record(self, table_name):
        ''' get the corresponding tasks from database.
        '''
        # All kpi documents for this commit in the given collection.
        return db.finds(table_name,
                {'type': 'kpi',
                 'commitid': self.commit})

    @staticmethod
    def get_all_tables():
        '''
        get all tables
        '''
        # Each table/collection corresponds to one evaluated model suite.
        return db.all_tables()
90 |
91 |
class TaskRecord(objdict):
    '''Evaluation result of a single task for one commit.

    Inherits objdict, so every attribute is also reachable as a dict item.
    '''

    def __init__(self, commit, name, infos, passed):
        self.name = name
        self.task = name
        # replaced later by a dict of KpiRecord info tuples
        self.kpis = []
        self.infos = infos
        self.passed = passed
        self.commitid = commit

    def get_kpis(self, table_name):
        '''Get the KPI details that belong to this task.

        returns: dict mapping kpi name -> KPI info tuple.'''
        record = self.__get_db_record(table_name)
        return {
            key: KpiRecord(key).get_kpi_info(record)
            for key in record['kpis-keys']
        }

    def __get_db_record(self, table_name):
        '''Fetch this task's record for its commit from the database.'''
        query = {'type': 'kpi', 'commitid': self.commitid, 'task': self.name}
        return db.find_one(table_name, query)
118 |
class KpiRecord:
    '''A single KPI of a task: raw values, type, average and display metadata.'''

    def __init__(self, name):
        self.name = name
        # list of list of float, one entry per persisted record
        self.values = []
        self.type = ""
        self.avg = 0
        self.activeds = False
        self.unit = ""
        self.desc = ""

    def get_kpi_info(self, task_info):
        '''Collect this KPI's info out of a stored task record.

        task_info: task record as stored in the database; this KPI's name
            must appear in task_info['kpis-keys'].
        returns: tuple (values, type, avg, info, activeds, unit, desc)
        raises: ValueError when the KPI name is unknown to the task.
        '''
        # BUGFIX: the original manual scan silently fell through to the
        # *last* index when the name was missing, and raised NameError on
        # an empty key list; index() fails loudly instead.
        i = task_info['kpis-keys'].index(self.name)

        def safe_get_fields(field):
            # older records may not carry the optional fields
            if field in task_info:
                return task_info[field]
            return None

        # To keep the kpi datas in order, we should process the data one by one.
        kpi_vals = json.loads(task_info['kpis-values'])
        self.values = kpi_vals[i]
        self.type = task_info['kpi-types'][i]
        # look up the evaluator class by name to aggregate the raw values
        self.avg = '%.4f' % Kpi.dic.get(self.type).cal_kpi(data=kpi_vals[i])
        infos = parse_infos(task_info['infos'])
        self.info = infos[self.name]

        activeds = safe_get_fields('kpi-activeds')
        self.activeds = activeds[i] if activeds else True

        unit_reprs = safe_get_fields('kpi-unit-reprs')
        descs = safe_get_fields('kpi-descs')

        self.unit = "(%s)" % unit_reprs[i] if unit_reprs else ""
        self.desc = descs[i] if descs else ""

        self.set_infos()

        return (self.values, self.type, self.avg, self.info, self.activeds,
                self.unit, self.desc)

    def set_infos(self):
        '''Fill in default desc/unit for well-known KPI name patterns.'''
        types = ['train', 'test']
        for t in types:
            if '_acc' in self.name and t in self.name:
                if not self.desc:
                    self.desc = '%s accuracy, 0 to 1' % t
                if self.unit == "(None)":
                    self.unit = '(100%)'
            elif 'cost' in self.name and t in self.name:
                if not self.desc:
                    self.desc = '%s loss function value' % t
                if self.unit == "(None)":
                    self.unit = '(100%)'
            elif 'speed' in self.name and t in self.name:
                if not self.desc:
                    self.desc = '%s speed ' % t
                if self.unit == "(None)":
                    self.unit = '(images/s)'
            elif 'gpu_memory' in self.name:
                if not self.desc:
                    self.desc = 'gpu memory usage'
                if self.unit == "(None)":
                    self.unit = '(MiB)'
            elif 'duration' in self.name:
                if not self.desc:
                    self.desc = 'time takes for exec'
                if self.unit == "(None)":
                    self.unit = '(s)'
            elif 'ppl' in self.name and t in self.name:
                if not self.desc:
                    self.desc = 'the ppl of %s ' % t
193 |
194 |
class objdict(dict):
    '''A dict whose items double as attributes.

    NOTE(review): duplicates the objdict defined earlier in this module --
    consider removing one copy.
    '''

    def __setattr__(self, key, value):
        # attribute assignment stores a dictionary item
        self[key] = value

    def __getattr__(self, item):
        # BUGFIX: raise AttributeError (not KeyError) for missing keys so
        # hasattr() and getattr(obj, name, default) behave correctly.
        try:
            return self[item]
        except KeyError:
            raise AttributeError(item)
201 |
202 |
def parse_infos(infos):
    '''Split bracketed info strings into a {kpi: info} mapping.

    Each entry looks like "[kpi_name] message"; the bracketed token
    becomes the key and the text after "] " becomes the value.
    '''
    result = {}
    for entry in infos:
        start = entry.find('[') + 1
        end = entry.find(']', start)
        result[entry[start:end]] = entry[end + 2:]
    return result
217 |
218 |
def tasks_success(tasks):
    '''Return True only when every task record passed.

    tasks: iterable of dict-like records with a boolean 'passed' field.
    An empty iterable counts as success.
    '''
    return all(task['passed'] for task in tasks)
223 |
--------------------------------------------------------------------------------
/kpi.py:
--------------------------------------------------------------------------------
1 | from __future__ import division
2 | import os
3 | import json
4 | import numpy as np
5 | import logging
6 | from _config import pjoin
7 |
8 |
# Project-specific failure error.
# NOTE(review): not raised anywhere in this file -- presumably used by the
# task scripts that import this module; confirm before removing.
class TestError(Exception):
    pass
11 |
12 |
class Kpi(object):
    '''Interface for a KPI (key performance indicator) tracker.

    Concrete evaluators subclass this and register themselves in `Kpi.dic`
    via `__register__` so they can later be looked up by class name.
    '''
    # registry: class name -> Kpi subclass
    dic = {}

    def __init__(self,
                 name,
                 desc='',
                 out_file=None,
                 his_file=None,
                 develop_file=None,
                 actived=False,
                 unit_repr=None):
        ''' Interface for Kpi tracker.
        name: str, KPI name.
        desc: the description of this task.
        out_file: file the current records are persisted to.
        his_file: history baseline file; defaults to latest_kpis/<out_file>.
        develop_file: develop baseline file; defaults to develop_kpis/<out_file>.
        actived: whether this test is turned on.
            The test will yield error if failed only if it is actived.
        unit_repr: the unit of the KPI, for train_duration, ms for example.
        '''
        self.name = name
        self.desc = desc
        self.out_file = out_file
        # BUGFIX: deriving the default paths used to crash with a TypeError
        # ("latest_kpis/" + None) when out_file was omitted; keep them None
        # in that case instead.
        if his_file is None:
            his_file = "latest_kpis/" + out_file if out_file is not None else None
        if develop_file is None:
            develop_file = "develop_kpis/" + out_file if out_file is not None else None
        self.his_file = his_file
        self.develop_file = develop_file
        self.actived = actived
        self.unit_repr = unit_repr
        self.records = []

    def add_record(self, rcd):
        '''Append one raw record; it is written out later by persist().'''
        self.records.append(rcd)

    def evaluate(self):
        ''' Run the evaluation based on the records collected and history records. '''
        raise NotImplementedError

    def persist(self):
        ''' Persist the evaluation result in some way. '''
        raise NotImplementedError

    @staticmethod
    def compare_with(cur, other):
        ''' compare `cur` with `other` and return a float ratio to indicate how much
        changes cur based on other.
        The `other` is the denominator, the result is like +/- (other-cur)/other, the
        `+/-` will make the result a positive ratio if `cur` is better, negative other-
        wise.
        '''
        raise NotImplementedError

    @staticmethod
    def cal_kpi(data):
        ''' Calculate the KPI (a scalar) based on raw `data`.
        This is just a default implementation (plain average), free to customize. '''
        return np.average(data)

    @property
    def cur_data(self):
        raise NotImplementedError

    @property
    def baseline_data(self):
        raise NotImplementedError

    @staticmethod
    def __register__(factor):
        '''Register a Kpi subclass under its class name.

        factor should be a subclass of Kpi. Re-registering the same class is
        a no-op; registering a different class under an existing name fails.
        '''
        assert issubclass(factor, Kpi)
        key = factor.__name__
        # setdefault both inserts and guards against a conflicting rebind,
        # so no further assignment is needed.
        assert Kpi.dic.setdefault(key, factor) is factor, \
            "duplicate register %s with a different class" % key
83 |
84 |
class GreaterWorseKpi(Kpi):
    ''' Evaluator for any factor where a larger value is worse, training cost for example. '''

    def __init__(self,
                 name,
                 diff_thre,
                 skip_head=2,
                 actived=False,
                 unit_repr=None,
                 desc=None):
        '''
        name: KPI name; records are persisted to "<name>_factor.txt".
        diff_thre: difference threshold -- maximum tolerated worsening ratio.
        skip_head: number of leading (warm-up) records to skip.
        actived: whether a failure of this KPI fails the whole task.
        '''
        super(GreaterWorseKpi, self).__init__(
            name,
            out_file='%s_factor.txt' % name,
            actived=actived,
            unit_repr=unit_repr,
            desc=desc)
        self.skip_head = skip_head
        self.diff_thre = diff_thre

    def evaluate(self, root):
        '''Compare the current records under `root` with the baselines.

        It seems that compare every batch is too sensitive. So we just compare KPI.
        returns: bool, True when the worsening stays below diff_thre.
        '''
        self.root = root
        cur_data = load_records_from(pjoin(root, self.out_file))[
            self.skip_head:]
        his_data = load_records_from(pjoin(root, self.his_file))[
            self.skip_head:]

        self.ratio_develop = 0
        # BUGFIX: the existence check used the bare relative path while the
        # data itself is loaded from pjoin(root, ...); check the same path
        # that is read, otherwise the develop comparison silently depends
        # on the current working directory.
        develop_path = pjoin(root, self.develop_file)
        if os.path.exists(develop_path):
            develop_data = load_records_from(develop_path)[
                self.skip_head:]
            if len(develop_data) > 0:
                self.ratio_develop = self.compare_with(cur_data, develop_data)

        self.ratio = self.compare_with(cur_data, his_data)
        # compare_with() is positive on improvement, so -ratio is the
        # amount of worsening.
        return (-self.ratio) < self.diff_thre

    @staticmethod
    def compare_with(cur, other):
        '''Positive ratio when `cur` improved (shrank) relative to `other`.'''
        cur_kpi = GreaterWorseKpi.cal_kpi(cur)
        other_kpi = GreaterWorseKpi.cal_kpi(other)
        return (other_kpi - cur_kpi) / other_kpi

    @property
    def cur_data(self):
        '''Current records loaded from out_file (requires self.root set).'''
        return load_records_from(pjoin(self.root, self.out_file))

    @property
    def baseline_data(self):
        '''History baseline records loaded from his_file (requires self.root set).'''
        return load_records_from(pjoin(self.root, self.his_file))

    def persist(self):
        '''Dump the collected records to out_file, one JSON list per line.'''
        lines = []
        is_iterable = False
        if self.records:
            # scalar records get wrapped so every line is a JSON array
            try:
                is_iterable = iter(self.records[0]) is not None
            except Exception as e:
                pass
        for rcd in self.records:
            if not is_iterable: rcd = [rcd]
            rcd = np.array(rcd)
            rcd = rcd.tolist()
            lines.append(json.dumps(rcd))

        # empty records still needs to create an empty file.
        with open(self.out_file, 'w') as f:
            f.write('\n'.join(lines))

    @property
    def fail_info(self):
        '''Human readable failure message for this KPI.'''
        info = "[{name}] failed, diff ratio: {ratio} larger than {thre}.".format(
            name=self.name, ratio=-self.ratio, thre=self.diff_thre)
        if not self.actived:
            info = "Task is disabled, " + info
        return info

    @property
    def success_info(self):
        '''Human readable success message for this KPI.'''
        info = "[{name}] pass".format(name=self.name)
        if not self.actived:
            info = "Task is disabled, " + info
        return info

    @property
    def detail_info(self):
        '''"name,ratio,trend" summary vs the history baseline.'''
        # trend: "+" improved, "-" worsened, "=" KPI disabled
        if not self.actived:
            trend = "="
        elif self.ratio < 0:
            trend = "-"
        else:
            trend = "+"
        return "{name},{ratio},{tren}".format(
            name=self.name, ratio=abs(self.ratio), tren=trend)

    @property
    def develop_info(self):
        '''"name,ratio,trend" summary vs the develop-branch baseline.'''
        if not self.actived:
            trend = "="
        elif self.ratio_develop < 0:
            trend = "-"
        else:
            trend = "+"
        return "{name},{ratio},{tren}".format(
            name=self.name, ratio=abs(self.ratio_develop), tren=trend)
197 |
198 |
class LessWorseKpi(GreaterWorseKpi):
    ''' Evaluator for any factors that less value is bad, trainning acc for example.

    Everything except the comparison direction is shared with
    GreaterWorseKpi: evaluate(), persist(), and the info properties are
    inherited unchanged (they were previously byte-identical copies) and
    dispatch to this class's compare_with via `self.compare_with`.
    '''

    def __init__(self,
                 name,
                 diff_thre,
                 skip_head=2,
                 actived=False,
                 unit_repr=None,
                 desc=None):
        '''
        diff_thre: difference threshold.
        '''
        # the parent already stores skip_head/diff_thre; no need to re-set
        super(LessWorseKpi, self).__init__(
            name,
            diff_thre,
            skip_head,
            actived=actived,
            unit_repr=unit_repr,
            desc=desc)

    @staticmethod
    def compare_with(cur, other):
        '''Positive ratio when `cur` improved (grew) relative to `other`.'''
        cur_kpi = LessWorseKpi.cal_kpi(cur)
        other_kpi = LessWorseKpi.cal_kpi(other)
        return (cur_kpi - other_kpi) / other_kpi
291 |
292 |
# Semantic aliases so task scripts can pick an evaluator by intent:
# cost/duration get worse when they grow, accuracy gets worse when it shrinks.
CostKpi = GreaterWorseKpi

DurationKpi = GreaterWorseKpi

AccKpi = LessWorseKpi
298 |
299 |
def load_records_from(file):
    '''Load KPI records from `file` into a numpy array.

    Each line of the file is one record serialized as JSON;
    for example, a real record might be:
        [[0.1, 0.3], [0.4, 0.2]]

    returns: np.ndarray with one entry per line.
    '''
    datas = []
    with open(file) as f:
        # iterate the file lazily instead of materializing all lines first
        for line in f:
            datas.append(np.array(json.loads(line.strip())))
    return np.array(datas)
313 |
314 |
315 | Kpi.__register__(GreaterWorseKpi)
316 | Kpi.__register__(LessWorseKpi)
317 |
--------------------------------------------------------------------------------
/continuous_evaluation_py23/kpi.py:
--------------------------------------------------------------------------------
1 | from __future__ import division
2 | import os
3 | import json
4 | import numpy as np
5 | import logging
6 | from _config import pjoin
7 |
8 |
# Project-specific failure error.
# NOTE(review): not raised anywhere in this file -- presumably used by the
# task scripts that import this module; confirm before removing.
class TestError(Exception):
    pass
11 |
12 |
class Kpi(object):
    '''Interface for a KPI (key performance indicator) tracker.

    Concrete evaluators subclass this and register themselves in `Kpi.dic`
    via `__register__` so they can later be looked up by class name.
    '''
    # registry: class name -> Kpi subclass
    dic = {}

    def __init__(self,
                 name,
                 desc='',
                 out_file=None,
                 his_file=None,
                 develop_file=None,
                 actived=False,
                 unit_repr=None):
        ''' Interface for Kpi tracker.
        name: str, KPI name.
        desc: the description of this task.
        out_file: file the current records are persisted to.
        his_file: history baseline file; defaults to latest_kpis/<out_file>.
        develop_file: develop baseline file; defaults to develop_kpis/<out_file>.
        actived: whether this test is turned on.
            The test will yield error if failed only if it is actived.
        unit_repr: the unit of the KPI, for train_duration, ms for example.
        '''
        self.name = name
        self.desc = desc
        self.out_file = out_file
        # BUGFIX: deriving the default paths used to crash with a TypeError
        # ("latest_kpis/" + None) when out_file was omitted; keep them None
        # in that case instead.
        if his_file is None:
            his_file = "latest_kpis/" + out_file if out_file is not None else None
        if develop_file is None:
            develop_file = "develop_kpis/" + out_file if out_file is not None else None
        self.his_file = his_file
        self.develop_file = develop_file
        self.actived = actived
        self.unit_repr = unit_repr
        self.records = []

    def add_record(self, rcd):
        '''Append one raw record; it is written out later by persist().'''
        self.records.append(rcd)

    def evaluate(self):
        ''' Run the evaluation based on the records collected and history records. '''
        raise NotImplementedError

    def persist(self):
        ''' Persist the evaluation result in some way. '''
        raise NotImplementedError

    @staticmethod
    def compare_with(cur, other):
        ''' compare `cur` with `other` and return a float ratio to indicate how much
        changes cur based on other.
        The `other` is the denominator, the result is like +/- (other-cur)/other, the
        `+/-` will make the result a positive ratio if `cur` is better, negative other-
        wise.
        '''
        raise NotImplementedError

    @staticmethod
    def cal_kpi(data):
        ''' Calculate the KPI (a scalar) based on raw `data`.
        This is just a default implementation (plain average), free to customize. '''
        return np.average(data)

    @property
    def cur_data(self):
        raise NotImplementedError

    @property
    def baseline_data(self):
        raise NotImplementedError

    @staticmethod
    def __register__(factor):
        '''Register a Kpi subclass under its class name.

        factor should be a subclass of Kpi. Re-registering the same class is
        a no-op; registering a different class under an existing name fails.
        '''
        assert issubclass(factor, Kpi)
        key = factor.__name__
        # setdefault both inserts and guards against a conflicting rebind,
        # so no further assignment is needed.
        assert Kpi.dic.setdefault(key, factor) is factor, \
            "duplicate register %s with a different class" % key
83 |
84 |
class GreaterWorseKpi(Kpi):
    ''' Evaluator for any factor where a larger value is worse, training cost for example. '''

    def __init__(self,
                 name,
                 diff_thre,
                 skip_head=2,
                 actived=False,
                 unit_repr=None,
                 desc=None):
        '''
        name: KPI name; records are persisted to "<name>_factor.txt".
        diff_thre: difference threshold -- maximum tolerated worsening ratio.
        skip_head: number of leading (warm-up) records to skip.
        actived: whether a failure of this KPI fails the whole task.
        '''
        super(GreaterWorseKpi, self).__init__(
            name,
            out_file='%s_factor.txt' % name,
            actived=actived,
            unit_repr=unit_repr,
            desc=desc)
        self.skip_head = skip_head
        self.diff_thre = diff_thre

    def evaluate(self, root):
        '''Compare the current records under `root` with the baselines.

        It seems that compare every batch is too sensitive. So we just compare KPI.
        returns: bool, True when the worsening stays below diff_thre.
        '''
        self.root = root
        cur_data = load_records_from(pjoin(root, self.out_file))[
            self.skip_head:]
        his_data = load_records_from(pjoin(root, self.his_file))[
            self.skip_head:]

        self.ratio_develop = 0
        # BUGFIX: the existence check used the bare relative path while the
        # data itself is loaded from pjoin(root, ...); check the same path
        # that is read, otherwise the develop comparison silently depends
        # on the current working directory.
        develop_path = pjoin(root, self.develop_file)
        if os.path.exists(develop_path):
            develop_data = load_records_from(develop_path)[
                self.skip_head:]
            if len(develop_data) > 0:
                self.ratio_develop = self.compare_with(cur_data, develop_data)

        self.ratio = self.compare_with(cur_data, his_data)
        # compare_with() is positive on improvement, so -ratio is the
        # amount of worsening.
        return (-self.ratio) < self.diff_thre

    @staticmethod
    def compare_with(cur, other):
        '''Positive ratio when `cur` improved (shrank) relative to `other`.'''
        cur_kpi = GreaterWorseKpi.cal_kpi(cur)
        other_kpi = GreaterWorseKpi.cal_kpi(other)
        return (other_kpi - cur_kpi) / other_kpi

    @property
    def cur_data(self):
        '''Current records loaded from out_file (requires self.root set).'''
        return load_records_from(pjoin(self.root, self.out_file))

    @property
    def baseline_data(self):
        '''History baseline records loaded from his_file (requires self.root set).'''
        return load_records_from(pjoin(self.root, self.his_file))

    def persist(self):
        '''Dump the collected records to out_file, one JSON list per line.'''
        lines = []
        is_iterable = False
        if self.records:
            # scalar records get wrapped so every line is a JSON array
            try:
                is_iterable = iter(self.records[0]) is not None
            except Exception as e:
                pass
        for rcd in self.records:
            if not is_iterable: rcd = [rcd]
            rcd = np.array(rcd)
            rcd = rcd.tolist()
            lines.append(json.dumps(rcd))

        # empty records still needs to create an empty file.
        with open(self.out_file, 'w') as f:
            f.write('\n'.join(lines))

    @property
    def fail_info(self):
        '''Human readable failure message for this KPI.'''
        info = "[{name}] failed, diff ratio: {ratio} larger than {thre}.".format(
            name=self.name, ratio=-self.ratio, thre=self.diff_thre)
        if not self.actived:
            info = "Task is disabled, " + info
        return info

    @property
    def success_info(self):
        '''Human readable success message for this KPI.'''
        info = "[{name}] pass".format(name=self.name)
        if not self.actived:
            info = "Task is disabled, " + info
        return info

    @property
    def detail_info(self):
        '''"name,ratio,trend" summary vs the history baseline.'''
        # trend: "+" improved, "-" worsened, "=" KPI disabled
        if not self.actived:
            trend = "="
        elif self.ratio < 0:
            trend = "-"
        else:
            trend = "+"
        return "{name},{ratio},{tren}".format(
            name=self.name, ratio=abs(self.ratio), tren=trend)

    @property
    def develop_info(self):
        '''"name,ratio,trend" summary vs the develop-branch baseline.'''
        if not self.actived:
            trend = "="
        elif self.ratio_develop < 0:
            trend = "-"
        else:
            trend = "+"
        return "{name},{ratio},{tren}".format(
            name=self.name, ratio=abs(self.ratio_develop), tren=trend)
197 |
198 |
class LessWorseKpi(GreaterWorseKpi):
    ''' Evaluator for any factors that less value is bad, trainning acc for example.

    Everything except the comparison direction is shared with
    GreaterWorseKpi: evaluate(), persist(), and the info properties are
    inherited unchanged (they were previously byte-identical copies) and
    dispatch to this class's compare_with via `self.compare_with`.
    '''

    def __init__(self,
                 name,
                 diff_thre,
                 skip_head=2,
                 actived=False,
                 unit_repr=None,
                 desc=None):
        '''
        diff_thre: difference threshold.
        '''
        # the parent already stores skip_head/diff_thre; no need to re-set
        super(LessWorseKpi, self).__init__(
            name,
            diff_thre,
            skip_head,
            actived=actived,
            unit_repr=unit_repr,
            desc=desc)

    @staticmethod
    def compare_with(cur, other):
        '''Positive ratio when `cur` improved (grew) relative to `other`.'''
        cur_kpi = LessWorseKpi.cal_kpi(cur)
        other_kpi = LessWorseKpi.cal_kpi(other)
        return (cur_kpi - other_kpi) / other_kpi
291 |
292 |
# Semantic aliases so task scripts can pick an evaluator by intent:
# cost/duration get worse when they grow, accuracy gets worse when it shrinks.
CostKpi = GreaterWorseKpi

DurationKpi = GreaterWorseKpi

AccKpi = LessWorseKpi
298 |
299 |
def load_records_from(file):
    '''Load KPI records from `file` into a numpy array.

    Each line of the file is one record serialized as JSON;
    for example, a real record might be:
        [[0.1, 0.3], [0.4, 0.2]]

    returns: np.ndarray with one entry per line.
    '''
    datas = []
    with open(file) as f:
        # iterate the file lazily instead of materializing all lines first
        for line in f:
            datas.append(np.array(json.loads(line.strip())))
    return np.array(datas)
313 |
314 |
315 | Kpi.__register__(GreaterWorseKpi)
316 | Kpi.__register__(LessWorseKpi)
317 |
--------------------------------------------------------------------------------
/main.xsh:
--------------------------------------------------------------------------------
#!/usr/bin/env xonsh
# xonsh settings: abort on failing subprocesses, show full tracebacks
$RAISE_SUBPROC_ERROR = True
$XONSH_SHOW_TRACEBACK = True

import sys; sys.path.insert(0, '')
import _config
from _config import pjoin
from utils import PathRecover, log
import persistence as pst
import os
import repo
import argparse
import traceback
import time
import json
import shutil

# expose the CE workspace to subprocesses and task scripts
$ceroot=_config.workspace
develop_evaluate=_config.develop_evaluate
os.environ['ceroot'] = _config.workspace
# run mode: 'evaluation' (default) or 'baseline_test'
mode = os.environ.get('mode', 'evaluation')
# optional comma-separated list restricting the run to specific tasks
specific_tasks = os.environ.get('specific_tasks', None)
specific_tasks = specific_tasks.split(',') if specific_tasks else []
# optional task-name prefix filter, see get_tasks()
case_type = os.environ.get('case_type', None)
# optional environment tag prefixed to task names when persisting results
tasks_env = os.environ.get('tasks_env', None)
26 |
27 |
def parse_args():
    """Parse the CE runner's command line flags.

    returns: argparse.Namespace with a boolean `modified` attribute.
    """
    parser = argparse.ArgumentParser("Tool for running CE models")
    parser.add_argument(
        '--modified',
        action='store_true',
        help='if set, we will just run modified models.')
    return parser.parse_args()
36 |
def main():
    # Entry point: evaluate all (or selected) tasks, then either refresh
    # the KPI baselines (full evaluation runs only) or report failures.
    #try_start_mongod()
    args = parse_args()
    suc, exception_task = evaluate_tasks(args)
    if suc:
        display_success_info()
        # baselines are only updated on a full, unfiltered evaluation run
        if mode == "evaluation" and (not args.modified) and (not specific_tasks):
            update_baseline()
        # xonsh builtin: terminate the script with status 0
        exit 0
    else:
        # failure details are skipped for partial runs (modified/specific)
        if (not args.modified) and (not specific_tasks):
            display_fail_info(exception_task)
        sys.exit(-1)
50 |
51 |
def update_baseline():
    ''' update the baseline in a git repo using current base. '''
    log.warn('updating baseline')
    commit = repo.get_commit(_config.paddle_path)
    with PathRecover():
        message = "evalute [%s]" % commit
        for task_name in get_tasks():
            task_dir = pjoin(_config.baseline_path, task_name)
            # xonsh: change into the task's baseline directory
            cd @(task_dir)
            print('task_dir', task_dir)
            if os.path.isdir('latest_kpis'):
                # update baseline if the latest kpi is better than history
                tracking_kpis = get_kpi_tasks(task_name)

                for kpi in tracking_kpis:
                    # if the kpi is not actived, do not update baseline.
                    if not kpi.actived: continue
                    kpi.root = task_dir
                    better_ratio = kpi.compare_with(kpi.cur_data, kpi.baseline_data)
                    # only replace the baseline when the improvement clears the threshold
                    if better_ratio > _config.kpi_update_threshold:
                        log.warn('current kpi %s better than history by %f, update baseline' % (kpi.out_file, better_ratio))
                        cp @(kpi.out_file) @(kpi.his_file)

        # xonsh: $(git diff) captures stdout; truthy when anything changed
        if $(git diff):
            log.warn('update github baseline')
            '''
            due to the selected update controled by `_config.kpi_update_threshold`, if one task passed, there might be no baselines to update.
            '''
            git pull origin master
            git commit -a -m @(message)
            git push
        else:
            log.warn('no baseline need to update')
85 |
86 |
def refresh_baseline_workspace():
    ''' download baseline. '''
    if mode != "baseline_test":
        # ssh from home is not very stable, can be solved by retry.
        max_retry = 10
        for cnt in range(max_retry):
            try:
                # production mode, clean baseline and rerun
                rm -rf @(_config.baseline_path)
                git clone @(_config.baseline_repo_url) @(_config.baseline_path)
                log.info("git clone %s suc" % _config.baseline_repo_url)
                break
            except Exception as e:
                # give up only after exhausting every retry
                if cnt == max_retry - 1:
                    raise Exception("git clone failed %s " % e)
                else:
                    log.warn('git clone failed %d, %s' % (cnt, e))
                    time.sleep(3)
105 |
106 |
def evaluate_tasks(args):
    '''
    Evaluate all the tasks. It will continue to run all the tasks even
    if any task is failed to get a summary.

    args: parsed CLI options; args.modified restricts the run to tasks
        changed in the baseline repo.
    returns: (all_passed, exception_task) where exception_task maps a
        task name to the traceback that aborted it.
    '''
    cd @(_config.workspace)
    print("_config.workspace", _config.workspace)
    paddle_commit = repo.get_commit(_config.paddle_path)
    commit_time = repo.get_commit_date(_config.paddle_path)
    log.warn('commit', paddle_commit)
    all_passed = True
    exception_task = {}

    # get tasks that need to evaluate
    if specific_tasks:
        tasks = specific_tasks
        log.warn('run specific tasks', tasks)
    elif args.modified:
        tasks = [v for v in get_changed_tasks()]
        log.warn('run changed tasks', tasks)
    else:
        tasks = [v for v in get_tasks()]
        log.warn('run all tasks', tasks)

    #get develop kpis of all tasks and write to develop_kpis
    if develop_evaluate == 'True':
        prepare_develop_kpis(tasks)

    for task in tasks:
        try:
            passed, eval_infos, kpis, kpi_values, kpi_types, detail_infos, develop_infos = evaluate(task)
            if mode != "baseline_test":
                log.warn('add evaluation %s result to mongodb' % task)
                kpi_objs = get_kpi_tasks(task)
                # results are only persisted for full, unfiltered runs
                if (not args.modified) and (not specific_tasks):
                    pst.add_evaluation_record(commitid = paddle_commit,
                             date = commit_time,
                             task = "%s_%s" % (tasks_env, task) if tasks_env else task,
                             passed = passed,
                             infos = eval_infos,
                             kpis = kpis,
                             kpi_values = kpi_values,
                             kpi_types = kpi_types,
                             kpi_objs = kpi_objs,
                             detail_infos = detail_infos,
                             develop_infos = develop_infos)
            if not passed:
                all_passed = False
        except Exception as e:
            # keep going: a crash in one task must not stop the others
            exception_task[task] = traceback.format_exc()
            all_passed = False

    return all_passed, exception_task
160 |
161 |
def prepare_develop_kpis(tasks):
    '''Fetch the develop-branch KPI values for `tasks` from the database
    and write each of them into the task's develop_kpis directory.

    Failures for one task are logged and skipped (best effort).
    '''
    develop_kpis = pst.get_kpis_from_db(tasks)
    for task in tasks:
        if task not in develop_kpis:
            continue
        try:
            record = develop_kpis[task]
            keys = record['kpis-keys']
            values = json.loads(record['kpis-values'])
            assert len(keys) == len(values)
            for key, value in zip(keys, values):
                save_kpis(task, key, value)
        except Exception as e:
            log.warn(e)
180 |
181 |
def save_kpis(task_name, kpi_name, kpi_value):
    '''Write one KPI's values, one item per line, into
    <baseline_path>/<task_name>/develop_kpis/<kpi_name>_factor.txt.
    '''
    develop_dir = "develop_kpis"
    task_dir = pjoin(_config.baseline_path, task_name)
    with PathRecover():
        os.chdir(task_dir)
        # create the develop_kpis directory on first use
        if not os.path.exists(develop_dir):
            os.makedirs(develop_dir)
        os.chdir(develop_dir)
        with open(kpi_name + "_factor.txt", 'w') as fout:
            fout.writelines(str(item) + '\n' for item in kpi_value)
196 |
197 |
def evaluate(task_name):
    '''
    Run one task's run.xsh and evaluate all of its tracked KPIs.

    task_name: str
        name of a task directory.
    returns:
        passed: bool
            whether this task passes the evaluation.
        eval_infos: list of str
            human-readable evaluations result for all the kpis of this task.
        kpis: list of str, the KPI names
        kpi_values: list, current data of each KPI
        kpi_types: list of str, evaluator class name of each KPI
        detail_infos: list of str, "name,ratio,trend" vs history baseline
        develop_infos: list of str, same vs develop baseline (empty when
            develop evaluation is disabled)
    '''
    task_dir = pjoin(_config.baseline_path, task_name)
    log.warn('evaluating model', task_name)

    with PathRecover():
        try:
            cd @(task_dir)
            ./run.xsh
        except Exception as e:
            # a crashing run still gets its KPIs evaluated below
            print(e)


    tracking_kpis = get_kpi_tasks(task_name)

    # evaluate all the kpis
    eval_infos = []
    detail_infos = []
    develop_infos = []
    kpis = []
    kpi_values = []
    kpi_types = []
    passed = True
    for kpi in tracking_kpis:
        suc = kpi.evaluate(task_dir)
        if (not suc) and kpi.actived:
            ''' Only if the kpi is actived, its evaluation result would affect the overall tasks's result. '''
            passed = False
            log.error("Task [%s] failed!" % task_name)
            log.error("details:", kpi.fail_info)
        kpis.append(kpi.name)
        kpi_values.append(kpi.cur_data)
        kpi_types.append(kpi.__class__.__name__)
        # if failed, still continue to evaluate the other kpis to get full statistics.
        eval_infos.append(kpi.fail_info if not suc else kpi.success_info)
        detail_infos.append(kpi.detail_info)
        develop_infos.append(kpi.develop_info)

    if develop_evaluate == 'False':
        develop_infos = []
    log.info("evaluation kpi info: %s %s %s" % (passed, eval_infos, kpis))
    return passed, eval_infos, kpis, kpi_values, kpi_types, detail_infos, develop_infos
249 |
250 |
def get_tasks():
    # List the task directories under the baseline path.
    # Returns a lazy `filter` object, not a list.
    with PathRecover():
        cd @(_config.workspace)
        subdirs = $(ls @(_config.baseline_path)).split()
        if case_type:
            # only tasks whose directory name starts with "<case_type>_"
            return filter(lambda x : x.startswith('%s_' % case_type), subdirs)
        else:
            # skip helper dirs (__*, model_*, models*) and markdown files
            return filter(lambda x : not (x.startswith('__') or x.startswith('model_')
                or x.endswith('.md') or x.startswith('models')), subdirs)
260 |
261 |
def display_fail_info(exception_task):
    '''Log details of every failed task for the current Paddle commit and
    record the failing task names in a local "fail_models" file.

    exception_task: dict mapping task name -> formatted traceback for
        tasks that raised during evaluation.
    '''
    paddle_commit = repo.get_commit(_config.paddle_path)
    infos = pst.db.finds(_config.table_name, {'commitid': paddle_commit, 'type': 'kpi' })
    log.error('Evaluate [%s] failed!' % paddle_commit)
    log.warn('The details:')
    failed_names = []
    for info in infos:
        if info['passed']:
            continue
        log.warn('task:', info['task'])
        failed_names.append(info['task'])
        log.warn('passed: ', info['passed'])
        log.warn('infos', '\n'.join(info['infos']))
        log.warn('kpis keys', info['kpis-keys'])
        log.warn('kpis values', info['kpis-values'])
    for task, trace in exception_task.items():
        failed_names.append(task)
        log.error("%s %s" % (task, trace))
    with open("fail_models", 'w') as f:
        f.write(''.join(name + ' ' for name in failed_names))
282 |
283 |
def display_success_info():
    ''' Log a success banner for the currently evaluated Paddle commit. '''
    log.warn('Evaluate [%s] successed!' % repo.get_commit(_config.paddle_path))
287 |
288 |
def try_start_mongod():
    ''' Start a local mongod instance if none appears to be running.

    The ``ps ax | grep mongod`` pipeline always matches the grep process
    itself, so those lines (and empty lines) are discarded before deciding.
    The previous ``len(out) < 1`` check could never be true, because
    ``str.split`` always yields at least one element - mongod was never
    actually started.
    '''
    out = $(ps ax | grep mongod).strip().split('\n')
    # Keep only real mongod processes.
    procs = [l for l in out if l and 'grep' not in l]
    print('out', procs)
    if not procs:  # there is no mongod service
        log.warn('starting mongodb')
        mkdir -p /chunwei/ce_mongo.db
        mongod --dbpath /chunwei/ce_mongo.db &
296 |
297 |
def get_kpi_tasks(task_name):
    ''' Import and return the ``tracking_kpis`` list declared by a task.

    Tries ``tasks.<task>.continuous_evaluation`` first and falls back to the
    alternative ``tasks.<task>._ce`` module name.
    '''
    with PathRecover():
        cd @(_config.workspace)
        # Run the import inside a private namespace so we can fish out the
        # `tracking_kpis` symbol without polluting our own globals.
        env = {}
        try:
            exec('from tasks.%s.continuous_evaluation import tracking_kpis'
                 % task_name, env)
            log.info("import from continuous_evaluation suc.")
        except Exception as e:
            # Some tasks declare their KPIs in a `_ce` module instead.
            exec('from tasks.%s._ce import tracking_kpis'
                 % task_name, env)

        tracking_kpis = env['tracking_kpis']
        print(tracking_kpis)
        return tracking_kpis
313 |
314 |
def get_changed_tasks():
    ''' Return the task directory names touched by ``git diff master`` in the
    baseline repo.

    Parses the ``diff --git a/<task>/... b/<task>/...`` header lines.

    NOTE(review): unlike the other helpers this changes cwd without
    PathRecover, so the caller is left inside the baseline path; also a diff
    with no matches makes the `$()` substitution fail - confirm callers
    tolerate both.
    '''
    tasks = []
    cd @(_config.baseline_path)
    out = $(git diff master | grep "diff --git")
    out = out.strip()
    for item in out.split('\n'):
        # header token 3 is "b/<task>/<file>"; element 1 of its split is the task.
        task = item.split()[3].split('/')[1]
        if task not in tasks:
            tasks.append(task)
    log.warn("changed tasks: %s" % tasks)
    return tasks
326 |
# Script entry point: evaluation starts as soon as this file is executed.
main()
328 |
--------------------------------------------------------------------------------
/continuous_evaluation_py23/main.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python
# Enables full tracebacks when run under xonsh; a harmless module-level
# constant under plain CPython.  (The assignment was previously duplicated;
# one copy suffices.)
XONSH_SHOW_TRACEBACK = True

import sys; sys.path.insert(0, '')
import _config
from _config import pjoin
from utils import PathRecover, log
import persistence as pst
import os
import repo
import argparse
import traceback
import time
import json
import shutil
import subprocess

# Global configuration pulled from _config and the process environment.
ceroot = _config.workspace
develop_evaluate = _config.develop_evaluate
os.environ['ceroot'] = _config.workspace
# mode: 'evaluation' (default) or 'baseline_test'.
mode = os.environ.get('mode', 'evaluation')
# Optional comma-separated list of task names to restrict the run.
specific_tasks = os.environ.get('specific_tasks', None)
specific_tasks = specific_tasks.split(',') if specific_tasks else []
case_type = os.environ.get('case_type', None)
tasks_env = os.environ.get('tasks_env', None)
27 |
28 |
def parse_args():
    """Parse command-line options for the CE model runner.

    Returns:
        argparse.Namespace with a single boolean attribute ``modified``.
    """
    parser = argparse.ArgumentParser("Tool for running CE models")
    parser.add_argument(
        '--modified',
        action='store_true',
        help='if set, we will just run modified models.')
    return parser.parse_args()
37 |
def main():
    """Entry point: evaluate the selected tasks and exit with a status code.

    Exits 0 when every task passed.  On failure, detailed fail info is only
    displayed for a full run (neither --modified nor specific_tasks), because
    only full runs persist their records; then exits -1.
    """
    args = parse_args()

    all_ok, failed_tasks = evaluate_tasks(args)
    if not all_ok:
        if not (args.modified or specific_tasks):
            display_fail_info(failed_tasks)
        sys.exit(-1)
    display_success_info()
    sys.exit(0)
50 |
51 |
def update_baseline():
    ''' update the baseline in a git repo using current base.

    For every task, each actived KPI whose latest value beats its history by
    more than ``_config.kpi_update_threshold`` gets its history file
    replaced; if anything changed, the baseline repo is committed and pushed.
    '''
    log.warn('updating baseline')
    commit = repo.get_commit(_config.paddle_path)
    with PathRecover():
        message = "evalute [%s]" % commit
        for task_name in get_tasks():
            task_dir = pjoin(_config.baseline_path, task_name)
            os.chdir(task_dir)
            log.info('task_dir', task_dir)
            if os.path.isdir('latest_kpis'):
                # update baseline if the latest kpi is better than history
                tracking_kpis = get_kpi_tasks(task_name)

                for kpi in tracking_kpis:
                    # if the kpi is not actived, do not update baseline.
                    if not kpi.actived: continue
                    kpi.root = task_dir
                    better_ratio = kpi.compare_with(kpi.cur_data, kpi.baseline_data)
                    if better_ratio > _config.kpi_update_threshold:
                        log.warn('current kpi %s better than history by %f, update baseline' % (kpi.out_file, better_ratio))
                        shutil.copyfile(kpi.out_file, kpi.his_file)

        # Detect whether any baseline file actually changed.
        os.chdir(_config.baseline_path)
        os.system('pwd')
        os.system('git diff')
        cmd = "git diff"
        status = subprocess.check_output(cmd, shell=True)
        if status:
            log.warn('update github baseline')
            '''
            due to the selected update controled by `_config.kpi_update_threshold`, if one task passed, there might be no baselines to update.
            '''
            cmd = 'git pull origin master; git commit -a -m "%s"; git push' % message
            os.system(cmd)
        else:
            log.warn('no baseline need to update')
90 |
91 |
def refresh_baseline_workspace():
    ''' Download a fresh baseline repo clone (skipped in baseline_test mode).

    Cloning over ssh from home is not very stable, so retry up to
    ``max_retry`` times before giving up.
    '''
    if mode != "baseline_test":
        # ssh from home is not very stable, can be solved by retry.
        max_retry = 10
        for cnt in range(max_retry):
            try:
                if os.path.exists(_config.baseline_path):
                    shutil.rmtree(_config.baseline_path)
                cmd = "git clone %s %s" % (_config.baseline_repo_url, _config.baseline_path)
                log.info("cmd", cmd)
                tp = os.system(cmd)
                log.info("gitclone", tp)
                # Fix: os.system does not raise on failure, so the retry
                # loop previously never retried a failed clone - turn a
                # non-zero exit status into an exception here.
                if tp != 0:
                    raise RuntimeError("git clone exited with status %s" % tp)
                log.info("git clone %s suc" % _config.baseline_repo_url)
                break
            except Exception as e:
                if cnt == max_retry - 1:
                    raise Exception("git clone failed %s " % e)
                else:
                    log.warn('git clone failed %d, %s' % (cnt, e))
                    time.sleep(3)
113 |
114 |
def evaluate_tasks(args):
    '''
    Evaluate all the tasks. It will continue to run all the tasks even
    if any task is failed to get a summary.

    args: argparse.Namespace
        only ``args.modified`` is read here.
    returns:
        (all_passed, exception_task) where exception_task maps task name ->
        formatted traceback for tasks that raised.
    '''
    os.chdir(_config.workspace)
    paddle_commit = repo.get_commit(_config.paddle_path)
    commit_time = repo.get_commit_date(_config.paddle_path)
    log.warn('commit', paddle_commit)
    all_passed = True
    exception_task = {}

    # Task selection priority: explicit list > changed-only > everything.
    if specific_tasks:
        tasks = specific_tasks
        log.warn('run specific tasks', tasks)
    elif args.modified:
        tasks = [v for v in get_changed_tasks()]
        log.warn('run changed tasks', tasks)
    else:
        tasks = [v for v in get_tasks()]
        log.warn('run all tasks', tasks)

    log.info("tasks", tasks)
    # get develop kpis of all tasks and write to develop_kpis

    if develop_evaluate == 'True':
        prepare_develop_kpis(tasks)

    for task in tasks:
        try:
            log.info("befor run task")
            passed, eval_infos, kpis, kpi_values, kpi_types, detail_infos, develop_infos = evaluate(task)
            log.info("after run task", passed)

            if mode != "baseline_test":
                log.warn('add evaluation %s result to mongodb' % task)
                kpi_objs = get_kpi_tasks(task)
                # Only full runs are persisted; partial runs would leave
                # incomplete records for the commit.
                if (not args.modified) and (not specific_tasks):
                    log.info("befor update record")
                    pst.add_evaluation_record(commitid = paddle_commit,
                                              date = commit_time,
                                              task = "%s_%s" % (tasks_env, task) if tasks_env else task,
                                              passed = passed,
                                              infos = eval_infos,
                                              kpis = kpis,
                                              kpi_values = kpi_values,
                                              kpi_types = kpi_types,
                                              kpi_objs = kpi_objs,
                                              detail_infos = detail_infos,
                                              develop_infos = develop_infos)
                    log.info("after update record")
                log.warn('after add evaluation %s result to mongodb' % task)
            if not passed:
                all_passed = False
        except Exception as e:
            # Keep going so the summary covers every task.
            exception_task[task] = traceback.format_exc()
            all_passed = False

    return all_passed, exception_task
174 |
175 |
def prepare_develop_kpis(tasks):
    '''Fetch develop-branch KPI records from the DB and dump them to files.

    Tasks without a DB record are skipped; any per-task failure is logged
    and does not stop the remaining tasks.
    '''
    # get develop kpis from db
    develop_kpis = pst.get_kpis_from_db(tasks)

    # save kpi to file
    for task in tasks:
        try:
            if task not in develop_kpis:
                continue
            record = develop_kpis[task]
            keys = record['kpis-keys']
            values = json.loads(record['kpis-values'])
            assert len(keys) == len(values)
            for key, value in zip(keys, values):
                save_kpis(task, key, value)
        except Exception as e:
            log.warn(e)
195 |
196 |
def save_kpis(task_name, kpi_name, kpi_value):
    '''
    Write one KPI's values to ``<task>/develop_kpis/<kpi>_factor.txt``,
    one item per line, creating the directory if needed.
    '''
    develop_dir = "develop_kpis"
    task_dir = pjoin(_config.baseline_path, task_name)
    with PathRecover():
        os.chdir(task_dir)
        if not os.path.exists(develop_dir):
            os.makedirs(develop_dir)
        os.chdir(develop_dir)
        file_name = kpi_name + "_factor.txt"
        lines = [str(item) + '\n' for item in kpi_value]
        with open(file_name, 'w') as fout:
            fout.writelines(lines)
212 |
213 |
def evaluate(task_name):
    '''
    Run one task's run script and evaluate all of its tracked KPIs.

    task_name: str
        name of a task directory.
    returns:
        passed: bool
            whether this task passes the evaluation.
        eval_infos: list of str
            human-readable evaluations result for all the kpis of this task.
        kpis: dict of (kpi_name, list_of_float)
    '''
    task_dir = pjoin(_config.baseline_path, task_name)
    log.warn('evaluating model', task_name)

    with PathRecover():
        try:
            os.chdir(task_dir)
            log.info("befor ./run.xsh")
            # Windows baselines ship a run.bat instead of run.xsh.
            cmd = "./run.xsh" if _config.system == "linux" else "run.bat"
            os.system(cmd)
            log.info("after ./run.xsh")
        except Exception as e:
            print(e)

    log.info("befor get_kpi_tasks")
    tracking_kpis = get_kpi_tasks(task_name)
    log.info("after get_kpi_tasks")

    # evaluate all the kpis
    eval_infos = []
    detail_infos = []
    develop_infos = []
    kpis = []
    kpi_values = []
    kpi_types = []
    passed = True
    log.info("befor check kpi")
    for kpi in tracking_kpis:
        suc = kpi.evaluate(task_dir)
        if (not suc) and kpi.actived:
            ''' Only if the kpi is actived, its evaluation result would affect the overall tasks's result. '''
            passed = False
            log.info("Task [%s] failed!" % task_name)
            log.info("details:", kpi.fail_info)
        kpis.append(kpi.name)
        kpi_values.append(kpi.cur_data)
        kpi_types.append(kpi.__class__.__name__)
        # if failed, still continue to evaluate the other kpis to get full statistics.
        eval_infos.append(kpi.fail_info if not suc else kpi.success_info)
        detail_infos.append(kpi.detail_info)
        develop_infos.append(kpi.develop_info)
    log.info("after check kpi")
    log.info("evaluation kpi info: %s %s %s" % (passed, eval_infos, kpis))
    if develop_evaluate == 'False':
        develop_infos = []
    return passed, eval_infos, kpis, kpi_values, kpi_types, detail_infos, develop_infos
270 |
271 |
def get_tasks():
    '''Return the names of candidate task directories under the baseline path.

    Only real sub-directories are considered.  With ``case_type`` set, only
    ``<case_type>_*`` entries are returned; otherwise internal entries
    (``__*``, ``model_*``, ``models*``, hidden dirs and ``*.md``) are
    filtered out.
    '''
    with PathRecover():
        os.chdir(_config.workspace)
        # Materialize real lists: on Python 3 a `filter` object logs as an
        # opaque repr and can only be consumed once.
        subdirs = [d for d in os.listdir(_config.baseline_path)
                   if os.path.isdir(os.path.join(_config.baseline_path, d))]
        log.info("subdirs", subdirs)
        if case_type:
            return [d for d in subdirs if d.startswith('%s_' % case_type)]
        return [d for d in subdirs
                if not (d.startswith('__') or d.startswith('model_')
                        or d.endswith('.md') or d.startswith('.')
                        or d.startswith('models'))]
284 |
285 |
def display_fail_info(exception_task):
    '''Log details for every failed task of the current Paddle commit and
    write the space-separated list of failing task names to ``fail_models``.

    exception_task: dict
        maps task name -> formatted traceback for tasks that raised.
    '''
    paddle_commit = repo.get_commit(_config.paddle_path)
    # Pull the per-task KPI records persisted for this commit.
    infos = pst.db.finds(_config.table_name, {'commitid': paddle_commit, 'type': 'kpi' })
    # NOTE(review): the xsh variant logs these two messages at error level;
    # here info/warn is used - presumably intentional, confirm.
    log.info('Evaluate [%s] failed!' % paddle_commit)
    log.warn('The details:')
    detail_info = ''
    for info in infos:
        if not info['passed']:
            log.warn('task:', info['task'])
            detail_info += info['task'] + ' '
            log.warn('passed: ', info['passed'])
            log.warn('infos', '\n'.join(info['infos']))
            log.warn('kpis keys', info['kpis-keys'])
            log.warn('kpis values', info['kpis-values'])
    if exception_task:
        for task, info in exception_task.items():
            detail_info += task + ' '
            log.info("%s %s" %(task, info))
    # Consumed by downstream tooling to know which models to re-run/report.
    with open("fail_models", 'w') as f:
        f.write(detail_info)
306 |
307 |
def display_success_info():
    '''Log a success banner with the evaluated Paddle commit id.'''
    log.warn('Evaluate [%s] successed!' % repo.get_commit(_config.paddle_path))
311 |
312 |
def try_start_mongod():
    '''Start a local mongod instance if none appears to be running.

    ``ps ax | grep mongod`` always matches the grep process itself, so such
    lines (and empty ones) are discarded first.  The previous
    ``len(out) < 1`` check could never be true, because ``str.split``
    always yields at least one element - mongod was never started.
    '''
    cmd = "ps ax | grep mongod"
    output = subprocess.check_output(cmd, shell=True)
    output = output.decode()
    # Keep only real mongod processes.
    procs = [line for line in output.strip().split('\n')
             if line and 'grep' not in line]
    log.info('out', procs)
    if not procs:  # there is no mongod service
        log.warn('starting mongodb')
        os.makedirs(r"/chunwei/ce_mongo.db")
        cmd = "mongod --dbpath /chunwei/ce_mongo.db &"
        os.system(cmd)
324 |
325 |
def get_kpi_tasks(task_name):
    '''Import and return the ``tracking_kpis`` list declared by a task.

    Tries ``tasks.<task>.continuous_evaluation`` first and falls back to the
    alternative ``tasks.<task>._ce`` module name.
    '''
    with PathRecover():
        os.chdir(_config.workspace)
        # Run the import inside a private namespace so we can fish out the
        # `tracking_kpis` symbol without polluting our own globals.
        env = {}
        try:
            log.info("task_name", task_name)
            log.info('from tasks.%s.continuous_evaluation import tracking_kpis'
                     % task_name, env)
            exec('from tasks.%s.continuous_evaluation import tracking_kpis'
                 % task_name, env)
            log.info("import from continuous_evaluation suc.")
        except Exception as e:
            # Some tasks declare their KPIs in a `_ce` module instead.
            exec('from tasks.%s._ce import tracking_kpis'
                 % task_name, env)

        tracking_kpis = env['tracking_kpis']
        log.info("tracking_kpis", tracking_kpis)
        return tracking_kpis
344 |
345 |
def get_changed_tasks():
    '''Return the task directory names touched by ``git diff master`` in the
    baseline repo.

    Parses the ``diff --git a/<task>/... b/<task>/...`` header lines and
    returns an empty list when there is no diff.
    '''
    tasks = []
    os.chdir(_config.baseline_path)
    cmd = 'git diff master | grep "diff --git"'
    # Run the pipeline once (it used to be executed twice: once via
    # os.system as a guard and again via check_output).
    try:
        out = subprocess.check_output(cmd, shell=True)
    except subprocess.CalledProcessError:
        # grep exits non-zero when nothing matched, i.e. no changed tasks.
        return tasks
    out = out.decode().strip()
    for item in out.split('\n'):
        try:
            # header token 3 is "b/<task>/<file>"; element 1 is the task name.
            task = item.split()[3].split('/')[1]
            if task not in tasks:
                tasks.append(task)
        except IndexError:
            # Malformed or unexpected header line - skip it (the old bare
            # `except:` swallowed everything, including KeyboardInterrupt).
            continue
    log.warn("changed tasks: %s" % tasks)
    return tasks
364 |
365 | main()
366 |
--------------------------------------------------------------------------------
/web/view.py:
--------------------------------------------------------------------------------
1 | import sys
2 | sys.path.append('pypage')
3 | sys.path.append('..')
4 | import _config
5 | import json
6 | from db import MongoDB
7 | from api import *
8 | from pypage import *
9 | from pypage import layout as lyt
10 | from datetime import datetime, timedelta
11 | from kpi import Kpi
12 | from persistence import db
13 |
# Module-level Page shells used by the web handlers.
status_page = Page(
    "Evaluation Status", filename="pypage-status.html").enable_bootstrap()
# detail page for a single commit
commit_detail_page = Page(
    "Evaluation Details", filename="pypage-detail.html").enable_bootstrap()
compare_page = Page(
    "Continuous Evaluation", filename="pypage-search.html").enable_bootstrap()

# KPI distribution page; echarts is needed for the scalar charts.
dist_page = Page(
    "Distributation", filename="pypage-search.html").enable_bootstrap().enable_echarts()
24 |
def build_index_page():
    '''Assemble the index page: the commit-status list next to the latest
    evaluation details.

    Returns (compiled_html, (commit_snip, commit_detail_snip)).
    '''
    page = Page('Continous Evaluation', debug=True).enable_bootstrap()

    status_snip = CommitStatusSnip()
    detail_snip = CommitDetailSnip()

    with page.body:
        NavSnip().html  # top navigation bar

        box = create_middle_align_box()
        with box:
            with lyt.row():
                with lyt.col(size=4):
                    Tag('h3', 'Evaluated Commits')
                    Tag('p', 'green means successful, grey means fail.')
                    status_snip.html
                with lyt.col():
                    Tag('h3', 'Latest Evaluation')
                    detail_snip.html

    return page.compile_str(), (status_snip, detail_snip)
47 |
48 |
def build_commit_detail_page():
    '''Assemble the per-commit detail page.

    Returns (compiled_html, (commit_detail_snip,)).
    '''
    page = Page('Commit Evaluation Details').enable_bootstrap()

    detail_snip = CommitDetailSnip()

    with page.body:
        NavSnip().html  # top navigation bar

        box = create_middle_align_box()
        with box:
            Tag('h2', 'Commit details')
            detail_snip.html

    return page.compile_str(), (detail_snip, )
63 |
64 |
def build_compare_page():
    '''Assemble the commit-comparison page (selection form plus result).

    Returns (compiled_html, (select_snip, result_snip)).
    '''
    page = Page('Commit Compare').enable_bootstrap()

    select_snip = CommitCompareSelectSnip()
    result_snip = CommitCompareResultSnip()

    with page.body:
        NavSnip().html  # top navigation bar
        box = create_middle_align_box()
        with box:
            with lyt.row():
                with lyt.fluid_container():
                    select_snip.html
            with lyt.row():
                with lyt.fluid_container():
                    result_snip.html

    return page.compile_str(), (
        select_snip,
        result_snip, )
85 |
def build_scalar_page(task_name):
    '''Assemble the KPI-distribution (scalar chart) page for one task.

    Returns (compiled_html, (scalar_snip,)).
    '''
    page = Page('KPI Distribution').enable_bootstrap().enable_echarts()

    chart_snip = ScalarSnip(80, task_name)

    with page.body:
        NavSnip().html  # top navigation bar
        box = create_middle_align_box()
        with box:
            chart_snip.html

    return page.compile_str(), (
        chart_snip,
    )
100 |
def create_middle_align_box():
    '''Build a horizontally centered content area: a width-10 column flanked
    by two auto-sized spacer columns, and return the inner container.'''
    with lyt.fluid_container():
        with lyt.row():
            lyt.col()  # left spacer
            with lyt.col(size=10):
                inner = lyt.fluid_container()
            lyt.col()  # right spacer
    return inner
109 |
110 |
class NavSnip(Snippet):
    ''' Navigation bar shown at the top of every page. '''

    @property
    def html(self):
        # Dark navbar linking to the index and compare pages.
        navbar(
            'CE',
            links=[
                '/',
                '/commit/compare',
            ],
            link_txts=['index', 'compare'],
            theme='dark',
            color='dark')

    def logic(self):
        # No template variables needed.
        return {}
128 |
129 |
class CommitDetailSnip(Snippet):
    ''' Display commit details: a pass/fail badge plus one KPI table per task. '''

    @property
    def html(self):
        '''
        variables:
            - version
        '''
        # Green badge when the commit passed, red otherwise.
        with Tag('p').as_row():
            with IF('version.passed') as f:
                badge(VAL('version.commit')).set_success()
                f.add(STMT('else'), -1)
                badge(VAL('version.commit')).set_danger()
        # NOTE(review): several RawHtml string payloads below appear
        # truncated by an HTML-stripping extraction step (the 'show scalars'
        # format string also lost its placeholder) - verify against the repo.
        RawHtml('
')

        with lyt.fluid_container():
            Tag('h2', 'Tasks').as_row()
            with FOR('name,task in version.kpis.items()'):
                Tag('h4', VAL('name')).as_row()
                Tag('span', 'show scalars' % VAL('name')).as_row()
                with lyt.row():
                    with table().set_striped():
                        RawHtml('')
                        RawHtml(
                            '| KPI | KPI values | error | ')
                        RawHtml('
')

                        with FOR('kpiname, kpi in task.kpis.items()'):
                            with table.row():
                                table.col(VAL('kpiname'))
                                with table.col():
                                    RawHtml(
                                        '{{ kpi[2] }}{{kpi[5]}}
')
                                #with table.col():
                                #    RawHtml(
                                #        '{{ kpi[6] }}
')
                                with table.col():
                                    # alert kpi which is activated.
                                    with IF('kpi[3] != "pass" and kpi[4]'):
                                        alert(c=VAL('kpi[3]')).set_danger()

    def logic(self, commitid):
        # kpi tuple indices used by the template above: [2] value,
        # [3] error info, [4] actived flag - assumed from usage; confirm.
        task_kpis = CommitRecord.get_tasks(commitid)
        res = objdict(version=dict(
            commit=commitid,
            passed=tasks_success(task_kpis),
            kpis=task_kpis, ))
        return res
179 |
180 |
class CommitCompareSelectSnip(Snippet):
    ''' Form with two commit selectors ("cur" and "base") feeding the
    /commit/compare endpoint via GET. '''

    @property
    def html(self):
        Tag('h2', 'Compare').as_row()
        with Tag('form',
                 class_='container-fluid',
                 method='GET',
                 action='/commit/compare').as_row():
            # "cur" selector: the commit under inspection (rendered green).
            with Tag('select', name='cur', class_='form-control').as_col(5):
                with FOR('rcd in %s' % self.KEY('records')):
                    Tag('option',
                        c=VAL('rcd.shortcommit') + " " + VAL('rcd.date'),
                        value=VAL('rcd.commit'),
                        style="color:green")

            # "base" selector: the baseline commit to compare against.
            with Tag('select', name='base', class_='form-control').as_col(5):
                with FOR('rcd in %s' % self.KEY('records')):
                    Tag('option',
                        c=VAL('rcd.shortcommit') + " " + VAL('rcd.date'),
                        value=VAL('rcd.commit'))

            with lyt.col():
                Tag('button',
                    class_='btn btn-primary',
                    c='Submit',
                    type='submit').as_col()
        # NOTE(review): RawHtml payload below appears truncated by extraction.
        RawHtml('
')

    def logic(self):
        records_ = CommitRecord.get_all()
        return {self.KEY('records'): records_}
214 |
215 |
class CommitStatusSnip(Snippet):
    ''' A clickable list of evaluated commits (green = passed, grey = failed). '''

    @property
    def html(self):
        ''' Render the commit list; each entry links to its detail page. '''

        with Tag('ul', class_='list-group'):
            detail_href = '/commit/details?commit=%s' % VAL('commit.commit')
            with FOR('commit in %s' % self.KEY('commits')):
                with IF('commit.passed') as branch:
                    with Tag(
                            'a',
                            class_='list-group-item list-group-item-action list-group-item-success',
                            href=detail_href):
                        Tag('b', VAL('commit.shortcommit'))
                        Tag('span', VAL('commit.date'))

                    branch.add(STMT('else'), -1)

                    with Tag(
                            'a',
                            class_='list-group-item list-group-item-action list-group-item-secondary',
                            href=detail_href):
                        Tag('b', VAL('commit.shortcommit'))
                        Tag('span', VAL('commit.date'))

    def logic(self):
        # Newest commit first.
        commits = CommitRecord.get_all()
        return {self.KEY('commits'): list(reversed(commits))}
246 |
247 |
class CommitCompareResultSnip(Snippet):
    ''' Render the per-task KPI difference between two selected commits. '''

    @property
    def html(self):
        with lyt.row():
            with Tag('p'):
                Tag('span', 'Comparation between')
                Tag('b', self.VAL('cur_commit'))
                Tag('span', 'and history')
                Tag('b', self.VAL('base_commit'))
        # NOTE(review): several RawHtml string payloads in this method look
        # truncated by an HTML-stripping extraction step - verify upstream.
        RawHtml('
')

        with lyt.row():
            Tag('h2', c='Tasks KPI diff')
        with lyt.row():
            with lyt.fluid_container():
                with FOR('task in %s' % self.KEY('tasks')):
                    with lyt.row():
                        Tag('h3', VAL('task.name'))
                    with table().set_striped():
                        RawHtml('')
                        RawHtml(
                            '| KPI %s | improvement proportion(red better) | '
                            % (VAL('task.kpis[5]')))
                        RawHtml('
')

                        with FOR('kpi in task.kpis'):
                            with table.row():
                                table.col(VAL('kpi.name'))
                                # Only color diffs beyond +-0.01: positive
                                # ratios red, negative green (red = better
                                # per the column header above).
                                with IF('kpi.ratio > 0.01 or kpi.ratio < -0.01'
                                        ) as f1:
                                    with IF('kpi.ratio > 0') as f2:
                                        table.col(VAL(
                                            "'%.2f' % kpi.ratio | float") +
                                                  '%',
                                                  style='color: red;')
                                        f2.add(STMT('else'), -1)
                                        table.col(VAL(
                                            "'%.2f' % kpi.ratio | float") +
                                                  '%',
                                                  style='color: green;')
                                    f1.add(STMT('else'), -1)
                                    table.col(
                                        VAL("'%.2f' % kpi.ratio | float") +
                                        '%')

    def logic(self, cur_commit, base_commit):
        '''Collect per-KPI improvement ratios (in percent) for every task
        present in both commits' records.'''
        print('cur', cur_commit)
        print('base', base_commit)

        cur_rcds = CommitRecord.get_tasks(cur_commit)
        base_rcds = CommitRecord.get_tasks(base_commit)
        res = []
        for name in cur_rcds.keys():
            cur_task = cur_rcds.get(name, None)

            base_task = base_rcds.get(name, None)
            # if eithor do not have some task, skip it.
            if not (cur_task and base_task): continue

            record = objdict()
            res.append(record)
            record.name = name
            record.kpis = []
            for kpi in cur_task.kpis.keys():
                cur_kpi = cur_task.kpis.get(kpi, None)
                base_kpi = base_task.kpis.get(kpi, None)
                if not (cur_kpi and base_kpi): continue
                kpi_ = objdict()
                # kpi[1] holds the KPI type name; Kpi.dic maps it to the
                # class providing the comparator.
                kpi_type = Kpi.dic.get(cur_kpi[1])

                kpi_.name = kpi
                kpi_.ratio = kpi_type.compare_with(
                    cur_kpi[0], base_kpi[0]) * 100.  # get a percentage
                record.kpis.append(kpi_)
        return {
            self.KEY('tasks'): res,
            self.KEY('cur_commit'): cur_commit[:7],
            self.KEY('base_commit'): base_commit[:7],
        }
329 |
330 |
331 | class ScalarSnip(Snippet):
332 | '''
333 | Scalars for the latest N records for all the kpis
334 |
335 | One page for each task.
336 | '''
337 | def __init__(self, N, task_name):
338 | super().__init__()
339 | self.N = N
340 | self.task_name = task_name
341 |
342 | @property
343 | def html(self):
344 | with lyt.row():
345 | Tag('h3', self.VAL('task_name'))
346 | RawHtml('
')
347 |
348 | with lyt.fluid_container():
349 | with FOR('kpi, dist in %s' % self.KEY('kpis')):
350 | with lyt.row():
351 | RawHtml("{{ dist |safe }}")
352 |
353 | def logic(self):
354 | # should be sorted by freshness
355 | commits = CommitRecord.get_all()
356 | kpis = {}
357 | last_N_commit = commits[-self.N:-1] + [commits[-1]]
358 | for commit in last_N_commit:
359 | rcd = CommitRecord.get_tasks(commit.commit)
360 | if self.task_name not in rcd: continue
361 | for (kpi,val) in rcd[self.task_name].kpis.items():
362 | kpis.setdefault(kpi+'--x', []).append(commit.shortcommit)
363 | kpis.setdefault(kpi, []).append(float(val[2]))
364 | res = []
365 | for (kpi, vals) in kpis.items():
366 | print(kpi, vals)
367 | if not kpi.endswith('--x'):
368 | dist, js_deps = scalar(kpi, kpis[kpi+'--x'], kpis[kpi])
369 | res.append((kpi, dist,))
370 |
371 | return {self.KEY('kpis'): res, 'script_list': js_deps}
372 |
373 |
def passed_commits():
    # TODO: unimplemented placeholder - no callers in this file.
    pass
376 |
377 |
class objdict(dict):
    """A dict whose keys are also readable/writable as attributes."""

    def __setattr__(self, key, value):
        self[key] = value

    def __getattr__(self, item):
        try:
            return self[item]
        except KeyError:
            # Keep the original diagnostic, but raise AttributeError rather
            # than exit(1): killing the whole web process on a missing key
            # was a debug leftover, and __getattr__ is also probed by
            # copy/pickle, which require AttributeError on unknown names.
            print('valid keys:', [k for k in self.keys()])
            raise AttributeError(item)
388 |
389 |
390 |
def tasks_success(tasks):
    """Return True iff every task record in *tasks* (a mapping of
    name -> record) has a truthy 'passed' field."""
    return all(task['passed'] for task in tasks.values())
395 |
--------------------------------------------------------------------------------
/continuous_evaluation_py23/web/view.py:
--------------------------------------------------------------------------------
1 | import sys
2 | sys.path.append('pypage')
3 | sys.path.append('..')
4 | import _config
5 | import json
6 | from db import MongoDB
7 | from api import *
8 | from pypage import *
9 | from pypage import layout as lyt
10 | from datetime import datetime, timedelta
11 | from kpi import Kpi
12 | from persistence import db
13 |
# Module-level Page shells used by the web handlers.
status_page = Page(
    "Evaluation Status", filename="pypage-status.html").enable_bootstrap()
# detail page for a single commit
commit_detail_page = Page(
    "Evaluation Details", filename="pypage-detail.html").enable_bootstrap()
compare_page = Page(
    "Continuous Evaluation", filename="pypage-search.html").enable_bootstrap()

# KPI distribution page; echarts is needed for the scalar charts.
dist_page = Page(
    "Distributation", filename="pypage-search.html").enable_bootstrap().enable_echarts()
24 |
25 |
def build_index_page():
    '''Assemble the index page listing the per-task result tables.

    Returns (compiled_html, (tables_snip,)).
    '''
    page = Page('Continous Evaluation', debug=True).enable_bootstrap()
    with page.body:
        NavSnip().html  # top navigation bar
        listing = TablesSnip()
        box = create_middle_align_box()
        with box:
            with lyt.row():
                with lyt.col(size=4):
                    Tag('h3', '各任务运行数据存放表')
                    Tag('p', '若无数据,点击会报错.')
                    listing.html
    return page.compile_str(), (listing, )
40 |
41 |
42 |
43 |
def build_main_page(table_name):
    '''Assemble the main page for one result table: the commit-status list
    next to the latest evaluation details.

    Returns (compiled_html, (commit_snip, commit_detail_snip)).
    '''
    page = Page('Continous Evaluation', debug=True).enable_bootstrap()

    status_snip = CommitStatusSnip(table_name)
    detail_snip = CommitDetailSnip(table_name)

    with page.body:
        # NOTE: this page deliberately has no navbar in the original layout.

        box = create_middle_align_box()
        with box:
            with lyt.row():
                with lyt.col(size=4):
                    Tag('h3', '例次运行状态')
                    Tag('p', 'green means successful, grey means fail.')
                    status_snip.html
                with lyt.col():
                    Tag('h3', 'Latest Evaluation')
                    detail_snip.html

    return page.compile_str(), (status_snip, detail_snip)
65 |
66 |
def build_commit_detail_page(table_name):
    '''Assemble the per-commit detail page for one result table.

    Returns (compiled_html, (commit_detail_snip,)).
    '''
    page = Page('Commit Evaluation Details').enable_bootstrap()

    detail_snip = CommitDetailSnip(table_name)

    with page.body:
        NavSnip().html  # top navigation bar

        box = create_middle_align_box()
        with box:
            Tag('h2', 'Commit details')
            detail_snip.html

    return page.compile_str(), (detail_snip, )
81 |
82 |
def build_compare_page():
    '''Assemble the commit-comparison page (selection form plus result).

    Returns (compiled_html, (select_snip, result_snip)).
    '''
    page = Page('Commit Compare').enable_bootstrap()

    select_snip = CommitCompareSelectSnip()
    result_snip = CommitCompareResultSnip()

    with page.body:
        NavSnip().html  # top navigation bar
        box = create_middle_align_box()
        with box:
            with lyt.row():
                with lyt.fluid_container():
                    select_snip.html
            with lyt.row():
                with lyt.fluid_container():
                    result_snip.html

    return page.compile_str(), (
        select_snip,
        result_snip, )
103 |
def build_scalar_page(task_name):
    '''Assemble the KPI-distribution (scalar chart) page for one task.

    Returns (compiled_html, (scalar_snip,)).
    '''
    page = Page('KPI Distribution').enable_bootstrap().enable_echarts()

    chart_snip = ScalarSnip(80, task_name)

    with page.body:
        NavSnip().html  # top navigation bar
        box = create_middle_align_box()
        with box:
            chart_snip.html

    return page.compile_str(), (
        chart_snip,
    )
118 |
def create_middle_align_box():
    '''Build a horizontally centered content area: a width-10 column flanked
    by two auto-sized spacer columns, and return the inner container.'''
    with lyt.fluid_container():
        with lyt.row():
            lyt.col()  # left spacer
            with lyt.col(size=10):
                inner = lyt.fluid_container()
            lyt.col()  # right spacer
    return inner
127 |
128 |
class NavSnip(Snippet):
    ''' Navigation bar shown at the top of every page (index link only). '''

    @property
    def html(self):
        # Dark navbar with a single link back to the index.
        navbar(
            'CE',
            links=[
                '/',
            ],
            link_txts=['index'],
            theme='dark',
            color='dark')

    def logic(self):
        # No template variables needed.
        return {}
145 |
146 |
class CommitDetailSnip(Snippet):
    ''' Display commit details for one result table: a pass/fail badge plus
    one KPI table per task. '''

    def __init__(self, table_name):
        super().__init__()
        self.table_name = table_name  # backing result table name

    @property
    def html(self):
        '''
        variables:
            - version
        '''
        # Green badge when the commit passed, red otherwise.
        with Tag('p').as_row():
            with IF('version.passed') as f:
                badge(VAL('version.commit')).set_success()
                f.add(STMT('else'), -1)
                badge(VAL('version.commit')).set_danger()
        # NOTE(review): several RawHtml string payloads below appear
        # truncated by an HTML-stripping extraction step (the 'show scalars'
        # format string also lost its placeholders) - verify upstream.
        RawHtml('
')

        with lyt.fluid_container():
            Tag('h2', '模型').as_row()
            with FOR('name,task in version.kpis.items()'):
                Tag('h4', VAL('name')).as_row()
                Tag('span', 'show scalars' % (self.table_name, VAL('name')))
                with lyt.row():
                    with table().set_striped():
                        RawHtml('')
                        RawHtml(
                            '| KPI | KPI values | error | ')
                        RawHtml('
')

                        with FOR('kpiname, kpi in task.kpis.items()'):
                            with table.row():
                                table.col(VAL('kpiname'))
                                with table.col():
                                    RawHtml(
                                        '{{ kpi[2] }}{{kpi[5]}}
')
                                #with table.col():
                                #    RawHtml(
                                #        '{{ kpi[6] }}
')
                                with table.col():
                                    # alert kpi which is activated.
                                    with IF('kpi[3] != "pass" and kpi[4]'):
                                        alert(c=VAL('kpi[3]')).set_danger()

    def logic(self, table_name, commitid):
        # kpi tuple indices used by the template above: [2] value,
        # [3] error info, [4] actived flag - assumed from usage; confirm.
        task_kpis = CommitRecord.get_tasks(table_name, commitid)
        res = objdict(version=dict(
            commit=commitid,
            passed=tasks_success(task_kpis),
            kpis=task_kpis, ))
        return res
200 |
201 |
class TablesSnip(Snippet):
    '''
    List every result table as a link to its /main page.
    '''

    @property
    def html(self):
        with Tag('ul', class_='list-group'):
            main_href = '/main?table=%s' % VAL('table')
            with FOR('table in %s' % self.KEY('tables')):
                with Tag(
                        'a',
                        class_='list-group-item list-group-item-action list-group-item-success',
                        href=main_href):
                    Tag('b', VAL('table'))

    def logic(self):
        tables = CommitRecord.get_all_tables()
        return {self.KEY('tables'): list(reversed(tables))}
221 |
222 |
223 |
class CommitCompareSelectSnip(Snippet):
    ''' Form with two commit selectors ("cur" and "base") feeding the
    /commit/compare endpoint via GET. '''

    @property
    def html(self):
        Tag('h2', 'Compare').as_row()
        with Tag('form',
                 class_='container-fluid',
                 method='GET',
                 action='/commit/compare').as_row():
            # "cur" selector: the commit under inspection (rendered green).
            with Tag('select', name='cur', class_='form-control').as_col(5):
                with FOR('rcd in %s' % self.KEY('records')):
                    Tag('option',
                        c=VAL('rcd.shortcommit') + " " + VAL('rcd.date'),
                        value=VAL('rcd.commit'),
                        style="color:green")

            # "base" selector: the baseline commit to compare against.
            with Tag('select', name='base', class_='form-control').as_col(5):
                with FOR('rcd in %s' % self.KEY('records')):
                    Tag('option',
                        c=VAL('rcd.shortcommit') + " " + VAL('rcd.date'),
                        value=VAL('rcd.commit'))

            with lyt.col():
                Tag('button',
                    class_='btn btn-primary',
                    c='Submit',
                    type='submit').as_col()
        # NOTE(review): RawHtml payload below appears truncated by extraction.
        RawHtml('
')

    def logic(self):
        records_ = CommitRecord.get_all()
        return {self.KEY('records'): records_}
257 |
258 |
class CommitStatusSnip(Snippet):
    ''' A list of commits. '''

    def __init__(self, table_name):
        # table_name: the result table whose commits are listed.
        super().__init__()
        self.table_name = table_name

    @property
    def html(self):
        ''' Commit list with links to details. '''

        with Tag('ul', class_='list-group'):
            # Every entry links to the per-commit details page; the commit
            # hash itself is filled in by the template.
            href_val = '/commit/details?table=%s&commit=%s' % (self.table_name, VAL('commit.commit'))
            with FOR('commit in %s' % self.KEY('commits')):
                # Passed commits get the "success" styling.
                with IF('commit.passed') as f:
                    with Tag(
                            'a',
                            class_='list-group-item list-group-item-action list-group-item-success',
                            href=href_val):
                        Tag('b', VAL('commit.shortcommit'))
                        Tag('span', VAL('commit.date'))

                # Inject an `else` branch one level up in the template, then
                # emit the non-passed ("secondary") variant of the entry.
                f.add(STMT('else'), -1)

                with Tag(
                        'a',
                        class_='list-group-item list-group-item-action list-group-item-secondary',
                        href=href_val):
                    Tag('b', VAL('commit.shortcommit'))
                    Tag('span', VAL('commit.date'))

    def logic(self, table_name):
        # Records reversed from retrieval order -- presumably newest first;
        # confirm against CommitRecord.get_all.
        commits = CommitRecord.get_all(table_name)
        return {self.KEY('commits'): [v for v in reversed(commits)], }
293 |
294 |
class CommitCompareResultSnip(Snippet):
    ''' Comparison result.

    For every task shared by two commits, shows the percentage change of
    each KPI (red text for increases, green for decreases).
    '''

    @property
    def html(self):
        with lyt.row():
            with Tag('p'):
                Tag('span', 'Comparation between')
                Tag('b', self.VAL('cur_commit'))
                Tag('span', 'and history')
                Tag('b', self.VAL('base_commit'))
                # NOTE(review): original literal corrupted (string split
                # across lines); restored as a line break -- confirm.
                RawHtml('<br/>')

        with lyt.row():
            Tag('h2', c='Tasks KPI diff')
        with lyt.row():
            with lyt.fluid_container():
                with FOR('task in %s' % self.KEY('tasks')):
                    with lyt.row():
                        Tag('h3', VAL('task.name'))
                        with table().set_striped():
                            # NOTE(review): the table-header literals were
                            # corrupted by extraction (HTML tags stripped);
                            # reconstructed -- confirm against rendered page.
                            RawHtml('<thead>')
                            RawHtml(
                                '<tr><th>| KPI %s |</th><th> improvement proportion(red better) | </th></tr>'
                                % (VAL('task.kpis[5]')))
                            RawHtml('</thead>')

                            with FOR('kpi in task.kpis'):
                                with table.row():
                                    table.col(VAL('kpi.name'))
                                    # Only color changes beyond +-0.01%.
                                    with IF('kpi.ratio > 0.01 or kpi.ratio < -0.01'
                                            ) as f1:
                                        with IF('kpi.ratio > 0') as f2:
                                            table.col(VAL(
                                                "'%.2f' % kpi.ratio | float") +
                                                      '%',
                                                      style='color: red;')
                                        f2.add(STMT('else'), -1)
                                        table.col(VAL(
                                            "'%.2f' % kpi.ratio | float") +
                                                  '%',
                                                  style='color: green;')
                                    f1.add(STMT('else'), -1)
                                    table.col(
                                        VAL("'%.2f' % kpi.ratio | float") +
                                        '%')

    def logic(self, cur_commit, base_commit):
        ''' Compute per-KPI change ratios between two commits.

        Returns template keys: `tasks` (list of objdicts with `name` and
        `kpis`), plus the short 7-char hashes `cur_commit`/`base_commit`.
        '''
        print('cur', cur_commit)
        print('base', base_commit)

        # NOTE(review): get_tasks is called with (table_name, commit)
        # elsewhere in this file; verify the single-argument form is valid.
        cur_rcds = CommitRecord.get_tasks(cur_commit)
        base_rcds = CommitRecord.get_tasks(base_commit)
        res = []
        for name in cur_rcds.keys():
            cur_task = cur_rcds.get(name, None)
            base_task = base_rcds.get(name, None)
            # If either commit lacks this task, skip it.
            if not (cur_task and base_task): continue

            record = objdict()
            res.append(record)
            record.name = name
            record.kpis = []
            for kpi in cur_task.kpis.keys():
                cur_kpi = cur_task.kpis.get(kpi, None)
                base_kpi = base_task.kpis.get(kpi, None)
                if not (cur_kpi and base_kpi): continue
                kpi_ = objdict()
                kpi_type = Kpi.dic.get(cur_kpi[1])

                kpi_.name = kpi
                kpi_.ratio = kpi_type.compare_with(
                    cur_kpi[0], base_kpi[0]) * 100.  # as a percentage
                record.kpis.append(kpi_)
        return {
            self.KEY('tasks'): res,
            self.KEY('cur_commit'): cur_commit[:7],
            self.KEY('base_commit'): base_commit[:7],
        }
376 |
377 |
class ScalarSnip(Snippet):
    '''
    Scalars for the latest N records for all the kpis

    One page for each task.
    '''

    def __init__(self, N, task_name):
        # N: number of most recent commits to plot.
        # task_name: the task whose KPIs are charted.
        super().__init__()
        self.N = N
        self.task_name = task_name

    @property
    def html(self):
        with lyt.row():
            # '各指标趋势图' = "trend chart for each KPI".
            Tag('h2', '各指标趋势图').as_row()
            Tag('h3', self.VAL('task_name'))
            # NOTE(review): original literal corrupted (string split across
            # lines); restored as a line break -- confirm.
            RawHtml('<br/>')

        with lyt.fluid_container():
            with FOR('kpi, dist in %s' % self.KEY('kpis')):
                with lyt.row():
                    RawHtml("{{ dist |safe }}")

    def logic(self, table_name):
        ''' Build one scalar chart per KPI from the last N commit records. '''
        # should be sorted by freshness
        commits = CommitRecord.get_all(table_name)
        kpis = {}
        last_N_commit = commits[-self.N:-1] + [commits[-1]]
        for commit in last_N_commit:
            rcd = CommitRecord.get_tasks(table_name, commit.commit)
            if self.task_name not in rcd: continue
            for (kpi, val) in rcd[self.task_name].kpis.items():
                # Parallel lists: '<kpi>--x' holds the x-axis labels.
                kpis.setdefault(kpi + '--x', []).append(commit.shortcommit)
                kpis.setdefault(kpi, []).append(float(val[2]))
        res = []
        # fix: js_deps was unbound (NameError) when no chart was generated.
        js_deps = []
        for (kpi, vals) in kpis.items():
            print(kpi, vals)
            if not kpi.endswith('--x'):
                dist, js_deps = scalar(kpi, kpis[kpi + '--x'], kpis[kpi])
                res.append((kpi, dist, ))

        return {self.KEY('kpis'): res, 'script_list': js_deps}
420 |
421 |
def passed_commits():
    """Placeholder -- not implemented yet; returns None."""
424 |
425 |
class objdict(dict):
    ''' A dict whose entries are also readable/writable as attributes.

    On a missing attribute it prints the available keys and terminates the
    process (legacy behavior, kept for compatibility with existing callers).
    '''

    def __setattr__(self, key, value):
        # Attribute assignment stores straight into the mapping.
        self[key] = value

    def __getattr__(self, item):
        # Only called for names absent from the instance, so a plain
        # lookup either succeeds or raises KeyError.
        try:
            return self[item]
        except KeyError:
            # fix: was a bare `except`, which also swallowed SystemExit and
            # KeyboardInterrupt; only a missing key should land here.
            print('valid keys:', list(self.keys()))
            exit(1)
436 |
437 |
438 |
def tasks_success(tasks):
    ''' Return True iff every task record in `tasks` passed.

    tasks: mapping of task name -> record with a 'passed' entry.
    An empty mapping counts as success.
    '''
    # all() short-circuits on the first failing task, like the original loop.
    return all(task['passed'] for task in tasks.values())
443 |
--------------------------------------------------------------------------------