├── requirements.txt
├── LICENSE
├── README.md
├── plugins
│   ├── check_timeline.py
│   ├── sym.py
│   └── check_movies.py
└── .gitignore
/requirements.txt:
--------------------------------------------------------------------------------
1 | requests
2 | python-dateutil
3 | pillow
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | The MIT License (MIT)
2 |
3 | Copyright 2017 HIKAKIN_SYM
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
6 |
7 | The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
8 |
9 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # HIKAKIN_SYM
2 |
3 | ヒカキンシンメトリーBot
4 |
5 | ## about
6 | [@HIKAKIN_SYM](https://twitter.com/@HIKAKIN_SYM)は、ヒカキン動画のサムネイルに含まれる顔の位置を[Cloud Vision API](https://cloud.google.com/vision/?hl=ja)を使って認識し、シンメトリーさせ投稿するBotです。
7 | @SYM_HIKAKINをリスペクトしていますが作成者は異なり無関係です。
8 | ライセンスについては`LICENSE`をご覧ください。
9 |
10 | ## technologies
11 | - Python3
12 | - Cloud Vision API (Google Cloud Platform)
13 |
14 | ## updates
15 | 2017/1/1: 1.0 公開
16 | - 整えられるだけコードを整えて公開
17 |
18 | 2017/1/1: 2.0 OpenCV対応
19 | - **OpenCVを用いた顔検出も行えるようになりました。** MCVA(Microsoft Computer Vision API)を用いて顔を検出できなかった場合、より検出閾値の広い顔検出として利用することができます。しかし、完全に誤検出を防ぎたい場合はオフにすると良いです。
20 |
21 | 2017/2/11: 3.0 使用WebAPI変更
22 | - 顔認識に使用するWebAPIを**Computer Vision API**から**FaceAPI**に変更しました。より高度な検出を期待できるため、実験的に稼働Botの`use_cv`を`false`にしました。
23 |
24 | 2017/4/23: 4.0 ディレクトリ構造を変更
25 | - ディレクトリ構造を大幅に変更し、 `plugins` の内容のみを公開するようにしました。その他、小さな変更も加えました。
26 |
27 | 2017/11/2: 5.0 再開
28 | - 諸事情により止めていたBotをリファクタリングしたり使用APIを変更したりして再稼働しました。
29 | - リポジトリを変更しました。[旧リポジトリ](https://github.com/HIKAKIN-SYM/Bot_TBFW)
30 | - 顔検出を`Face API/OpenCV`から`Cloud Vision API`に変更しました。
31 | - データベースをMySQLからSQLiteに変更しました。
32 | - 依存BotフレームワークをTBFW後続の[PyChroner](https://github.com/NephyProject/PyChroner)に変更しました。
33 |
34 | ## setup
35 | リポジトリを`PyChroner`の`plugins`内にcloneしてください。
36 | ```bash
37 | git clone git@github.com:HIKAKIN-SYM/Bot.git hikakin_sym
38 | cd hikakin_sym
39 | pip3 install -r requirements.txt
40 | ```
41 | `config.json`/`secret`に必要なパラメータは以下のとおりです。
42 |
43 | ```json
44 | {
45 | "hikakin_sym": {
46 | "youtube_key": "hogepiyo",
47 | "cloudvision_key": "hogepiyo"
48 | }
49 | }
50 | ```
--------------------------------------------------------------------------------
/plugins/check_timeline.py:
--------------------------------------------------------------------------------
1 | # Compatible with Python3
2 | # -*- coding: utf-8 -*-
3 |
4 | import os
5 | import importlib.util
6 | from pychroner import PluginMeta, PluginType
7 |
8 |
@PluginMeta(PluginType.TwitterTimeline, twitterAccount="HIKAKIN_SYM")
def do(plugin_api, stream):
    """Watch the HIKAKIN_SYM timeline stream and symmetrize HIKAKIN's image tweets.

    Invoked per status by PyChroner's TwitterTimeline plugin type. For each
    image attached to a tweet by @hikakin, runs face-based symmetrization via
    sym.Symmetry and posts the resulting images, then deletes the local files.

    :param plugin_api: PyChroner plugin API (logger, config, Twitter accounts, dirs).
    :param stream: a Twitter status payload (dict-like).
    """
    logger = plugin_api.getLogger()
    config = plugin_api.config.secret.hikakin_sym
    twitter = plugin_api.getTwitterAccount("HIKAKIN_SYM").getTweepyHandler(retry_count=5, retry_delay=10)

    # sym.py sits next to this plugin but outside PyChroner's import path,
    # so it is loaded directly from its file location.
    spec = importlib.util.spec_from_file_location("sym", os.path.join(os.path.dirname(os.path.abspath(__file__)), "sym.py"))
    mod = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(mod)
    sym = mod.Symmetry(api_key=config["cloudvision_key"], save_dir=plugin_api.dirs.cache, logger=logger)

    # Guard clauses instead of the original nested if-pyramid.
    if stream["user"]["screen_name"].lower() != "hikakin":
        return

    media = stream.get("extended_entities", stream.get("entities", {})).get("media")
    if media is None:
        logger.info("ツイートに画像が添付されていませんでした。")
        return

    logger.info("Twitterにて新しい画像付きツイートを検出しました。")
    for entity in media:
        img_url = entity.get("media_url_https")
        if img_url is None:
            continue

        # Symmetry.do yields a list of [original, left, right] file-path
        # triples, or a falsy value when no face was detected.
        sym_result = sym.do(img_url)
        if not sym_result:
            # BUG FIX: the original called enumerate(sym_result) directly,
            # which raises TypeError when Symmetry.do returns False.
            continue

        for paths in sym_result:
            media_ids = [twitter.media_upload(m).media_id_string for m in paths]
            twitter.update_status(status="", media_ids=media_ids)

        # Remove the generated files (plain loops instead of a
        # side-effecting nested list comprehension).
        for paths in sym_result:
            for m in paths:
                if os.path.exists(m):
                    os.remove(m)
36 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | env/
12 | build/
13 | develop-eggs/
14 | dist/
15 | downloads/
16 | eggs/
17 | .eggs/
18 | lib/
19 | lib64/
20 | parts/
21 | sdist/
22 | var/
23 | wheels/
24 | *.egg-info/
25 | .installed.cfg
26 | *.egg
27 |
28 | # PyInstaller
29 | # Usually these files are written by a python script from a template
30 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
31 | *.manifest
32 | *.spec
33 |
34 | # Installer logs
35 | pip-log.txt
36 | pip-delete-this-directory.txt
37 |
38 | # Unit test / coverage reports
39 | htmlcov/
40 | .tox/
41 | .coverage
42 | .coverage.*
43 | .cache
44 | nosetests.xml
45 | coverage.xml
46 | *.cover
47 | .hypothesis/
48 |
49 | # Translations
50 | *.mo
51 | *.pot
52 |
53 | # Django stuff:
54 | *.log
55 | local_settings.py
56 |
57 | # Flask stuff:
58 | instance/
59 | .webassets-cache
60 |
61 | # Scrapy stuff:
62 | .scrapy
63 |
64 | # Sphinx documentation
65 | docs/_build/
66 |
67 | # PyBuilder
68 | target/
69 |
70 | # Jupyter Notebook
71 | .ipynb_checkpoints
72 |
73 | # pyenv
74 | .python-version
75 |
76 | # celery beat schedule file
77 | celerybeat-schedule
78 |
79 | # SageMath parsed files
80 | *.sage.py
81 |
82 | # dotenv
83 | .env
84 |
85 | # virtualenv
86 | .venv
87 | venv/
88 | ENV/
89 |
90 | # Spyder project settings
91 | .spyderproject
92 | .spyproject
93 |
94 | # Rope project settings
95 | .ropeproject
96 |
97 | # mkdocs documentation
98 | /site
99 |
100 | # mypy
101 | .mypy_cache/
102 |
--------------------------------------------------------------------------------
/plugins/sym.py:
--------------------------------------------------------------------------------
1 | # Compatible with Python3
2 | # -*- coding: utf-8 -*-
3 |
4 | import json
5 | import base64
6 | import os
7 | import hashlib
8 | import requests
9 | from PIL import Image, ImageDraw
10 |
11 |
class Symmetry:
    """Create mirror-symmetry images around faces found in a web image.

    Faces are located with the Google Cloud Vision ``images:annotate``
    FACE_DETECTION feature. For each detected face the image is split at the
    face's horizontal centre and each half is mirrored outward, producing a
    "left" and a "right" symmetry image.
    """

    def __init__(self, api_key, save_dir, logger):
        """
        :param api_key: Cloud Vision API key.
        :param save_dir: directory where downloaded/generated JPEGs are written.
        :param logger: logger exposing an ``info`` method.
        :raises ValueError: if any required argument is missing/falsy.
        """
        # BUG FIX: the original used `and`, so the check only fired when
        # *every* argument was missing. Each argument is individually required.
        if not api_key or not save_dir or not logger:
            raise ValueError("必要な引数が不足しています。")
        self.api_key = api_key
        self.save_dir = save_dir
        self.logger = logger

    def do(self, url):  # シンメトリー実行関数
        """Download ``url``, detect faces, and build symmetry images.

        :param url: image URL (typically a video thumbnail).
        :return: a list of ``[original_path, left_path, right_path]`` triples,
                 one per detected face; an empty list when no face is found or
                 the API response carries no annotations.
        :raises requests.HTTPError: if the Vision API call fails.
        """
        self.logger.info("対象URL: {}".format(url))
        hashed_url = hashlib.md5(url.encode()).hexdigest()
        original_path = os.path.join(self.save_dir, f"{hashed_url}.jpg")
        with requests.get(url) as r:
            with open(original_path, "wb") as f:
                f.write(r.content)
            body = {
                "requests": [
                    {
                        "image": {
                            "content": base64.b64encode(r.content).decode()
                        },
                        "features": [
                            {
                                "type": "FACE_DETECTION",
                                "maxResults": 10
                            }
                        ]
                    }
                ]
            }

        api_uri = "https://vision.googleapis.com/v1/images:annotate"

        apireq = requests.post(api_uri, headers={"Content-Type": "application/json"}, params={"key": self.api_key}, data=json.dumps(body))
        apireq.raise_for_status()

        try:
            faces = apireq.json()["responses"][0]["faceAnnotations"]
        except KeyError:
            # Response had no faceAnnotations at all.
            return []

        if not faces:
            self.logger.info("顔は見つかりませんでした。処理を終了します。")
            os.remove(original_path)
            # BUG FIX: the original returned False here while the KeyError
            # path returned []; callers iterate the result, so an empty list
            # keeps them from crashing (same truthiness, so `if result:`
            # style callers are unaffected).
            return []

        result = []

        with Image.open(original_path, "r") as original:
            original_width, original_height = original.size
            # NOTE(review): the face boxes are drawn onto the same Image
            # object the crops below are taken from, so the symmetry images
            # can include the white outlines. Preserved as-is from the
            # original behaviour — confirm whether this is intentional.
            draw_img = ImageDraw.Draw(original)
            img_height = original_height + 20

            for count, face in enumerate(faces):
                self.logger.info("{}個目の顔を処理しています。".format(count + 1))

                vertices = face['fdBoundingPoly']['vertices']
                box = [(v.get('x', 0.0), v.get('y', 0.0)) for v in vertices]
                draw_img.line(box + [box[0]], width=1, fill='#FFF')

                xs = [v.get('x', 0.0) for v in vertices]
                coord = int((max(xs) + min(xs)) / 2)  # horizontal centre of the face

                # Left half mirrored to the right, on a white canvas with a
                # 10px border on every side.
                img1 = Image.new('RGB', (coord * 2 + 20, img_height), (255, 255, 255))
                img1_left = original.crop((0, 0, coord, original_height))
                img1.paste(img1_left, (10, 10))
                img1.paste(img1_left.transpose(Image.FLIP_LEFT_RIGHT), (coord + 10, 10))
                img1_filename = os.path.join(self.save_dir, "{}_{}_left.jpg".format(hashed_url, count))
                img1.save(img1_filename, 'JPEG', quality=80, optimize=True)

                # Right half mirrored to the left.
                right_width = original_width - coord
                img2 = Image.new('RGB', (right_width * 2 + 20, img_height), (255, 255, 255))
                img2_left = original.crop((coord, 0, original_width, original_height))
                img2.paste(img2_left, (right_width + 10, 10))
                img2.paste(img2_left.transpose(Image.FLIP_LEFT_RIGHT), (10, 10))
                img2_filename = os.path.join(self.save_dir, "{}_{}_right.jpg".format(hashed_url, count))
                img2.save(img2_filename, 'JPEG', quality=80, optimize=True)

                result.append([original_path, img1_filename, img2_filename])

            # Overwrite the downloaded original with the face boxes drawn in.
            original.save(original_path, 'JPEG', quality=80, optimize=True)

        return result
114 |
--------------------------------------------------------------------------------
/plugins/check_movies.py:
--------------------------------------------------------------------------------
1 | # Compatible with Python3
2 | # -*- coding: utf-8 -*-
3 |
4 | import os
5 | import sqlite3
6 | from datetime import datetime
7 | import requests
8 | import dateutil.parser
9 | import importlib.util
10 | from pychroner import PluginMeta, PluginType
11 |
12 |
@PluginMeta(PluginType.Schedule, multipleMinute=10)
def do(plugin_api):
    """Poll HIKAKIN-related YouTube upload playlists and tweet symmetrized thumbnails.

    Runs every 10 minutes via PyChroner's Schedule plugin type. New videos are
    recorded in a local SQLite database; on the very first run every current
    video is registered without tweeting. For each subsequently discovered
    video, the thumbnail is face-symmetrized via sym.Symmetry and the results
    are tweeted, then the local image files are deleted.

    :param plugin_api: PyChroner plugin API (logger, config, Twitter accounts, dirs).
    """
    now = datetime.now()
    logger = plugin_api.getLogger()
    config = plugin_api.config.secret.hikakin_sym
    user_db_path = os.path.join(plugin_api.dirs.cache, "hikakin_movies.db")
    twitter = plugin_api.getTwitterAccount("HIKAKIN_SYM").getTweepyHandler(retry_count=5, retry_delay=10)

    # sym.py sits next to this plugin but outside PyChroner's import path,
    # so it is loaded directly from its file location.
    spec = importlib.util.spec_from_file_location("sym", os.path.join(os.path.dirname(os.path.abspath(__file__)), "sym.py"))
    mod = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(mod)
    sym = mod.Symmetry(api_key=config["cloudvision_key"], save_dir=plugin_api.dirs.cache, logger=logger)

    # "Uploads" playlist IDs (the channel ID with its leading "UC" turned
    # into "UU", as returned by the channels endpoint's
    # contentDetails.relatedPlaylists.uploads):
    #   HIKAKIN:      UClLV6D8S4CrVJL64-aQvwTw
    #   HikakinTV:    UCZf__ehlCEBPop-_sldpBUQ
    #   HikakinGames: UCX1xppLvuj03ubLio8jslyA
    #   HikakinBlog:  UCQMoeRP9SDaFipXDBIp3pFA
    # SeikinTV (UCg4nOl7_gtStrLwF0_xoV0A) is deliberately excluded.
    playlist_ids = [
        'UUlLV6D8S4CrVJL64-aQvwTw',
        'UUZf__ehlCEBPop-_sldpBUQ',
        'UUX1xppLvuj03ubLio8jslyA',
        'UUQMoeRP9SDaFipXDBIp3pFA'
    ]

    target = "https://www.googleapis.com/youtube/v3/playlistItems"
    playlists = []
    params = {"key": config["youtube_key"], "part": "snippet", "maxResults": 10}

    # Fetch the latest uploads of each playlist.
    for playlist_id in playlist_ids:
        params["playlistId"] = playlist_id
        r = requests.get(target, params=params)
        r.raise_for_status()
        playlists.append(r.json())

    def _best_thumbnail(snippet):
        # URL of the widest available thumbnail variant.
        return max([[d["width"], d["url"]] for d in snippet["thumbnails"].values()], key=lambda x: x[0])[1]

    with sqlite3.connect(user_db_path, check_same_thread=False) as db:
        if not db.execute("SELECT * FROM sqlite_master WHERE name = ?", ("movies",)).fetchone():
            # First run: register everything currently visible, no tweets.
            logger.info("動画の初回登録を開始します。")
            db.execute("CREATE TABLE movies(video_id TEXT, title TEXT, thumb TEXT, published_at TEXT, created_at TEXT)")
            for playlist in playlists:
                for item in playlist["items"]:
                    snippet = item["snippet"]
                    published_at = dateutil.parser.parse(snippet["publishedAt"])
                    thumb = _best_thumbnail(snippet)
                    data = [snippet["resourceId"]["videoId"], snippet["title"], thumb, published_at, now]
                    db.execute('INSERT INTO movies VALUES (?, ?, ?, ?, ?)', data)
                    logger.info("挿入: {}/{}".format(snippet["resourceId"]["videoId"], snippet["title"]))

        else:
            for playlist in playlists:
                for item in playlist["items"]:
                    snippet = item["snippet"]
                    video_id = snippet["resourceId"]["videoId"]
                    if db.execute('SELECT * FROM movies WHERE video_id = ?', (video_id,)).fetchone():
                        continue  # already registered

                    published_at = dateutil.parser.parse(snippet["publishedAt"])
                    thumb = _best_thumbnail(snippet)
                    db.execute('INSERT INTO movies VALUES (?, ?, ?, ?, ?)',
                               (video_id, snippet["title"], thumb, published_at, now))
                    db.commit()
                    logger.info("挿入: {}/{}".format(video_id, snippet["title"]))

                    logger.info("『{}』の顔認識を開始します。".format(snippet["title"]))
                    sym_result = sym.do(thumb)

                    logger.info(thumb)

                    if not sym_result:
                        # BUG FIX: Symmetry.do can return a falsy value when
                        # no face is found; enumerate(False) previously raised
                        # TypeError here.
                        continue

                    for c, paths in enumerate(sym_result):
                        logger.info("{}枚目のアップロードを開始します。".format(c + 1))
                        media_ids = [twitter.media_upload(m).media_id_string for m in paths]
                        twitter.update_status(status="", media_ids=media_ids)
                        logger.info("アップロードを完了しました。")

                    # Remove generated images (plain loops instead of a
                    # side-effecting nested list comprehension).
                    for paths in sym_result:
                        for m in paths:
                            if os.path.exists(m):
                                os.remove(m)
104 |
--------------------------------------------------------------------------------