├── celery_projects
│   ├── word_count
│   │   ├── __init__.py
│   │   ├── __pycache__
│   │   │   ├── tasks.cpython-35.pyc
│   │   │   ├── __init__.cpython-35.pyc
│   │   │   └── celery.cpython-35.pyc
│   │   ├── tasks.py
│   │   └── celery.py
│   ├── jpgs
│   │   ├── flower1.jpg
│   │   ├── flower3.jpg
│   │   ├── flower4.jpg
│   │   ├── flower5.jpg
│   │   ├── htop1.jpg
│   │   ├── flower22.jpg
│   │   └── MyPicture7.jpg
│   ├── start_workers.sh
│   ├── celeryconfig.py
│   ├── restart_swarm.sh
│   ├── text
│   │   └── test.txt
│   ├── CeleryOnDockerSwarm.md
│   └── Celery on Docker Swarm as Bluemix-liked IoT platform.ipynb
├── Celery_config_plotter
│   ├── test.py
│   ├── celeryconfig.xls
│   ├── celeryconfig.py
│   ├── CeleryConfigPlotter.py
│   └── CeleryConfigPlotter.ipynb
└── README.md

/celery_projects/word_count/__init__.py:
--------------------------------------------------------------------------------
1 | 
--------------------------------------------------------------------------------
/Celery_config_plotter/test.py:
--------------------------------------------------------------------------------
1 | import CeleryConfigPlotter
2 | 
3 | CeleryConfigPlotter.genConfigFile()
4 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | [README.md](https://github.com/Wei1234c/CeleryOnDockerSwarm/blob/master/celery_projects/CeleryOnDockerSwarm.md)
--------------------------------------------------------------------------------
/celery_projects/jpgs/flower1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Wei1234c/CeleryOnDockerSwarm/HEAD/celery_projects/jpgs/flower1.jpg
--------------------------------------------------------------------------------
/celery_projects/jpgs/flower3.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Wei1234c/CeleryOnDockerSwarm/HEAD/celery_projects/jpgs/flower3.jpg
--------------------------------------------------------------------------------
/celery_projects/jpgs/flower4.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Wei1234c/CeleryOnDockerSwarm/HEAD/celery_projects/jpgs/flower4.jpg
--------------------------------------------------------------------------------
/celery_projects/jpgs/flower5.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Wei1234c/CeleryOnDockerSwarm/HEAD/celery_projects/jpgs/flower5.jpg
--------------------------------------------------------------------------------
/celery_projects/jpgs/htop1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Wei1234c/CeleryOnDockerSwarm/HEAD/celery_projects/jpgs/htop1.jpg
--------------------------------------------------------------------------------
/celery_projects/jpgs/flower22.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Wei1234c/CeleryOnDockerSwarm/HEAD/celery_projects/jpgs/flower22.jpg
--------------------------------------------------------------------------------
/celery_projects/jpgs/MyPicture7.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Wei1234c/CeleryOnDockerSwarm/HEAD/celery_projects/jpgs/MyPicture7.jpg
--------------------------------------------------------------------------------
/Celery_config_plotter/celeryconfig.xls:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Wei1234c/CeleryOnDockerSwarm/HEAD/Celery_config_plotter/celeryconfig.xls
--------------------------------------------------------------------------------
/celery_projects/word_count/__pycache__/tasks.cpython-35.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Wei1234c/CeleryOnDockerSwarm/HEAD/celery_projects/word_count/__pycache__/tasks.cpython-35.pyc
--------------------------------------------------------------------------------
/celery_projects/word_count/__pycache__/__init__.cpython-35.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Wei1234c/CeleryOnDockerSwarm/HEAD/celery_projects/word_count/__pycache__/__init__.cpython-35.pyc
--------------------------------------------------------------------------------
/celery_projects/word_count/__pycache__/celery.cpython-35.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Wei1234c/CeleryOnDockerSwarm/HEAD/celery_projects/word_count/__pycache__/celery.cpython-35.pyc
--------------------------------------------------------------------------------
/celery_projects/word_count/tasks.py:
--------------------------------------------------------------------------------
1 | from celery import group
2 | from word_count.celery import app
3 | 
4 | 
5 | @app.task
6 | def mapper(word):
7 |     return (word, 1) if len(word) >= 5 else None  # filter out words that are too short
--------------------------------------------------------------------------------
/celery_projects/word_count/celery.py:
--------------------------------------------------------------------------------
1 | from celery import Celery
2 | from kombu import Exchange, Queue
3 | 
4 | 
5 | app = Celery()
6 | app.config_from_object('celeryconfig')
7 | 
8 | if __name__ == '__main__':
9 |     app.start()
--------------------------------------------------------------------------------
/celery_projects/start_workers.sh:
--------------------------------------------------------------------------------
1 | # ./start_workers.sh
2 | 
3 | echo "Starting Celery cluster containers _________________________________________________"
4 | 
5 | eval $(docker-machine env --swarm master01)
6 | 
7 | PROJECT=$1          # project name
8 | WORKER_START_ID=$2  # ID of the first worker container
9 | WORKER_LAST_ID=$3   # ID of the last worker container
10 | CONCURRENCY=$4      # number of subprocesses each worker may use
11 | 
12 | for (( i=${WORKER_START_ID}; i<=${WORKER_LAST_ID}; i=i+1 ))
13 | do
14 |     docker run -d --name=${PROJECT}_celery${i} --hostname=${PROJECT}_celery${i} --net=mynet --volume=/data/celery_projects:/celery_projects wei1234c/celery_armv7 /bin/sh -c "cd /celery_projects && celery -A ${PROJECT} worker -n worker${i}.%h --concurrency=${CONCURRENCY} --loglevel=INFO"
15 | done
16 | 
--------------------------------------------------------------------------------
/celery_projects/celeryconfig.py:
--------------------------------------------------------------------------------
1 | 
2 | #_____________________________Import Kombu classes_____________________________
3 | from kombu import Exchange, Queue
4 | 
5 | #___________________________CELERY_TIMEZONE & Misc.____________________________
6 | CELERY_TIMEZONE = 'Asia/Taipei'
7 | CELERYD_POOL_RESTARTS = True
8 | 
9 | 
#__________________________________BROKER_URL__________________________________ 10 | BROKER_URL = 'redis://192.168.0.114:6379/0' 11 | 12 | #____________________________CELERY_RESULT_BACKEND_____________________________ 13 | CELERY_RESULT_BACKEND = 'redis://192.168.0.114:6379/1' 14 | 15 | #________________________________CELERY_IMPORTS________________________________ 16 | CELERY_IMPORTS = ('word_count.tasks',) 17 | 18 | #________________________________CELERY_QUEUES_________________________________ 19 | CELERY_QUEUES = ( 20 | Queue('word_counting', Exchange('celery', type = 'direct'), routing_key='word_counting'), 21 | ) 22 | 23 | #________________________________CELERY_ROUTES_________________________________ 24 | CELERY_ROUTES = { 25 | 'word_count.tasks.mapper': { 26 | 'queue': 'word_counting', 27 | 'routing_key': 'word_counting', 28 | }, 29 | } 30 | -------------------------------------------------------------------------------- /celery_projects/restart_swarm.sh: -------------------------------------------------------------------------------- 1 | echo "# Set Docker hosts IPs ____________________________________" 2 | rpi201='192.168.0.109' 3 | rpi202='192.168.0.114' 4 | master01=${rpi202} 5 | node01=${rpi201} 6 | echo ${master01} 7 | echo ${node01} 8 | 9 | 10 | 11 | eval $(docker-machine env master01) 12 | 13 | echo "# Create Consul server ____________________________________" 14 | docker run -d --restart=always -p 8500:8500 --name=consul --hostname=consul nimblestratus/rpi-consul -server -bootstrap 15 | 16 | 17 | 18 | echo "# Create Swarm manager ____________________________________" 19 | docker run -d \ 20 | --restart=always \ 21 | --name swarm-agent-master \ 22 | -p 3376:3376 \ 23 | -v /etc/docker:/etc/docker \ 24 | hypriot/rpi-swarm \ 25 | manage \ 26 | --tlsverify \ 27 | --tlscacert=/etc/docker/ca.pem \ 28 | --tlscert=/etc/docker/server.pem \ 29 | --tlskey=/etc/docker/server-key.pem \ 30 | -H tcp://0.0.0.0:3376 \ 31 | --strategy spread consul://${master01}:8500 32 | 33 | docker run -d \ 34 | --restart=always \ 35 | --name swarm-agent \ 36 | hypriot/rpi-swarm \ 37 | join --advertise ${master01}:2376 consul://${master01}:8500 38 | 39 | 40 | echo " Create Swarm node ____________________________________" 41 | eval $(docker-machine env node01) 42 | docker run -d \ 43 | --restart=always \ 44 | --name swarm-agent \ 45 | hypriot/rpi-swarm \ 46 | join --advertise ${node01}:2376 consul://${master01}:8500 47 | 48 | 49 | 50 | docker $(docker-machine config --swarm master01) info -------------------------------------------------------------------------------- /Celery_config_plotter/celeryconfig.py: -------------------------------------------------------------------------------- 1 | 2 | #_____________________________Import Kombu classes_____________________________ 3 | from kombu import Exchange, Queue 4 | 5 | #___________________________CELERY_TIMEZONE & Misc.____________________________ 6 | CELERY_TIMEZONE = 'Asia/Taipei' 7 | CELERYD_POOL_RESTARTS = True 8 | 9 | #__________________________________BROKER_URL__________________________________ 10 | BROKER_URL = 'redis://weilin.noip.me:6379/0' 11 | 12 | #____________________________CELERY_RESULT_BACKEND_____________________________ 13 | CELERY_RESULT_BACKEND = 'redis://weilin.noip.me:6379/1' 14 | 15 | #________________________________CELERY_IMPORTS________________________________ 16 | CELERY_IMPORTS = ('word_count.tasks',) 17 | 18 | #________________________________CELERY_QUEUES_________________________________ 19 | CELERY_QUEUES = ( 20 | 
Queue('word_counting', Exchange('celery', type = 'direct'), routing_key='word_counting'),
21 | )
22 | 
23 | #________________________________CELERY_ROUTES_________________________________
24 | CELERY_ROUTES = {
25 |     'word_count.tasks.mapper': {
26 |         'queue': 'word_counting',
27 |         'routing_key': 'word_counting',
28 |     },
29 | }
30 | 
31 | #_______________________________Workers Scripts________________________________
32 | #[Node - localhost] : celery -A word_count worker -n worker1.%h -Q word_counting --concurrency=10 --loglevel=INFO
33 | 
34 | #____________________________________FLOWER____________________________________
35 | #[Flower] : celery -A word_count flower
--------------------------------------------------------------------------------
/Celery_config_plotter/CeleryConfigPlotter.py:
--------------------------------------------------------------------------------
1 | 
2 | # coding: utf-8
3 | 
4 | # # Celery architecture planning
5 | 
6 | # In[1]:
7 | 
8 | from celery import Celery
9 | 
10 | 
11 | # In[2]:
12 | 
13 | import pandas as pd
14 | from pandas import Series, DataFrame
15 | 
16 | 
17 | # In[3]:
18 | 
19 | import os
20 | from pprint import pprint
21 | 
22 | 
23 | # ### Default configuration values
24 | 
25 | # In[4]:
26 | 
27 | def listDefaultCeleryConfigurations():
28 |     app = Celery()
29 |     configs = app.conf.__dict__['_order'][2]
30 |     configs = sorted([(k, v) for k, v in configs.items()])
31 |     for k, v in configs:
32 |         print ('{0} = {1}'.format(k, ("'" + v + "'") if isinstance(v, str) else v) )
33 | 
34 | 
35 | # In[5]:
36 | 
37 | listDefaultCeleryConfigurations()
38 | 
39 | 
40 | # ---
41 | # ### Read the contents of the plan file
42 | 
43 | # In[6]:
44 | 
45 | def getExcelData(file):
46 |     df = pd.read_excel(file)
47 |     df.dropna(axis=0, how='all', inplace=True)
48 | 
49 |     return df
50 | 
51 | 
52 | # ---
53 | # #### Import Kombu classes
54 | 
55 | # In[7]:
56 | 
57 | def import_Kombu_classes(plan, summary):
58 |     output = []
59 |     output.extend(['', '#{0:_^78}'.format('Import Kombu classes')])
60 |     output.append('{0}'.format('from kombu import Exchange, Queue'))
61 |     summary.extend(output)
62 | 
63 |     return summary
64 | 
65 | 
66 | # ---
67 | # #### CELERY_TIMEZONE & Misc.
68 | 69 | # In[8]: 70 | 71 | def set_CELERY_TIMEZONE_Misc(plan, summary): 72 | # 自訂的 73 | CELERY_TIMEZONE = 'Asia/Taipei' 74 | 75 | output = [] 76 | output.extend(['', '#{0:_^78}'.format('CELERY_TIMEZONE & Misc.')]) 77 | output.append("CELERY_TIMEZONE = '{0}'".format(CELERY_TIMEZONE)) 78 | output.append('CELERYD_POOL_RESTARTS = True') 79 | summary.extend(output) 80 | 81 | return summary 82 | 83 | 84 | # --- 85 | # #### BROKER_URL 86 | # BROKER_URL = 'redis://netbrain.noip.me:6379/0' 87 | 88 | # In[9]: 89 | 90 | def set_BROKER_URL(plan, summary): 91 | BROKER_URL = plan.Broker.drop_duplicates()[0] 92 | 93 | output = [] 94 | output.extend(['', '#{0:_^78}'.format('BROKER_URL')]) 95 | output.append("BROKER_URL = '{0}'".format(BROKER_URL)) 96 | summary.extend(output) 97 | 98 | return summary 99 | 100 | 101 | # --- 102 | # #### CELERY_RESULT_BACKEND 103 | # CELERY_RESULT_BACKEND = 'redis://netbrain.noip.me:6379/1' 104 | 105 | # In[10]: 106 | 107 | def set_CELERY_RESULT_BACKEND(plan, summary): 108 | CELERY_RESULT_BACKEND = plan.Result_backend.drop_duplicates()[0] 109 | 110 | output = [] 111 | output.extend(['', '#{0:_^78}'.format('CELERY_RESULT_BACKEND')]) 112 | output.append("CELERY_RESULT_BACKEND = '{0}'".format(CELERY_RESULT_BACKEND)) 113 | summary.extend(output) 114 | 115 | return summary 116 | 117 | 118 | # --- 119 | # #### CELERY_IMPORTS 120 | # CELERY_IMPORTS = ('proj.tasks', ) 121 | 122 | # In[11]: 123 | 124 | def set_CELERY_IMPORTS(plan, summary): 125 | Celery_app_tasks = plan[['Celery_app', 'Tasks_module']].drop_duplicates() 126 | modules = ('{0}.{1}'.format(Celery_app_tasks.ix[i, 'Celery_app'], Celery_app_tasks.ix[i, 'Tasks_module']) for i in range(len(Celery_app_tasks))) 127 | CELERY_IMPORTS = tuple(modules) 128 | 129 | output = [] 130 | output.extend(['', '#{0:_^78}'.format('CELERY_IMPORTS')]) 131 | output.append('CELERY_IMPORTS = {0}'.format(CELERY_IMPORTS)) 132 | summary.extend(output) 133 | 134 | return summary 135 | 136 | 137 | # --- 138 | # #### CELERY_QUEUES 139 | 140 | # CELERY_QUEUES = ( 141 | # Queue('feed_tasks', routing_key='feed.#'), 142 | # Queue('regular_tasks', routing_key='task.#'), 143 | # Queue('image_tasks', exchange=Exchange('mediatasks', type='direct'), routing_key='image.compress'), 144 | # ) 145 | # 146 | # CELERY_QUEUES = ( 147 | # Queue('default', Exchange('default'), routing_key='default'), 148 | # Queue('videos', Exchange('media'), routing_key='media.video'), 149 | # Queue('images', Exchange('media'), routing_key='media.image'), 150 | # ) 151 | 152 | # In[12]: 153 | 154 | def set_CELERY_QUEUES(plan, summary): 155 | queues = plan[['Queue', 'Exchange', 'Exchange_Type', 'Routing_Key']].drop_duplicates() 156 | output = [] 157 | output.extend(['', '#{0:_^78}'.format('CELERY_QUEUES')]) 158 | 159 | output.append('CELERY_QUEUES = (') 160 | 161 | for i in range(len(queues)): 162 | output.append(" Queue('{queue}', Exchange('{exchange}', type = '{exchange_Type}'), routing_key='{routing_key}')," .format(queue = queues.ix[i, 'Queue'], 163 | exchange = queues.ix[i, 'Exchange'], 164 | exchange_Type = queues.ix[i, 'Exchange_Type'], 165 | routing_key = queues.ix[i, 'Routing_Key'] 166 | ) 167 | ) 168 | output.append(')') 169 | 170 | summary.extend(output) 171 | 172 | return summary 173 | 174 | 175 | # --- 176 | # #### CELERY_ROUTES 177 | 178 | # CELERY_ROUTES = { 179 | # 'feeds.tasks.import_feed': { 180 | # 'queue': 'feed_tasks', 181 | # 'routing_key': 'feed.import', 182 | # }, 183 | # } 184 | 185 | # In[13]: 186 | 187 | def set_CELERY_ROUTES(plan, summary): 188 | routes = 
plan[['Celery_app', 'Tasks_module', 'Task', 'Queue', 'Routing_Key']].drop_duplicates() 189 | output = [] 190 | output.extend(['', '#{0:_^78}'.format('CELERY_ROUTES')]) 191 | 192 | output.append('CELERY_ROUTES = {') 193 | 194 | for i in range(len(routes)): 195 | output.append(" '{app}.{module}.{task}': {{\n 'queue': '{queue}',\n 'routing_key': '{routing_key}',\n }}," .format(app = routes.ix[i, 'Celery_app'], 196 | module = routes.ix[i, 'Tasks_module'], 197 | task = routes.ix[i, 'Task'], 198 | queue = routes.ix[i, 'Queue'], 199 | routing_key = routes.ix[i, 'Routing_Key']) 200 | ) 201 | output.append('}') 202 | 203 | summary.extend(output) 204 | 205 | return summary 206 | 207 | 208 | # --- 209 | # #### WORKERS 210 | 211 | # In[14]: 212 | 213 | def set_Workers_Scripts(plan, summary): 214 | workers = plan[['Node', 'Celery_app', 'Worker', 'Queue', 'Concurrency', 'Log_level']].drop_duplicates() 215 | output = [] 216 | output.extend(['', '#{0:_^78}'.format('Workers Scripts')]) 217 | 218 | for i in range(len(workers)): 219 | output.append('#[Node - {node}] : celery -A {app} worker -n {worker} -Q {queue} --concurrency={concurrency} --loglevel={loglevel}' .format(node = workers.ix[i, 'Node'], 220 | app = workers.ix[i, 'Celery_app'], 221 | worker = workers.ix[i, 'Worker'], 222 | queue = workers.ix[i, 'Queue'], 223 | concurrency = workers.ix[i, 'Concurrency'], 224 | loglevel = workers.ix[i, 'Log_level'] 225 | ) 226 | ) 227 | 228 | summary.extend(output) 229 | 230 | return summary 231 | 232 | 233 | # --- 234 | # #### FLOWER 235 | 236 | # In[15]: 237 | 238 | def set_FLOWER(plan, summary): 239 | app = plan.Celery_app.drop_duplicates()[0] 240 | output = [] 241 | output.extend(['', '#{0:_^78}'.format('FLOWER')]) 242 | 243 | output.append('#[Flower] : celery -A {app} flower'.format(app = app)) 244 | summary.extend(output) 245 | 246 | return summary 247 | 248 | 249 | # ## Summarize 250 | 251 | # In[16]: 252 | 253 | def summarizeConfigurations(planExcelFile): 254 | 255 | summary = [] 256 | 257 | # listDefaultCeleryConfigurations() 258 | 259 | plan = getExcelData(planExcelFile) 260 | 261 | import_Kombu_classes(plan, summary) 262 | set_CELERY_TIMEZONE_Misc(plan, summary) 263 | set_BROKER_URL(plan, summary) 264 | set_CELERY_RESULT_BACKEND(plan, summary) 265 | set_CELERY_IMPORTS(plan, summary) 266 | set_CELERY_QUEUES(plan, summary) 267 | set_CELERY_ROUTES(plan, summary) 268 | 269 | set_Workers_Scripts(plan, summary) 270 | set_FLOWER(plan, summary) 271 | 272 | return summary 273 | 274 | 275 | # ## Output Configuration File 276 | 277 | # In[17]: 278 | 279 | def writeConfigurationFile(summary, file = 'celeryconfig.py'): 280 | with open(file, 'w', encoding = 'utf8') as f: 281 | for line in summary: f.write(line + '\n') 282 | 283 | 284 | # In[18]: 285 | 286 | def genConfigFile(): 287 | # 指定規劃檔案 288 | folder = os.getcwd() 289 | files = [file for file in os.listdir(folder) if file.rpartition('.')[2] in ('xls','xlsx')] 290 | 291 | if len(files) == 1 : 292 | file = os.path.join(folder, files[0]) 293 | summary = summarizeConfigurations(file) 294 | for line in summary: print (line) 295 | writeConfigurationFile(summary) 296 | 297 | else: 298 | print('There must be one and only one plan Excel file.') 299 | 300 | 301 | # ## Main 302 | 303 | # In[19]: 304 | 305 | if __name__ == '__main__': 306 | genConfigFile() 307 | 308 | 309 | # In[ ]: 310 | 311 | 312 | 313 | -------------------------------------------------------------------------------- /celery_projects/text/test.txt: 
-------------------------------------------------------------------------------- 1 | 2 | Aesop's Fables Translated by George Fyler Townsend 3 | 4 | 5 | 6 | 7 | The Wolf and the Lamb 8 | 9 | WOLF, meeting with a Lamb astray from the fold, resolved not to 10 | lay violent hands on him, but to find some plea to justify to the 11 | Lamb the Wolf's right to eat him. He thus addressed him: 12 | "Sirrah, last year you grossly insulted me." "Indeed," bleated 13 | the Lamb in a mournful tone of voice, "I was not then born." Then 14 | said the Wolf, "You feed in my pasture." "No, good sir," replied 15 | the Lamb, "I have not yet tasted grass." Again said the Wolf, 16 | "You drink of my well." "No," exclaimed the Lamb, "I never yet 17 | drank water, for as yet my mother's milk is both food and drink 18 | to me." Upon which the Wolf seized him and ate him up, saying, 19 | "Well! I won't remain supperless, even though you refute every 20 | one of my imputations." The tyrant will always find a pretext for 21 | his tyranny. 22 | 23 | 24 | The Bat and the Weasels 25 | 26 | A BAT who fell upon the ground and was caught by a Weasel pleaded 27 | to be spared his life. The Weasel refused, saying that he was by 28 | nature the enemy of all birds. The Bat assured him that he was 29 | not a bird, but a mouse, and thus was set free. Shortly 30 | afterwards the Bat again fell to the ground and was caught by 31 | another Weasel, whom he likewise entreated not to eat him. The 32 | Weasel said that he had a special hostility to mice. The Bat 33 | assured him that he was not a mouse, but a bat, and thus a second 34 | time escaped. 35 | 36 | It is wise to turn circumstances to good account. 37 | 38 | 39 | The Ass and the Grasshopper 40 | 41 | AN ASS having heard some Grasshoppers chirping, was highly 42 | enchanted; and, desiring to possess the same charms of melody, 43 | demanded what sort of food they lived on to give them such 44 | beautiful voices. They replied, "The dew." The Ass resolved that 45 | he would live only upon dew, and in a short time died of hunger. 46 | 47 | 48 | The Lion and the Mouse 49 | 50 | A LION was awakened from sleep by a Mouse running over his face. 51 | Rising up angrily, he caught him and was about to kill him, when 52 | the Mouse piteously entreated, saying: "If you would only spare 53 | my life, I would be sure to repay your kindness." The Lion 54 | laughed and let him go. It happened shortly after this that the 55 | Lion was caught by some hunters, who bound him by st ropes to the 56 | ground. The Mouse, recognizing his roar, came gnawed the rope 57 | with his teeth, and set him free, exclaim 58 | 59 | "You ridiculed the idea of my ever being able to help you, 60 | expecting to receive from me any repayment of your favor; I now 61 | you know that it is possible for even a Mouse to con benefits on 62 | a Lion." 63 | 64 | 65 | The Charcoal-Burner and the Fuller 66 | 67 | A CHARCOAL-BURNER carried on his trade in his own house. One day 68 | he met a friend, a Fuller, and entreated him to come and live 69 | with him, saying that they should be far better neighbors and 70 | that their housekeeping expenses would be lessened. The Fuller 71 | replied, "The arrangement is impossible as far as I am concerned, 72 | for whatever I should whiten, you would immediately blacken again 73 | with your charcoal." 74 | 75 | Like will draw like. 76 | 77 | 78 | The Father and His Sons 79 | 80 | A FATHER had a family of sons who were perpetually quarreling 81 | among themselves. 
When he failed to heal their disputes by his 82 | exhortations, he determined to give them a practical illustration 83 | of the evils of disunion; and for this purpose he one day told 84 | them to bring him a bundle of sticks. When they had done so, he 85 | placed the faggot into the hands of each of them in succession, 86 | and ordered them to break it in pieces. They tried with all 87 | their strength, and were not able to do it. He next opened the 88 | faggot, took the sticks separately, one by one, and again put 89 | them into his sons' hands, upon which they broke them easily. He 90 | then addressed them in these words: "My sons, if you are of one 91 | mind, and unite to assist each other, you will be as this faggot, 92 | uninjured by all the attempts of your enemies; but if you are 93 | divided among yourselves, you will be broken as easily as these 94 | sticks." 95 | 96 | 97 | The Boy Hunting Locusts 98 | 99 | A BOY was hunting for locusts. He had caught a goodly number, 100 | when he saw a Scorpion, and mistaking him for a locust, reached 101 | out his hand to take him. The Scorpion, showing his sting, said: 102 | If you had but touched me, my friend, you would have lost me, and 103 | all your locusts too!" 104 | 105 | 106 | The Cock and the Jewel 107 | 108 | A COCK, scratching for food for himself and his hens, found a 109 | precious stone and exclaimed: "If your owner had found thee, and 110 | not I, he would have taken thee up, and have set thee in thy 111 | first estate; but I have found thee for no purpose. I would 112 | rather have one barleycorn than all the jewels in the world." 113 | 114 | 115 | The Kingdom of the Lion 116 | 117 | THE BEASTS of the field and forest had a Lion as their king. He 118 | was neither wrathful, cruel, nor tyrannical, but just and gentle 119 | as a king could be. During his reign he made a royal 120 | proclamation for a general assembly of all the birds and beasts, 121 | and drew up conditions for a universal league, in which the Wolf 122 | and the Lamb, the Panther and the Kid, the Tiger and the Stag, 123 | the Dog and the Hare, should live together in perfect peace and 124 | amity. The Hare said, "Oh, how I have longed to see this day, in 125 | which the weak shall take their place with impunity by the side 126 | of the strong." And after the Hare said this, he ran for his 127 | life. 128 | 129 | 130 | The Wolf and the Crane 131 | 132 | A WOLF who had a bone stuck in his throat hired a Crane, for a 133 | large sum, to put her head into his mouth and draw out the bone. 134 | When the Crane had extracted the bone and demanded the promised 135 | payment, the Wolf, grinning and grinding his teeth, exclaimed: 136 | "Why, you have surely already had a sufficient recompense, in 137 | having been permitted to draw out your head in safety from the 138 | mouth and jaws of a wolf." 139 | 140 | In serving the wicked, expect no reward, and be thankful if you 141 | escape injury for your pains. 142 | 143 | 144 | The Fisherman Piping 145 | 146 | A FISHERMAN skilled in music took his flute and his nets to the 147 | seashore. Standing on a projecting rock, he played several tunes 148 | in the hope that the fish, attracted by his melody, would of 149 | their own accord dance into his net, which he had placed below. 150 | At last, having long waited in vain, he laid aside his flute, and 151 | casting his net into the sea, made an excellent haul of fish. 
152 | When he saw them leaping about in the net upon the rock he said: 153 | "O you most perverse creatures, when I piped you would not dance, 154 | but now that I have ceased you do so merrily." 155 | 156 | 157 | Hercules and the Wagoner 158 | 159 | A CARTER was driving a wagon along a country lane, when the 160 | wheels sank down deep into a rut. The rustic driver, stupefied 161 | and aghast, stood looking at the wagon, and did nothing but utter 162 | loud cries to Hercules to come and help him. Hercules, it is 163 | said, appeared and thus addressed him: "Put your shoulders to the 164 | wheels, my man. Goad on your bullocks, and never more pray to me 165 | for help, until you have done your best to help yourself, or 166 | depend upon it you will henceforth pray in vain." 167 | 168 | Self-help is the best help. 169 | 170 | 171 | The Ants and the Grasshopper 172 | 173 | THE ANTS were spending a fine winter's day drying grain collected 174 | in the summertime. A Grasshopper, perishing with famine, passed 175 | by and earnestly begged for a little food. The Ants inquired of 176 | him, "Why did you not treasure up food during the summer?' He 177 | replied, "I had not leisure enough. I passed the days in 178 | singing." They then said in derision: "If you were foolish enough 179 | to sing all the summer, you must dance supperless to bed in the 180 | winter." 181 | 182 | 183 | The Traveler and His Dog 184 | 185 | A TRAVELER about to set out on a journey saw his Dog stand at the 186 | door stretching himself. He asked him sharply: "Why do you stand 187 | there gaping? Everything is ready but you, so come with me 188 | instantly." The Dog, wagging his tail, replied: "O, master! I am 189 | quite ready; it is you for whom I am waiting." 190 | 191 | The loiterer often blames delay on his more active friend. 192 | 193 | 194 | The Dog and the Shadow 195 | 196 | A DOG, crossing a bridge over a stream with a piece of flesh in 197 | his mouth, saw his own shadow in the water and took it for that 198 | of another Dog, with a piece of meat double his own in size. He 199 | immediately let go of his own, and fiercely attacked the other 200 | Dog to get his larger piece from him. He thus lost both: that 201 | which he grasped at in the water, because it was a shadow; and 202 | his own, because the stream swept it away. 203 | 204 | 205 | The Mole and His Mother 206 | 207 | A MOLE, a creature blind from birth, once said to his Mother: "I 208 | am sure than I can see, Mother!" In the desire to prove to him 209 | his mistake, his Mother placed before him a few grains of 210 | frankincense, and asked, "What is it?' The young Mole said, "It 211 | is a pebble." His Mother exclaimed: "My son, I am afraid that you 212 | are not only blind, but that you have lost your sense of smell. 213 | 214 | 215 | The Herdsman and the Lost Bull 216 | 217 | A HERDSMAN tending his flock in a forest lost a Bull-calf from 218 | the fold. After a long and fruitless search, he made a vow that, 219 | if he could only discover the thief who had stolen the Calf, he 220 | would offer a lamb in sacrifice to Hermes, Pan, and the Guardian 221 | Deities of the forest. Not long afterwards, as he ascended a 222 | small hillock, he saw at its foot a Lion feeding on the Calf. 
223 | Terrified at the sight, he lifted his eyes and his hands to 224 | heaven, and said: "Just now I vowed to offer a lamb to the 225 | Guardian Deities of the forest if I could only find out who had 226 | robbed me; but now that I have discovered the thief, I would 227 | willingly add a full-grown Bull to the Calf I have lost, if I may 228 | only secure my own escape from him in safety." 229 | 230 | 231 | The Hare and the Tortoise 232 | 233 | A HARE one day ridiculed the short feet and slow pace of the 234 | Tortoise, who replied, laughing: "Though you be swift as the 235 | wind, I will beat you in a race." The Hare, believing her 236 | assertion to be simply impossible, assented to the proposal; and 237 | they agreed that the Fox should choose the course and fix the 238 | goal. On the day appointed for the race the two started 239 | together. The Tortoise never for a moment stopped, but went on 240 | with a slow but steady pace straight to the end of the course. 241 | The Hare, lying down by the wayside, fell fast asleep. At last 242 | waking up, and moving as fast as he could, he saw the Tortoise 243 | had reached the goal, and was comfortably dozing after her 244 | fatigue. 245 | 246 | Slow but steady wins the race. 247 | 248 | 249 | The Pomegranate, Apple-Tree, and Bramble 250 | 251 | THE POMEGRANATE and Apple-Tree disputed as to which was the most 252 | beautiful. When their strife was at its height, a Bramble from 253 | the neighboring hedge lifted up its voice, and said in a boastful 254 | tone: "Pray, my dear friends, in my presence at least cease from 255 | such vain disputings." 256 | 257 | 258 | The Farmer and the Stork 259 | 260 | A FARMER placed nets on his newly-sown plowlands and caught a 261 | number of Cranes, which came to pick up his seed. With them he 262 | trapped a Stork that had fractured his leg in the net and was 263 | earnestly beseeching the Farmer to spare his life. "Pray save 264 | me, Master," he said, "and let me go free this once. My broken 265 | limb should excite your pity. Besides, I am no Crane, I am a 266 | Stork, a bird of excellent character; and see how I love and 267 | slave for my father and mother. Look too, at my feathers-- 268 | they are not the least like those of a Crane." The Farmer 269 | laughed aloud and said, "It may be all as you say, I only know 270 | this: I have taken you with these robbers, the Cranes, and you 271 | must die in their company." 272 | 273 | Birds of a feather flock together. 274 | 275 | 276 | The Farmer and the Snake 277 | 278 | ONE WINTER a Farmer found a Snake stiff and frozen with cold. He 279 | had compassion on it, and taking it up, placed it in his bosom. 280 | The Snake was quickly revived by the warmth, and resuming its 281 | natural instincts, bit its benefactor, inflicting on him a mortal 282 | wound. "Oh," cried the Farmer with his last breath, "I am 283 | rightly served for pitying a scoundrel." 284 | 285 | The greatest kindness will not bind the ungrateful. 
286 | 
287 | 
--------------------------------------------------------------------------------
/celery_projects/CeleryOnDockerSwarm.md:
--------------------------------------------------------------------------------

# Building a Bluemix-like IoT Platform with Celery on Docker Swarm
## Part I: Celery on Docker Swarm

Wei Lin
20160126

## Background
The day before yesterday (2016/01/24) I attended an excellent [talk](https://www.facebook.com/events/1092250660807957/1098932823473074/) in Taipei about [Raspberry Pi](https://www.raspberrypi.org/), [Docker](https://www.docker.com/), [MQTT](http://cheng-min-i-taiwan.blogspot.tw/2015/03/raspberry-pimqtt-android.html) and [Bluemix](https://console.ng.bluemix.net/). It was mentioned there that Bluemix and MQTT are built on a publisher/subscriber mechanism, which reminded me of [Celery](http://www.celeryproject.org/), which I had come across a few weeks earlier: it uses a producer/queue/consumer model, and the two struck me as very similar.

In the Python world, Celery is a well-known distributed task queue framework. It is very useful when facing concurrent workloads, and its Canvas mechanism in particular is a powerful tool for building complex processing flows on top of a parallel system.

Since Celery can operate in a distributed environment, and in principle the more parallel and spread out it is the better it performs, it felt natural to combine it with Docker Swarm: Docker Swarm builds the cluster platform and provides the worker containers that Celery needs, while Celery scales the number of workers and processes on the Swarm as required and carries out the requested computation in parallel.

Celery's mechanism, simply put, is this: a producer sends messages requesting computation into a queue, and many workers pick messages out of the queue for processing. By analogy with Bluemix and MQTT: the producer is like a publisher, the queue is like a topic or channel, and the consumer is like a subscriber. I find the two very much alike.

So, if we can use Docker Swarm to provide a cluster stage for a framework like Celery, could we quickly build a private (without the 30-day trial limit) platform similar to Bluemix, letting the devices on it form a distributed, collaborative computing system that treats the whole IoT (Internet of Things) as one?

Beyond building an IoT platform, however, Celery + Docker Swarm should have other uses as well, so for reusability this part is split out on its own: this experiment only verifies that Celery runs properly on Docker Swarm.
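
To make the producer/queue/consumer vs. publisher/topic/subscriber analogy concrete, here is a minimal sketch of my own (not part of the original experiment; the app name, broker URL and task are illustrative assumptions):

```python
from celery import Celery

# the broker hosts the queue, playing roughly the role of an MQTT broker/topic
app = Celery('demo', broker='redis://localhost:6379/0',
             backend='redis://localhost:6379/1')

@app.task
def echo(payload):
    # consumer (subscriber): any worker watching the queue executes this
    return payload

# producer (publisher): this sends a message into the queue;
# it does not call the function directly
result = echo.delay({'sensor': 'temperature', 'value': 25.3})
print(result.get())  # fetch the reply through the result backend
```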

## Premises and goals of the experiment:
- Assemble the system entirely with Docker: apart from Docker Engine and Docker Machine, install no other software on the hosts.
- Build a distributed computing platform: use Celery to construct a distributed task queue in a Docker Swarm environment.
- Keep the cluster platform swappable: Celery and Docker Swarm stay decoupled; that is, Celery does not know whether it is running on a single Docker Machine or on a Swarm (in fact, Celery does not even know it is running on Docker at all).
- Exercise Docker Swarm's features: verify that Docker automatically spreads the worker containers Celery needs across different Docker Machines.
- Verify that Celery runs properly on Docker Swarm:
    - One producer (publisher) sends messages to a specific queue (topic), after which the corresponding consumers (subscribers) receive, process and respond to them.
    - "Word Count", the introductory program of the Hadoop/MapReduce world, serves as the test case.

## Steps:

### Build the Docker Swarm:
Following this [article](https://www.facebook.com/groups/docker.taipei/permalink/1704032656498757), I had already built a Docker Swarm out of two RPi2s.

#### There are two Docker machines in the Swarm:
- host rpi202 (192.168.0.114) acts as the Swarm Manager; its Docker machine name is master01
- host rpi201 (192.168.0.109) acts as a Swarm Node; its Docker machine name is node01


```python
HypriotOS: pi@rpi202 in ~
$ docker-machine ls
NAME       ACTIVE   DRIVER    STATE     URL                        SWARM
master01            hypriot   Running   tcp://192.168.0.114:2376   master01 (master)
node01              hypriot   Running   tcp://192.168.0.109:2376   master01
HypriotOS: pi@rpi202 in ~
$


# Nodes in the Swarm:

HypriotOS: pi@rpi202 in ~
$ docker $(docker-machine config --swarm master01) info
Containers: 4
Images: 51
Role: primary
Strategy: spread
Filters: health, port, dependency, affinity, constraint
Nodes: 2
 master01: 192.168.0.114:2376
  └ Status: Healthy
  └ Containers: 3
  └ Reserved CPUs: 0 / 4
  └ Reserved Memory: 0 B / 972 MiB
  └ Labels: executiondriver=native-0.2, kernelversion=4.1.8-hypriotos-v7+, operatingsystem=Raspbian GNU/Linux 8 (jessie), provider=hypriot, storagedriver=overlay
 node01: 192.168.0.109:2376
  └ Status: Healthy
  └ Containers: 1
  └ Reserved CPUs: 0 / 4
  └ Reserved Memory: 0 B / 972 MiB
  └ Labels: executiondriver=native-0.2, kernelversion=4.1.8-hypriotos-v7+, operatingsystem=Raspbian GNU/Linux 8 (jessie), provider=hypriot, storagedriver=overlay
CPUs: 8
Total Memory: 1.899 GiB
Name: b7def5d9af98
HypriotOS: pi@rpi202 in ~
$
```

### Create a Docker overlay network:


```python
# The networks originally in the Swarm:

HypriotOS: pi@rpi202 in ~
$ docker $(docker-machine config --swarm master01) network ls
NETWORK ID          NAME              DRIVER
f73c178c37f9        master01/bridge   bridge
414ed2b10a9d        master01/none     null
930b58fd0d33        master01/host     host
759c7acf7c0a        node01/none       null
0954a9483bae        node01/host       host
21bce8ef40ec        node01/bridge     bridge
HypriotOS: pi@rpi202 in ~
$


# Create an overlay network:

HypriotOS: pi@rpi202 in ~
$ docker network create --driver=overlay mynet
1feac8ae92873e78570b904b0d3f848b49a180c15ebe0a4ff38f1a2cbcc691c5
HypriotOS: pi@rpi202 in ~
$


# The networks now in the Swarm; the newly created overlay network "mynet" is visible:

HypriotOS: pi@rpi202 in ~
$ docker $(docker-machine config --swarm master01) network ls
NETWORK ID          NAME              DRIVER
930b58fd0d33        master01/host     host
1feac8ae9287        mynet             overlay
f73c178c37f9        master01/bridge   bridge
21bce8ef40ec        node01/bridge     bridge
759c7acf7c0a        node01/none       null
0954a9483bae        node01/host       host
414ed2b10a9d        master01/none     null
HypriotOS: pi@rpi202 in ~
$


# The networks seen on the Swarm Manager's host; the new overlay network "mynet" is visible:

HypriotOS: pi@rpi202 in ~
$ docker network ls
NETWORK ID          NAME      DRIVER
f73c178c37f9        bridge    bridge
414ed2b10a9d        none      null
930b58fd0d33        host      host
1feac8ae9287        mynet     overlay
HypriotOS: pi@rpi202 in ~
$


# The networks seen on the Swarm Node's host; the new overlay network "mynet" is visible:

HypriotOS: pi@rpi201 in ~
$ docker network ls
NETWORK ID          NAME      DRIVER
1feac8ae9287        mynet     overlay
21bce8ef40ec        bridge    bridge
759c7acf7c0a        none      null
0954a9483bae        host      host
HypriotOS: pi@rpi201 in ~
$
```

### Create the broker Celery needs, using Redis


```python
HypriotOS: pi@rpi202 in ~
$ docker run -d -p 6379:6379 --net=mynet --name=redis --volume=/data:/data hypriot/rpi-redis
a2abf9277b5e4818da89ffa282a706506ef288426486cc25b431208564bf6e0f


HypriotOS: pi@rpi202 in ~
$ docker ps
CONTAINER ID        IMAGE                      COMMAND                  CREATED             STATUS              PORTS                                                                    NAMES
a2abf9277b5e        hypriot/rpi-redis          "/entrypoint.sh redis"   13 hours ago        Up About an hour    0.0.0.0:6379->6379/tcp                                                   redis
f0ce33ca1152        hypriot/rpi-swarm          "/swarm join --advert"   6 days ago          Up 6 days           2375/tcp                                                                 swarm-agent
b7def5d9af98        hypriot/rpi-swarm          "/swarm manage --tlsv"   6 days ago          Up 6 days           0.0.0.0:3376->3376/tcp, 2375/tcp                                         swarm-agent-master
ad594813f8f0        nimblestratus/rpi-consul   "/bin/start -server -"   6 days ago          Up 6 days           53/udp, 8300-8302/tcp, 8301-8302/udp, 8400/tcp, 0.0.0.0:8500->8500/tcp   consul
HypriotOS: pi@rpi202 in ~
$
```
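
For reference, these are the relevant parts of celery_projects/celeryconfig.py (the file copied to both hosts in the next step); they point both producers and workers at the Redis container just created, and bind the mapper task to the word_counting queue:

```python
# excerpt from celery_projects/celeryconfig.py
from kombu import Exchange, Queue

BROKER_URL = 'redis://192.168.0.114:6379/0'             # the Redis message queue
CELERY_RESULT_BACKEND = 'redis://192.168.0.114:6379/1'  # where results are kept

CELERY_IMPORTS = ('word_count.tasks',)

CELERY_QUEUES = (
    Queue('word_counting', Exchange('celery', type = 'direct'), routing_key='word_counting'),
)

CELERY_ROUTES = {
    'word_count.tasks.mapper': {
        'queue': 'word_counting',
        'routing_key': 'word_counting',
    },
}
```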

### Copy celeryconfig.py, start_workers.sh and the word_count folder into /data/celery_projects on both hosts

#### This can be done with SCP: http://www.hypexr.org/linux_scp_help.php

For example:

    HypriotOS: pi@rpi202 in ~
    $ scp -r /data/celery_projects root@rpi201:/data/


```python
HypriotOS: pi@rpi202 in /data/celery_projects
$ ll
total 20
drwxr-xr-x 3 999  root 4096 Jan 25 23:01 ./
drwxr-xr-x 3 999  root 4096 Jan 25 23:01 ../
-rw-r--r-- 1 999  root 1079 Jan 25 21:12 celeryconfig.py
-rwxr-xr-x 1 999  root  732 Jan 25 22:53 start_workers.sh*    <--- the script that starts the worker containers, described below.
drwxr-xr-x 3 root root 4096 Jan 25 23:01 word_count/
HypriotOS: pi@rpi202 in /data/celery_projects
$


HypriotOS: pi@rpi201 in /data/celery_projects
$ ll
total 20
drwxr-xr-x 3 root root 4096 Jan 25 23:03 ./
drwxr-xr-x 3 999  root 4096 Jan 25 22:55 ../
-rw-r--r-- 1 root root 1079 Jan 25 21:12 celeryconfig.py
-rw-r--r-- 1 root root  732 Jan 25 22:53 start_workers.sh    <--- the script that starts the worker containers, described below.
drwxr-xr-x 3 root root 4096 Jan 25 23:03 word_count/
HypriotOS: pi@rpi201 in /data/celery_projects
$
```

### Create the [Flower](http://docs.celeryproject.org/en/latest/userguide/monitoring.html#flower-real-time-celery-web-monitor) container for monitoring


```python
HypriotOS: pi@rpi202 in ~
$ docker run -d -p 5555:5555 --net=mynet --name=flower --volume=/data/celery_projects:/celery_projects wei1234c/celery_armv7 /bin/sh -c "cd /celery_projects && celery -A word_count flower"
276f00591fd7042139ddf660730d223bcf19e9f8bd369f075de417140b6dfd4a
HypriotOS: pi@rpi202 in ~
$


HypriotOS: pi@rpi202 in ~
$ docker ps
CONTAINER ID        IMAGE                   COMMAND                  CREATED             STATUS              PORTS                    NAMES
276f00591fd7        wei1234c/celery_armv7   "/bin/sh -c 'cd /cele"   31 seconds ago      Up 27 seconds       0.0.0.0:5555->5555/tcp   flower
```

#### Once the Flower container is up, it can be viewed in a browser, though for now everything is still empty
![](./jpgs/flower1.jpg)
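
Before any workers exist, a quick smoke test from any machine that can reach the broker helps confirm the plumbing (a hypothetical check of mine, not a step in the original write-up): a message sent now simply waits in the word_counting queue.

```python
from word_count.tasks import mapper

# producer side: push one message into the word_counting queue
probe = mapper.delay('hello')

print(probe.status)   # 'PENDING': no worker has picked it up yet
print(probe.ready())  # False until a worker consumes and runs the task
# once the workers below are started, probe.get() returns ('hello', 1)
```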

### Create and deploy the Celery worker containers through the Swarm Manager

#### The script used to create the worker containers
start_workers.sh


```python
# ./start_workers.sh

echo "Starting Celery cluster containers _________________________________________________"

eval $(docker-machine env --swarm master01)

PROJECT=$1          # project name
WORKER_START_ID=$2  # ID of the first worker container
WORKER_LAST_ID=$3   # ID of the last worker container
CONCURRENCY=$4      # number of subprocesses each worker may use

for (( i=${WORKER_START_ID}; i<=${WORKER_LAST_ID}; i=i+1 ))
do
    docker run -d --name=${PROJECT}_celery${i} --hostname=${PROJECT}_celery${i} --net=mynet --volume=/data/celery_projects:/celery_projects wei1234c/celery_armv7 /bin/sh -c "cd /celery_projects && celery -A ${PROJECT} worker -n worker${i}.%h --concurrency=${CONCURRENCY} --loglevel=INFO"
done
```

#### Creating the initial Celery worker containers
Four containers were created; each container runs one Celery worker, and each worker may use 5 subprocesses.
Because this Swarm uses the spread strategy, all four were scheduled onto the Docker machine that held fewer containers.


```python
# CLI arguments:
# $1  # project name
# $2  # ID of the first worker container
# $3  # ID of the last worker container
# $4  # number of subprocesses each worker may use

HypriotOS: pi@rpi202 in /data/celery_projects
$ ./start_workers.sh word_count 1 4 5
Starting Celery cluster containers _________________________________________________
a22b08a0818b3246f90511ad21cb2a0ab37a4e72661bf559ade7e320db030505
77eabded27e4ea3aaa640480c088fa7b4b9818fc3e40fb66636cc9abe8a78e69
df05a7204f40470cfd8eee21a06be45f5a306ea32df0196f3d004beac5d2f82d
e67d39740ace5c2a5b9a05e6ca1adc73c5e5944e62302d02391d37f7ee6aa479



# Four containers were created; each container has one Celery worker, and each worker may use 5 subprocesses.

# Because of the spread strategy, all four were scheduled onto the Docker machine with fewer containers.

HypriotOS: pi@rpi202 in /data/celery_projects
$ docker ps
CONTAINER ID        IMAGE                      COMMAND                  CREATED              STATUS              PORTS                                                                          NAMES
e67d39740ace        wei1234c/celery_armv7      "/bin/sh -c 'cd /cele"   About a minute ago   Up About a minute   5555/tcp                                                                       node01/word_count_celery4
df05a7204f40        wei1234c/celery_armv7      "/bin/sh -c 'cd /cele"   About a minute ago   Up About a minute   5555/tcp                                                                       node01/word_count_celery3
77eabded27e4        wei1234c/celery_armv7      "/bin/sh -c 'cd /cele"   About a minute ago   Up About a minute   5555/tcp                                                                       node01/word_count_celery2
a22b08a0818b        wei1234c/celery_armv7      "/bin/sh -c 'cd /cele"   About a minute ago   Up About a minute   5555/tcp                                                                       node01/word_count_celery1
276f00591fd7        wei1234c/celery_armv7      "/bin/sh -c 'cd /cele"   37 minutes ago       Up 5 minutes        192.168.0.114:5555->5555/tcp                                                   master01/flower
a2abf9277b5e        hypriot/rpi-redis          "/entrypoint.sh redis"   13 hours ago         Up About an hour    192.168.0.114:6379->6379/tcp                                                   master01/redis
980161d10fc4        hypriot/rpi-swarm          "/swarm join --advert"   6 days ago           Up 6 days           2375/tcp                                                                       node01/swarm-agent
f0ce33ca1152        hypriot/rpi-swarm          "/swarm join --advert"   6 days ago           Up 6 days           2375/tcp                                                                       master01/swarm-agent
b7def5d9af98        hypriot/rpi-swarm          "/swarm manage --tlsv"   6 days ago           Up 6 days           2375/tcp, 192.168.0.114:3376->3376/tcp                                         master01/swarm-agent-master
ad594813f8f0        nimblestratus/rpi-consul   "/bin/start -server -"   6 days ago           Up 6 days           53/udp, 8300-8302/tcp, 8301-8302/udp, 8400/tcp, 192.168.0.114:8500->8500/tcp   master01/consul
HypriotOS: pi@rpi202 in /data/celery_projects
$
```

#### Flower now shows the workers; there are currently 4 of them
- celery@worker1.word_count_celery1
- celery@worker2.word_count_celery2
- celery@worker3.word_count_celery3
- celery@worker4.word_count_celery4

![](./jpgs/flower22.jpg)

#### Scaling out the Celery worker containers
Celery's scale can be increased dynamically at any time to meet performance demands.
This time four more containers were created, again with one Celery worker per container and 5 subprocesses per worker.
Because this Swarm uses the spread strategy, and the two Docker machines held a comparable number of containers at that point, these four containers were scheduled evenly across the two machines.


```python
# $1  # project name
# $2  # ID of the first worker container
# $3  # ID of the last worker container
# $4  # number of subprocesses each worker may use

HypriotOS: pi@rpi202 in /data/celery_projects
$ ./start_workers.sh word_count 5 8 5
Starting Celery cluster containers _________________________________________________
a4de4967fd6211266cbad04fecfc357aa81789063cca3042388019adab2a6c71
7066ba6021de870f1332858c6f96673a159d7e5031a5682d3853fa6bd8fe2252
79792c823fbf2769e4983c525598c30ba3758c23697ef66a78a54163374d3233
25c02d07ed6f1217ee68dc486a6586262ca2e3ed01a2a8690eaa2a783ad07d73


HypriotOS: pi@rpi202 in /data/celery_projects
$ docker ps
CONTAINER ID        IMAGE                      COMMAND                  CREATED              STATUS              PORTS                                                                          NAMES
25c02d07ed6f        wei1234c/celery_armv7      "/bin/sh -c 'cd /cele"   18 seconds ago       Up 12 seconds       5555/tcp                                                                       node01/word_count_celery8
79792c823fbf        wei1234c/celery_armv7      "/bin/sh -c 'cd /cele"   39 seconds ago       Up 25 seconds       5555/tcp                                                                       master01/word_count_celery7
7066ba6021de        wei1234c/celery_armv7      "/bin/sh -c 'cd /cele"   About a minute ago   Up 45 seconds       5555/tcp                                                                       node01/word_count_celery6
a4de4967fd62        wei1234c/celery_armv7      "/bin/sh -c 'cd /cele"   About a minute ago   Up About a minute   5555/tcp                                                                       master01/word_count_celery5
e67d39740ace        wei1234c/celery_armv7      "/bin/sh -c 'cd /cele"   15 minutes ago       Up 15 minutes       5555/tcp                                                                       node01/word_count_celery4
df05a7204f40        wei1234c/celery_armv7      "/bin/sh -c 'cd /cele"   15 minutes ago       Up 15 minutes       5555/tcp                                                                       node01/word_count_celery3
77eabded27e4        wei1234c/celery_armv7      "/bin/sh -c 'cd /cele"   15 minutes ago       Up 15 minutes       5555/tcp                                                                       node01/word_count_celery2
a22b08a0818b        wei1234c/celery_armv7      "/bin/sh -c 'cd /cele"   16 minutes ago       Up 16 minutes       5555/tcp                                                                       node01/word_count_celery1
276f00591fd7        wei1234c/celery_armv7      "/bin/sh -c 'cd /cele"   51 minutes ago       Up 19 minutes       192.168.0.114:5555->5555/tcp                                                   master01/flower
a2abf9277b5e        hypriot/rpi-redis          "/entrypoint.sh redis"   14 hours ago         Up 2 hours          192.168.0.114:6379->6379/tcp                                                   master01/redis
980161d10fc4        hypriot/rpi-swarm          "/swarm join --advert"   6 days ago           Up 6 days           2375/tcp                                                                       node01/swarm-agent
f0ce33ca1152        hypriot/rpi-swarm          "/swarm join --advert"   6 days ago           Up 6 days           2375/tcp                                                                       master01/swarm-agent
b7def5d9af98        hypriot/rpi-swarm          "/swarm manage --tlsv"   6 days ago           Up 6 days           2375/tcp, 192.168.0.114:3376->3376/tcp                                         master01/swarm-agent-master
ad594813f8f0        nimblestratus/rpi-consul   "/bin/start -server -"   6 days ago           Up 6 days           53/udp, 8300-8302/tcp, 8301-8302/udp, 8400/tcp, 192.168.0.114:8500->8500/tcp   master01/consul
HypriotOS: pi@rpi202 in /data/celery_projects
$
```

#### The containers created remotely through the Swarm manager are also visible on the Swarm node


```python
HypriotOS: pi@rpi201 in /data/celery_projects
$ docker ps
CONTAINER ID        IMAGE                   COMMAND                  CREATED          STATUS          PORTS      NAMES
25c02d07ed6f        wei1234c/celery_armv7   "/bin/sh -c 'cd /cele"   16 minutes ago   Up 16 minutes   5555/tcp   word_count_celery8
7066ba6021de        wei1234c/celery_armv7   "/bin/sh -c 'cd /cele"   17 minutes ago   Up 17 minutes   5555/tcp   word_count_celery6
e67d39740ace        wei1234c/celery_armv7   "/bin/sh -c 'cd /cele"   32 minutes ago   Up 31 minutes   5555/tcp   word_count_celery4
df05a7204f40        wei1234c/celery_armv7   "/bin/sh -c 'cd /cele"   32 minutes ago   Up 32 minutes   5555/tcp   word_count_celery3
77eabded27e4        wei1234c/celery_armv7   "/bin/sh -c 'cd /cele"   32 minutes ago   Up 32 minutes   5555/tcp   word_count_celery2
a22b08a0818b        wei1234c/celery_armv7   "/bin/sh -c 'cd /cele"   32 minutes ago   Up 32 minutes   5555/tcp   word_count_celery1
980161d10fc4        hypriot/rpi-swarm       "/swarm join --advert"   6 days ago       Up 6 days       2375/tcp   swarm-agent
HypriotOS: pi@rpi201 in /data/celery_projects
$
```

#### Flower now shows that the two machines together hold 8 workers = 8 x 5 = 40 processes available

![](./jpgs/flower3.jpg)

### The test code


```python
from word_count.tasks import *

# split the text file into a list of words
def getWordsFromText(file = '.\\text\\test.txt'):
    with open(file) as f:
        lines = f.readlines()
    return ' '.join(lines).replace(',', '').replace('.', '').split()


def reduce(word_counts):
    wordCounts = {}

    for word_count in word_counts:
        if word_count is not None:
            wordCounts[word_count[0]] = wordCounts.get(word_count[0], 0) + word_count[1]

    result = sorted(list(wordCounts.items()),
                    key = lambda x: (x[1], x[0]),
                    reverse = True)
    return result
```


```python
# list of words
words = getWordsFromText()
words[:3]
```




    ["Aesop's", 'Fables', 'Translated']




```python
# total number of words
len(words)
```




    2190



#### The mapper function defined in ./word_count/tasks.py:
    from celery import group
    from word_count.celery import app

    @app.task
    def mapper(word):
        return (word, 1) if len(word) >= 5 else None  # filter out words that are too short
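
As a sanity check of the MapReduce flow (my addition, not in the original notebook), the same count can be computed locally in a single process by applying the mapper's filter rule and the reduce() defined above, without any message passing:

```python
def count_local(words):
    # the same rule as word_count.tasks.mapper, applied in-process
    mapped = [(word, 1) if len(word) >= 5 else None for word in words]
    return reduce(mapped)  # reduce() above already skips the Nones

counts_local = count_local(words)
counts_local[:5]  # should match the distributed result below
```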

### Send messages asking the Celery workers to compute in parallel and return the results


```python
def count_celery(words):

    # send to Celery for execution
    asyncResults = [mapper.s(word).delay() for word in words]  # mapper is defined in ./word_count/tasks.py
    results = [asyncResult.get() for asyncResult in asyncResults if asyncResult.get() is not None]

    return reduce(results)

%time counts = count_celery(words)
counts[:5]
```

    Wall time: 3min 23s





    [('would', 12), ('which', 8), ('their', 8), ('caught', 6), ('Farmer', 6)]



#### As soon as the messages are sent, Flower shows the execution status
![](./jpgs/flower4.jpg)

#### Execution finished
![](./jpgs/flower5.jpg)

#### The CPUs were not very busy during the run; there is still performance to squeeze out
![](./jpgs/htop1.jpg)
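
One way to squeeze more out of the cluster: count_celery() above publishes one message per word and then fetches every result separately, so a large share of the 3min 23s is likely messaging overhead rather than computation. tasks.py already imports group from Celery's Canvas; here is a sketch of mine (untested on this cluster) of the same fan-out expressed as a single group call:

```python
from celery import group
from word_count.tasks import mapper

def count_celery_group(words):
    # one group publishes all mapper signatures together, and get()
    # waits for the whole set, instead of a separate delay()/get()
    # pair per word
    job = group(mapper.s(word) for word in words)
    results = job.apply_async().get()  # list of (word, 1) tuples and Nones
    return reduce(results)             # reduce() as defined above
```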

## Summary:
Although Bluemix is far more complete in its functionality, I think the two rest on similar principles. Considering data security and cost, being able to use Docker Swarm and a message queue to quickly build a private IoT platform may be an option worth considering.

This experiment verified that Celery can run on Docker Swarm and gain scalability from it; as a next step, an IoT network will be designed to simulate the interaction between devices.
--------------------------------------------------------------------------------
/Celery_config_plotter/CeleryConfigPlotter.ipynb:
--------------------------------------------------------------------------------
1 | {
2 |  "cells": [
3 |   {
4 |    "cell_type": "markdown",
5 |    "metadata": {},
6 |    "source": [
7 |     "# Celery architecture planning"
8 |    ]
9 |   },
10 |   {
11 |    "cell_type": "code",
12 |    "execution_count": 1,
13 |    "metadata": {
14 |     "collapsed": true
15 |    },
16 |    "outputs": [],
17 |    "source": [
18 |     "from celery import Celery"
19 |    ]
20 |   },
21 |   {
22 |    "cell_type": "code",
23 |    "execution_count": 2,
24 |    "metadata": {
25 |     "collapsed": false
26 |    },
27 |    "outputs": [],
28 |    "source": [
29 |     "import pandas as pd\n",
30 |     "from pandas import Series, DataFrame"
31 |    ]
32 |   },
33 |   {
34 |    "cell_type": "code",
35 |    "execution_count": 3,
36 |    "metadata": {
37 |     "collapsed": true
38 |    },
39 |    "outputs": [],
40 |    "source": [
41 |     "import os\n",
42 |     "from pprint import pprint"
43 |    ]
44 |   },
45 |   {
46 |    "cell_type": "markdown",
47 |    "metadata": {},
48 |    "source": [
49 |     "### Default configuration values"
50 |    ]
51 |   },
52 |   {
53 |    "cell_type": "code",
54 |    "execution_count": 4,
55 |    "metadata": {
56 |     "collapsed": false
57 |    },
58 |    "outputs": [],
59 |    "source": [
60 |     "def listDefaultCeleryConfigurations():\n",
61 |     "    app = Celery()\n",
62 |     "    configs = app.conf.__dict__['_order'][2]\n",
63 |     "    configs = sorted([(k, v) for k, v in configs.items()])\n",
64 |     "    for k, v in configs:\n",
65 |     "        print ('{0} = {1}'.format(k, (\"'\" + v + \"'\") if isinstance(v, str) else v) )"
66 |    ]
67 |   },
68 |   {
69 |    "cell_type": "code",
70 |    "execution_count": 5,
71 |    "metadata": {
72 |     "collapsed": false
73 |    },
74 |    "outputs": [
75 |     {
76 |      "name": "stdout",
77 |      "output_type": "stream",
78 |      "text": [
79 |       "ADMINS = ()\n",
80 |       "BROKER_CONNECTION_MAX_RETRIES = 100\n",
81 |       "BROKER_CONNECTION_RETRY = True\n",
82 | "BROKER_CONNECTION_TIMEOUT = 4\n", 83 | "BROKER_FAILOVER_STRATEGY = None\n", 84 | "BROKER_HEARTBEAT = None\n", 85 | "BROKER_HEARTBEAT_CHECKRATE = 3.0\n", 86 | "BROKER_HOST = None\n", 87 | "BROKER_LOGIN_METHOD = None\n", 88 | "BROKER_PASSWORD = None\n", 89 | "BROKER_POOL_LIMIT = 10\n", 90 | "BROKER_PORT = None\n", 91 | "BROKER_TRANSPORT = None\n", 92 | "BROKER_TRANSPORT_OPTIONS = {}\n", 93 | "BROKER_URL = None\n", 94 | "BROKER_USER = None\n", 95 | "BROKER_USE_SSL = False\n", 96 | "BROKER_VHOST = None\n", 97 | "CASSANDRA_COLUMN_FAMILY = None\n", 98 | "CASSANDRA_DETAILED_MODE = False\n", 99 | "CASSANDRA_KEYSPACE = None\n", 100 | "CASSANDRA_READ_CONSISTENCY = None\n", 101 | "CASSANDRA_SERVERS = None\n", 102 | "CASSANDRA_WRITE_CONSISTENCY = None\n", 103 | "CELERYBEAT_LOG_FILE = None\n", 104 | "CELERYBEAT_LOG_LEVEL = 'INFO'\n", 105 | "CELERYBEAT_MAX_LOOP_INTERVAL = 0\n", 106 | "CELERYBEAT_SCHEDULE = {}\n", 107 | "CELERYBEAT_SCHEDULER = 'celery.beat:PersistentScheduler'\n", 108 | "CELERYBEAT_SCHEDULE_FILENAME = 'celerybeat-schedule'\n", 109 | "CELERYBEAT_SYNC_EVERY = 0\n", 110 | "CELERYD_AGENT = None\n", 111 | "CELERYD_AUTORELOADER = 'celery.worker.autoreload:Autoreloader'\n", 112 | "CELERYD_AUTOSCALER = 'celery.worker.autoscale:Autoscaler'\n", 113 | "CELERYD_CONCURRENCY = 0\n", 114 | "CELERYD_CONSUMER = 'celery.worker.consumer:Consumer'\n", 115 | "CELERYD_FORCE_EXECV = False\n", 116 | "CELERYD_HIJACK_ROOT_LOGGER = True\n", 117 | "CELERYD_LOG_COLOR = None\n", 118 | "CELERYD_LOG_FILE = None\n", 119 | "CELERYD_LOG_FORMAT = '[%(asctime)s: %(levelname)s/%(processName)s] %(message)s'\n", 120 | "CELERYD_LOG_LEVEL = 'WARN'\n", 121 | "CELERYD_MAX_TASKS_PER_CHILD = None\n", 122 | "CELERYD_POOL = 'prefork'\n", 123 | "CELERYD_POOL_PUTLOCKS = True\n", 124 | "CELERYD_POOL_RESTARTS = False\n", 125 | "CELERYD_PREFETCH_MULTIPLIER = 4\n", 126 | "CELERYD_STATE_DB = None\n", 127 | "CELERYD_TASK_LOG_FORMAT = '[%(asctime)s: %(levelname)s/%(processName)s] %(task_name)s[%(task_id)s]: %(message)s'\n", 128 | "CELERYD_TASK_SOFT_TIME_LIMIT = None\n", 129 | "CELERYD_TASK_TIME_LIMIT = None\n", 130 | "CELERYD_TIMER = None\n", 131 | "CELERYD_TIMER_PRECISION = 1.0\n", 132 | "CELERYD_WORKER_LOST_WAIT = 10.0\n", 133 | "CELERYMON_LOG_FILE = None\n", 134 | "CELERYMON_LOG_FORMAT = '[%(asctime)s: %(levelname)s] %(message)s'\n", 135 | "CELERYMON_LOG_LEVEL = 'INFO'\n", 136 | "CELERY_ACCEPT_CONTENT = ['json', 'pickle', 'msgpack', 'yaml']\n", 137 | "CELERY_ACKS_LATE = False\n", 138 | "CELERY_ALWAYS_EAGER = False\n", 139 | "CELERY_ANNOTATIONS = None\n", 140 | "CELERY_BROADCAST_EXCHANGE = 'celeryctl'\n", 141 | "CELERY_BROADCAST_EXCHANGE_TYPE = 'fanout'\n", 142 | "CELERY_BROADCAST_QUEUE = 'celeryctl'\n", 143 | "CELERY_CACHE_BACKEND = None\n", 144 | "CELERY_CACHE_BACKEND_OPTIONS = {}\n", 145 | "CELERY_CHORD_PROPAGATES = True\n", 146 | "CELERY_COUCHBASE_BACKEND_SETTINGS = None\n", 147 | "CELERY_CREATE_MISSING_QUEUES = True\n", 148 | "CELERY_DEFAULT_DELIVERY_MODE = 2\n", 149 | "CELERY_DEFAULT_EXCHANGE = 'celery'\n", 150 | "CELERY_DEFAULT_EXCHANGE_TYPE = 'direct'\n", 151 | "CELERY_DEFAULT_QUEUE = 'celery'\n", 152 | "CELERY_DEFAULT_RATE_LIMIT = None\n", 153 | "CELERY_DEFAULT_ROUTING_KEY = 'celery'\n", 154 | "CELERY_DISABLE_RATE_LIMITS = False\n", 155 | "CELERY_EAGER_PROPAGATES_EXCEPTIONS = False\n", 156 | "CELERY_ENABLE_REMOTE_CONTROL = True\n", 157 | "CELERY_ENABLE_UTC = True\n", 158 | "CELERY_EVENT_QUEUE_EXPIRES = None\n", 159 | "CELERY_EVENT_QUEUE_TTL = None\n", 160 | "CELERY_EVENT_SERIALIZER = 'json'\n", 161 | "CELERY_IGNORE_RESULT = 
False\n", 162 | "CELERY_IMPORTS = ()\n", 163 | "CELERY_INCLUDE = ()\n", 164 | "CELERY_MAX_CACHED_RESULTS = 100\n", 165 | "CELERY_MESSAGE_COMPRESSION = None\n", 166 | "CELERY_MONGODB_BACKEND_SETTINGS = None\n", 167 | "CELERY_QUEUES = None\n", 168 | "CELERY_QUEUE_HA_POLICY = None\n", 169 | "CELERY_REDIRECT_STDOUTS = True\n", 170 | "CELERY_REDIRECT_STDOUTS_LEVEL = 'WARNING'\n", 171 | "CELERY_REDIS_DB = None\n", 172 | "CELERY_REDIS_HOST = None\n", 173 | "CELERY_REDIS_MAX_CONNECTIONS = None\n", 174 | "CELERY_REDIS_PASSWORD = None\n", 175 | "CELERY_REDIS_PORT = None\n", 176 | "CELERY_RESULT_BACKEND = None\n", 177 | "CELERY_RESULT_DBURI = None\n", 178 | "CELERY_RESULT_DB_SHORT_LIVED_SESSIONS = False\n", 179 | "CELERY_RESULT_DB_TABLENAMES = None\n", 180 | "CELERY_RESULT_ENGINE_OPTIONS = None\n", 181 | "CELERY_RESULT_EXCHANGE = 'celeryresults'\n", 182 | "CELERY_RESULT_EXCHANGE_TYPE = 'direct'\n", 183 | "CELERY_RESULT_PERSISTENT = None\n", 184 | "CELERY_RESULT_SERIALIZER = 'pickle'\n", 185 | "CELERY_ROUTES = None\n", 186 | "CELERY_SECURITY_CERTIFICATE = None\n", 187 | "CELERY_SECURITY_CERT_STORE = None\n", 188 | "CELERY_SECURITY_KEY = None\n", 189 | "CELERY_SEND_EVENTS = False\n", 190 | "CELERY_SEND_TASK_ERROR_EMAILS = False\n", 191 | "CELERY_SEND_TASK_SENT_EVENT = False\n", 192 | "CELERY_STORE_ERRORS_EVEN_IF_IGNORED = False\n", 193 | "CELERY_TASK_PUBLISH_RETRY = True\n", 194 | "CELERY_TASK_PUBLISH_RETRY_POLICY = {'max_retries': 3, 'interval_max': 1, 'interval_step': 0.2, 'interval_start': 0}\n", 195 | "CELERY_TASK_RESULT_EXPIRES = 1 day, 0:00:00\n", 196 | "CELERY_TASK_SERIALIZER = 'pickle'\n", 197 | "CELERY_TIMEZONE = None\n", 198 | "CELERY_TRACK_STARTED = False\n", 199 | "CELERY_WORKER_DIRECT = False\n", 200 | "EMAIL_HOST = 'localhost'\n", 201 | "EMAIL_HOST_PASSWORD = None\n", 202 | "EMAIL_HOST_USER = None\n", 203 | "EMAIL_PORT = 25\n", 204 | "EMAIL_TIMEOUT = 2\n", 205 | "EMAIL_USE_SSL = False\n", 206 | "EMAIL_USE_TLS = False\n", 207 | "SERVER_EMAIL = 'celery@localhost'\n" 208 | ] 209 | } 210 | ], 211 | "source": [ 212 | "listDefaultCeleryConfigurations()" 213 | ] 214 | }, 215 | { 216 | "cell_type": "markdown", 217 | "metadata": {}, 218 | "source": [ 219 | "---\n", 220 | "### 抓取 規劃檔案 內容" 221 | ] 222 | }, 223 | { 224 | "cell_type": "code", 225 | "execution_count": 6, 226 | "metadata": { 227 | "collapsed": true 228 | }, 229 | "outputs": [], 230 | "source": [ 231 | "def getExcelData(file):\n", 232 | " df = pd.read_excel(file)\n", 233 | " df.dropna(axis=0, how='all', inplace=True)\n", 234 | "\n", 235 | " return df" 236 | ] 237 | }, 238 | { 239 | "cell_type": "markdown", 240 | "metadata": {}, 241 | "source": [ 242 | "---\n", 243 | "#### Import Kombu classes" 244 | ] 245 | }, 246 | { 247 | "cell_type": "code", 248 | "execution_count": 7, 249 | "metadata": { 250 | "collapsed": true 251 | }, 252 | "outputs": [], 253 | "source": [ 254 | "def import_Kombu_classes(plan, summary):\n", 255 | " output = [] \n", 256 | " output.extend(['', '#{0:_^78}'.format('Import Kombu classes')])\n", 257 | " output.append('{0}'.format('from kombu import Exchange, Queue'))\n", 258 | " summary.extend(output)\n", 259 | " \n", 260 | " return summary" 261 | ] 262 | }, 263 | { 264 | "cell_type": "markdown", 265 | "metadata": {}, 266 | "source": [ 267 | "---\n", 268 | "#### CELERY_TIMEZONE & Misc." 
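, "\n", "The generated block looks like the following sketch (the timezone value is just this plan's own choice; `CELERYD_POOL_RESTARTS = True` enables Celery's `pool_restart` remote-control command, which is disabled by default):\n", "\n", "```python\n", "CELERY_TIMEZONE = 'Asia/Taipei'\n", "CELERYD_POOL_RESTARTS = True\n", "```"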
269 | ] 270 | }, 271 | { 272 | "cell_type": "code", 273 | "execution_count": 8, 274 | "metadata": { 275 | "collapsed": false 276 | }, 277 | "outputs": [], 278 | "source": [ 279 | "def set_CELERY_TIMEZONE_Misc(plan, summary):\n", 280 | " # 自訂的\n", 281 | " CELERY_TIMEZONE = 'Asia/Taipei' \n", 282 | "\n", 283 | " output = [] \n", 284 | " output.extend(['', '#{0:_^78}'.format('CELERY_TIMEZONE & Misc.')])\n", 285 | " output.append(\"CELERY_TIMEZONE = '{0}'\".format(CELERY_TIMEZONE))\n", 286 | " output.append('CELERYD_POOL_RESTARTS = True')\n", 287 | " summary.extend(output)\n", 288 | " \n", 289 | " return summary" 290 | ] 291 | }, 292 | { 293 | "cell_type": "markdown", 294 | "metadata": {}, 295 | "source": [ 296 | "---\n", 297 | "#### BROKER_URL\n", 298 | "BROKER_URL = 'redis://netbrain.noip.me:6379/0'" 299 | ] 300 | }, 301 | { 302 | "cell_type": "code", 303 | "execution_count": 9, 304 | "metadata": { 305 | "collapsed": false 306 | }, 307 | "outputs": [], 308 | "source": [ 309 | "def set_BROKER_URL(plan, summary):\n", 310 | " BROKER_URL = plan.Broker.drop_duplicates()[0]\n", 311 | "\n", 312 | " output = [] \n", 313 | " output.extend(['', '#{0:_^78}'.format('BROKER_URL')])\n", 314 | " output.append(\"BROKER_URL = '{0}'\".format(BROKER_URL))\n", 315 | " summary.extend(output)\n", 316 | " \n", 317 | " return summary" 318 | ] 319 | }, 320 | { 321 | "cell_type": "markdown", 322 | "metadata": {}, 323 | "source": [ 324 | "---\n", 325 | "#### CELERY_RESULT_BACKEND\n", 326 | "CELERY_RESULT_BACKEND = 'redis://netbrain.noip.me:6379/1'" 327 | ] 328 | }, 329 | { 330 | "cell_type": "code", 331 | "execution_count": 10, 332 | "metadata": { 333 | "collapsed": false 334 | }, 335 | "outputs": [], 336 | "source": [ 337 | "def set_CELERY_RESULT_BACKEND(plan, summary):\n", 338 | " CELERY_RESULT_BACKEND = plan.Result_backend.drop_duplicates()[0]\n", 339 | "\n", 340 | " output = [] \n", 341 | " output.extend(['', '#{0:_^78}'.format('CELERY_RESULT_BACKEND')])\n", 342 | " output.append(\"CELERY_RESULT_BACKEND = '{0}'\".format(CELERY_RESULT_BACKEND))\n", 343 | " summary.extend(output)\n", 344 | " \n", 345 | " return summary" 346 | ] 347 | }, 348 | { 349 | "cell_type": "markdown", 350 | "metadata": {}, 351 | "source": [ 352 | "---\n", 353 | "#### CELERY_IMPORTS\n", 354 | "CELERY_IMPORTS = ('proj.tasks', )" 355 | ] 356 | }, 357 | { 358 | "cell_type": "code", 359 | "execution_count": 11, 360 | "metadata": { 361 | "collapsed": false 362 | }, 363 | "outputs": [], 364 | "source": [ 365 | "def set_CELERY_IMPORTS(plan, summary):\n", 366 | " Celery_app_tasks = plan[['Celery_app', 'Tasks_module']].drop_duplicates()\n", 367 | " modules = ('{0}.{1}'.format(Celery_app_tasks.ix[i, 'Celery_app'], Celery_app_tasks.ix[i, 'Tasks_module']) for i in range(len(Celery_app_tasks)))\n", 368 | " CELERY_IMPORTS = tuple(modules)\n", 369 | "\n", 370 | " output = [] \n", 371 | " output.extend(['', '#{0:_^78}'.format('CELERY_IMPORTS')])\n", 372 | " output.append('CELERY_IMPORTS = {0}'.format(CELERY_IMPORTS))\n", 373 | " summary.extend(output)\n", 374 | " \n", 375 | " return summary" 376 | ] 377 | }, 378 | { 379 | "cell_type": "markdown", 380 | "metadata": {}, 381 | "source": [ 382 | "---\n", 383 | "#### CELERY_QUEUES" 384 | ] 385 | }, 386 | { 387 | "cell_type": "markdown", 388 | "metadata": {}, 389 | "source": [ 390 | " CELERY_QUEUES = (\n", 391 | " Queue('feed_tasks', routing_key='feed.#'),\n", 392 | " Queue('regular_tasks', routing_key='task.#'),\n", 393 | " Queue('image_tasks', exchange=Exchange('mediatasks', type='direct'), 
routing_key='image.compress'),\n", 394 | " )\n", 395 | "\n", 396 | " CELERY_QUEUES = (\n", 397 | " Queue('default', Exchange('default'), routing_key='default'),\n", 398 | " Queue('videos', Exchange('media'), routing_key='media.video'),\n", 399 | " Queue('images', Exchange('media'), routing_key='media.image'),\n", 400 | " )" 401 | ] 402 | }, 403 | { 404 | "cell_type": "code", 405 | "execution_count": 12, 406 | "metadata": { 407 | "collapsed": false 408 | }, 409 | "outputs": [], 410 | "source": [ 411 | "def set_CELERY_QUEUES(plan, summary):\n", 412 | " queues = plan[['Queue', 'Exchange', 'Exchange_Type', 'Routing_Key']].drop_duplicates()\n", 413 | " output = [] \n", 414 | " output.extend(['', '#{0:_^78}'.format('CELERY_QUEUES')])\n", 415 | "\n", 416 | " output.append('CELERY_QUEUES = (')\n", 417 | "\n", 418 | " for i in range(len(queues)):\n", 419 | " output.append(\" Queue('{queue}', Exchange('{exchange}', type = '{exchange_Type}'), routing_key='{routing_key}'),\"\\\n", 420 | " .format(queue = queues.ix[i, 'Queue'],\n", 421 | " exchange = queues.ix[i, 'Exchange'],\n", 422 | " exchange_Type = queues.ix[i, 'Exchange_Type'], \n", 423 | " routing_key = queues.ix[i, 'Routing_Key'] \n", 424 | " )\n", 425 | " )\n", 426 | " output.append(')')\n", 427 | "\n", 428 | " summary.extend(output)\n", 429 | " \n", 430 | " return summary" 431 | ] 432 | }, 433 | { 434 | "cell_type": "markdown", 435 | "metadata": {}, 436 | "source": [ 437 | "---\n", 438 | "#### CELERY_ROUTES" 439 | ] 440 | }, 441 | { 442 | "cell_type": "markdown", 443 | "metadata": {}, 444 | "source": [ 445 | " CELERY_ROUTES = {\n", 446 | " 'feeds.tasks.import_feed': {\n", 447 | " 'queue': 'feed_tasks',\n", 448 | " 'routing_key': 'feed.import',\n", 449 | " },\n", 450 | " }" 451 | ] 452 | }, 453 | { 454 | "cell_type": "code", 455 | "execution_count": 13, 456 | "metadata": { 457 | "collapsed": false 458 | }, 459 | "outputs": [], 460 | "source": [ 461 | "def set_CELERY_ROUTES(plan, summary):\n", 462 | " routes = plan[['Celery_app', 'Tasks_module', 'Task', 'Queue', 'Routing_Key']].drop_duplicates()\n", 463 | " output = [] \n", 464 | " output.extend(['', '#{0:_^78}'.format('CELERY_ROUTES')])\n", 465 | "\n", 466 | " output.append('CELERY_ROUTES = {')\n", 467 | "\n", 468 | " for i in range(len(routes)):\n", 469 | " output.append(\" '{app}.{module}.{task}': {{\\n 'queue': '{queue}',\\n 'routing_key': '{routing_key}',\\n }},\"\\\n", 470 | " .format(app = routes.ix[i, 'Celery_app'],\n", 471 | " module = routes.ix[i, 'Tasks_module'],\n", 472 | " task = routes.ix[i, 'Task'], \n", 473 | " queue = routes.ix[i, 'Queue'], \n", 474 | " routing_key = routes.ix[i, 'Routing_Key'])\n", 475 | " )\n", 476 | " output.append('}')\n", 477 | "\n", 478 | " summary.extend(output)\n", 479 | " \n", 480 | " return summary" 481 | ] 482 | }, 483 | { 484 | "cell_type": "markdown", 485 | "metadata": {}, 486 | "source": [ 487 | "---\n", 488 | "#### WORKERS" 489 | ] 490 | }, 491 | { 492 | "cell_type": "code", 493 | "execution_count": 14, 494 | "metadata": { 495 | "collapsed": false 496 | }, 497 | "outputs": [], 498 | "source": [ 499 | "def set_Workers_Scripts(plan, summary):\n", 500 | " workers = plan[['Node', 'Celery_app', 'Worker', 'Queue', 'Concurrency', 'Log_level']].drop_duplicates()\n", 501 | " output = []\n", 502 | " output.extend(['', '#{0:_^78}'.format('Workers Scripts')])\n", 503 | "\n", 504 | " for i in range(len(workers)):\n", 505 | " output.append('#[Node - {node}] : celery -A {app} worker -n {worker} -Q {queue} --concurrency={concurrency} --loglevel={loglevel}'\\\n", 
506 | " .format(node = workers.ix[i, 'Node'],\n", 507 | " app = workers.ix[i, 'Celery_app'],\n", 508 | " worker = workers.ix[i, 'Worker'], \n", 509 | " queue = workers.ix[i, 'Queue'], \n", 510 | " concurrency = workers.ix[i, 'Concurrency'], \n", 511 | " loglevel = workers.ix[i, 'Log_level']\n", 512 | " )\n", 513 | " )\n", 514 | "\n", 515 | " summary.extend(output)\n", 516 | " \n", 517 | " return summary" 518 | ] 519 | }, 520 | { 521 | "cell_type": "markdown", 522 | "metadata": {}, 523 | "source": [ 524 | "---\n", 525 | "#### FLOWER" 526 | ] 527 | }, 528 | { 529 | "cell_type": "code", 530 | "execution_count": 15, 531 | "metadata": { 532 | "collapsed": false 533 | }, 534 | "outputs": [], 535 | "source": [ 536 | "def set_FLOWER(plan, summary):\n", 537 | " app = plan.Celery_app.drop_duplicates()[0]\n", 538 | " output = [] \n", 539 | " output.extend(['', '#{0:_^78}'.format('FLOWER')])\n", 540 | " \n", 541 | " output.append('#[Flower] : celery -A {app} flower'.format(app = app))\n", 542 | " summary.extend(output)\n", 543 | " \n", 544 | " return summary" 545 | ] 546 | }, 547 | { 548 | "cell_type": "markdown", 549 | "metadata": {}, 550 | "source": [ 551 | "## Summarize" 552 | ] 553 | }, 554 | { 555 | "cell_type": "code", 556 | "execution_count": 16, 557 | "metadata": { 558 | "collapsed": true 559 | }, 560 | "outputs": [], 561 | "source": [ 562 | "def summarizeConfigurations(planExcelFile):\n", 563 | " \n", 564 | " summary = []\n", 565 | " \n", 566 | "# listDefaultCeleryConfigurations()\n", 567 | "\n", 568 | " plan = getExcelData(planExcelFile)\n", 569 | " \n", 570 | " import_Kombu_classes(plan, summary)\n", 571 | " set_CELERY_TIMEZONE_Misc(plan, summary)\n", 572 | " set_BROKER_URL(plan, summary)\n", 573 | " set_CELERY_RESULT_BACKEND(plan, summary)\n", 574 | " set_CELERY_IMPORTS(plan, summary)\n", 575 | " set_CELERY_QUEUES(plan, summary)\n", 576 | " set_CELERY_ROUTES(plan, summary)\n", 577 | " \n", 578 | " set_Workers_Scripts(plan, summary)\n", 579 | " set_FLOWER(plan, summary)\n", 580 | " \n", 581 | " return summary" 582 | ] 583 | }, 584 | { 585 | "cell_type": "markdown", 586 | "metadata": {}, 587 | "source": [ 588 | "## Output Configuration File" 589 | ] 590 | }, 591 | { 592 | "cell_type": "code", 593 | "execution_count": 17, 594 | "metadata": { 595 | "collapsed": false 596 | }, 597 | "outputs": [], 598 | "source": [ 599 | "def writeConfigurationFile(summary, file = 'celeryconfig.py'):\n", 600 | " with open(file, 'w', encoding = 'utf8') as f:\n", 601 | " for line in summary: f.write(line + '\\n')" 602 | ] 603 | }, 604 | { 605 | "cell_type": "code", 606 | "execution_count": 18, 607 | "metadata": { 608 | "collapsed": true 609 | }, 610 | "outputs": [], 611 | "source": [ 612 | "def genConfigFile():\n", 613 | " # 指定規劃檔案\n", 614 | " folder = os.getcwd()\n", 615 | " files = [file for file in os.listdir(folder) if file.rpartition('.')[2] in ('xls','xlsx')] \n", 616 | " \n", 617 | " if len(files) == 1 : \n", 618 | " file = os.path.join(folder, files[0])\n", 619 | " summary = summarizeConfigurations(file)\n", 620 | " for line in summary: print (line) \n", 621 | " writeConfigurationFile(summary)\n", 622 | " \n", 623 | " else:\n", 624 | " print('There must be one and only one plan Excel file.') " 625 | ] 626 | }, 627 | { 628 | "cell_type": "markdown", 629 | "metadata": {}, 630 | "source": [ 631 | "## Main" 632 | ] 633 | }, 634 | { 635 | "cell_type": "code", 636 | "execution_count": 19, 637 | "metadata": { 638 | "collapsed": false, 639 | "scrolled": true 640 | }, 641 | "outputs": [ 642 | { 643 | "name": 
"stdout", 644 | "output_type": "stream", 645 | "text": [ 646 | "\n", 647 | "#_____________________________Import Kombu classes_____________________________\n", 648 | "from kombu import Exchange, Queue\n", 649 | "\n", 650 | "#___________________________CELERY_TIMEZONE & Misc.____________________________\n", 651 | "CELERY_TIMEZONE = 'Asia/Taipei'\n", 652 | "CELERYD_POOL_RESTARTS = True\n", 653 | "\n", 654 | "#__________________________________BROKER_URL__________________________________\n", 655 | "BROKER_URL = 'redis://weilin.noip.me:6379/0'\n", 656 | "\n", 657 | "#____________________________CELERY_RESULT_BACKEND_____________________________\n", 658 | "CELERY_RESULT_BACKEND = 'redis://weilin.noip.me:6379/1'\n", 659 | "\n", 660 | "#________________________________CELERY_IMPORTS________________________________\n", 661 | "CELERY_IMPORTS = ('word_count.tasks',)\n", 662 | "\n", 663 | "#________________________________CELERY_QUEUES_________________________________\n", 664 | "CELERY_QUEUES = (\n", 665 | " Queue('word_counting', Exchange('celery', type = 'direct'), routing_key='word_counting'),\n", 666 | ")\n", 667 | "\n", 668 | "#________________________________CELERY_ROUTES_________________________________\n", 669 | "CELERY_ROUTES = {\n", 670 | " 'word_count.tasks.mapper': {\n", 671 | " 'queue': 'word_counting',\n", 672 | " 'routing_key': 'word_counting',\n", 673 | " },\n", 674 | "}\n", 675 | "\n", 676 | "#_______________________________Workers Scripts________________________________\n", 677 | "#[Node - localhost] : celery -A word_count worker -n worker1.%h -Q word_counting --concurrency=10 --loglevel=INFO\n", 678 | "\n", 679 | "#____________________________________FLOWER____________________________________\n", 680 | "#[Flower] : celery -A word_count flower\n" 681 | ] 682 | } 683 | ], 684 | "source": [ 685 | "if __name__ == '__main__':\n", 686 | " genConfigFile()" 687 | ] 688 | }, 689 | { 690 | "cell_type": "code", 691 | "execution_count": null, 692 | "metadata": { 693 | "collapsed": true 694 | }, 695 | "outputs": [], 696 | "source": [] 697 | } 698 | ], 699 | "metadata": { 700 | "kernelspec": { 701 | "display_name": "Python 3", 702 | "language": "python", 703 | "name": "python3" 704 | }, 705 | "language_info": { 706 | "codemirror_mode": { 707 | "name": "ipython", 708 | "version": 3 709 | }, 710 | "file_extension": ".py", 711 | "mimetype": "text/x-python", 712 | "name": "python", 713 | "nbconvert_exporter": "python", 714 | "pygments_lexer": "ipython3", 715 | "version": "3.5.1" 716 | } 717 | }, 718 | "nbformat": 4, 719 | "nbformat_minor": 0 720 | } 721 | -------------------------------------------------------------------------------- /celery_projects/Celery on Docker Swarm as Bluemix-liked IoT platform.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# 使用 Celery 於 Docker Swarm 之上 建構類似 Bluemix 的 IoT 平台\n", 8 | "## Part I: Celery on Docker Swarm \n", 9 | "\n", 10 | "Wei Lin \n", 11 | "20160126 " 12 | ] 13 | }, 14 | { 15 | "cell_type": "markdown", 16 | "metadata": {}, 17 | "source": [ 18 | "## 緣起 \n", 19 | "前天(2016/01/24)到臺北參加了一場很棒的 關於 [Raspberry Pi](https://www.raspberrypi.org/), [Docker](https://www.docker.com/), [MQTT](http://cheng-min-i-taiwan.blogspot.tw/2015/03/raspberry-pimqtt-android.html), [Bluemix](https://console.ng.bluemix.net/) 的 [講座](https://www.facebook.com/events/1092250660807957/1098932823473074/)。㑹中提到 Bluemix 和 MQTT 是一種 publisher/subscriber 
27 | "So, if we can use Docker Swarm to provide a cluster stage for a framework like Celery, could we quickly set up a private Bluemix-like platform, one without a 30-day trial limit? The devices on it would together form a distributed, cooperative computing system, treating the whole IoT (Internet of Things) as a single body." 28 | ] 29 | }, 30 | { 31 | "cell_type": "markdown", 32 | "metadata": {}, 33 | "source": [ 34 | "That said, beyond building an IoT platform, Celery + Docker Swarm should have other uses as well, so for reusability this part is split out on its own: this experiment only verifies that Celery runs smoothly on Docker Swarm. \n", 35 | "\n", 36 | "\n", 37 | "## Premises and goals of the experiment:\n", 38 | "- Assemble the system entirely with Docker: apart from Docker Engine and Docker Machine, no other software is installed on the hosts.\n", 39 | "- Build a distributed computing platform: use Celery to construct a distributed task queue in a Docker Swarm environment.\n", 40 | "- Keep the cluster platform swappable: Celery and Docker Swarm are decoupled, i.e. Celery does not know whether it is running on a single Docker Machine or on a Swarm (in fact, Celery does not even know it is running on Docker).\n", 41 | "- Exercise Docker Swarm's features: verify that Docker automatically spreads the worker containers Celery needs across different Docker Machines.\n", 42 | "- Verify that Celery runs smoothly on Docker Swarm: \n", 43 | " - after one producer (publisher) sends messages to a specific queue (topic), the corresponding consumers (subscribers) receive, process, and respond to the messages.\n", 44 | " - \"Word Count\", the introductory program of the Hadoop/MapReduce world, serves as the test case." 45 | ] 46 | }, 47 | { 48 | "cell_type": "markdown", 49 | "metadata": {}, 50 | "source": [ 51 | "## Implementation steps:" 52 | ] 53 | }, 54 | { 55 | "cell_type": "markdown", 56 | "metadata": {}, 57 | "source": [ 58 | "### Build the Docker Swarm: \n", 59 | "Following this [article](https://www.facebook.com/groups/docker.taipei/permalink/1704032656498757), I had already built a Docker Swarm out of two RPi2 boards." 60 | ] 61 | }, 62 | { 63 | "cell_type": "markdown", 64 | "metadata": {}, 65 | "source": [ 66 | "#### There are two Docker machines in the Swarm:\n", 67 | "- host rpi202 (192.168.0.114) acts as the Swarm Manager; its Docker machine name is master01\n", 68 | "- host rpi201 (192.168.0.109) acts as a Swarm Node; its Docker machine name is node01" 69 | ] 70 | }, 71 | { 72 | "cell_type": "code", 73 | "execution_count": null, 74 | "metadata": { 75 | "collapsed": true 76 | }, 77 | "outputs": [], 78 | "source": [ 79 | "HypriotOS: pi@rpi202 in ~\n", 80 | "$ docker-machine ls\n", 81 | "NAME ACTIVE DRIVER STATE URL SWARM\n", 82 | "master01 hypriot Running tcp://192.168.0.114:2376 master01 (master)\n", 83 | "node01 hypriot Running tcp://192.168.0.109:2376 master01\n", 84 | "HypriotOS: pi@rpi202 in ~\n", 85 | "$\n", 86 | "\n", 87 | "\n", 88 | "# Nodes in the Swarm:\n", 89 | "\n", 90 | "HypriotOS: pi@rpi202 in ~\n", 91 | "$ docker $(docker-machine config --swarm master01) info\n", 92 | "Containers: 4\n", 93 | "Images: 51\n", 94 | "Role: primary\n", 95 | "Strategy: spread\n", 96 | "Filters: health, port, dependency, affinity, constraint\n", 97 | "Nodes: 2\n", 98 | " master01: 192.168.0.114:2376\n", 99 | " └ Status: Healthy\n", 100 | " └ Containers: 3\n", 101 | " └ Reserved CPUs: 0 / 4\n", 102 | " └ Reserved Memory: 0 B / 972 MiB\n", 103 | " └ Labels: executiondriver=native-0.2, kernelversion=4.1.8-hypriotos-v7+, operatingsystem=Raspbian GNU/Linux 8 (jessie), provider=hypriot, 
storagedriver=overlay\n", 104 | " node01: 192.168.0.109:2376\n", 105 | " └ Status: Healthy\n", 106 | " └ Containers: 1\n", 107 | " └ Reserved CPUs: 0 / 4\n", 108 | " └ Reserved Memory: 0 B / 972 MiB\n", 109 | " └ Labels: executiondriver=native-0.2, kernelversion=4.1.8-hypriotos-v7+, operatingsystem=Raspbian GNU/Linux 8 (jessie), provider=hypriot, storagedriver=overlay\n", 110 | "CPUs: 8\n", 111 | "Total Memory: 1.899 GiB\n", 112 | "Name: b7def5d9af98\n", 113 | "HypriotOS: pi@rpi202 in ~\n", 114 | "$" 115 | ] 116 | }, 117 | { 118 | "cell_type": "markdown", 119 | "metadata": {}, 120 | "source": [ 121 | "### 建立 Docker Overlay network: " 122 | ] 123 | }, 124 | { 125 | "cell_type": "code", 126 | "execution_count": null, 127 | "metadata": { 128 | "collapsed": true 129 | }, 130 | "outputs": [], 131 | "source": [ 132 | "# Swarm 中原本的 networks:\n", 133 | "\n", 134 | "HypriotOS: pi@rpi202 in ~\n", 135 | "$ docker $(docker-machine config --swarm master01) network ls\n", 136 | "NETWORK ID NAME DRIVER\n", 137 | "f73c178c37f9 master01/bridge bridge\n", 138 | "414ed2b10a9d master01/none null\n", 139 | "930b58fd0d33 master01/host host\n", 140 | "759c7acf7c0a node01/none null\n", 141 | "0954a9483bae node01/host host\n", 142 | "21bce8ef40ec node01/bridge bridge\n", 143 | "HypriotOS: pi@rpi202 in ~\n", 144 | "$ \n", 145 | "\n", 146 | "\n", 147 | "# 建立一個 overlay network:\n", 148 | "\n", 149 | "HypriotOS: pi@rpi202 in ~\n", 150 | "$ docker network create --driver=overlay mynet\n", 151 | "1feac8ae92873e78570b904b0d3f848b49a180c15ebe0a4ff38f1a2cbcc691c5\n", 152 | "HypriotOS: pi@rpi202 in ~\n", 153 | "$\n", 154 | "\n", 155 | "\n", 156 | "# Swarm 中目前的 networks,有看到新建的 overlay network \"mynet\":\n", 157 | "\n", 158 | "HypriotOS: pi@rpi202 in ~\n", 159 | "$ docker $(docker-machine config --swarm master01) network ls\n", 160 | "NETWORK ID NAME DRIVER\n", 161 | "930b58fd0d33 master01/host host\n", 162 | "1feac8ae9287 mynet overlay\n", 163 | "f73c178c37f9 master01/bridge bridge\n", 164 | "21bce8ef40ec node01/bridge bridge\n", 165 | "759c7acf7c0a node01/none null\n", 166 | "0954a9483bae node01/host host\n", 167 | "414ed2b10a9d master01/none null\n", 168 | "HypriotOS: pi@rpi202 in ~\n", 169 | "$\n", 170 | "\n", 171 | "\n", 172 | "# Swarm Manager 所在的 host中 所看到的 networks,有看到新建的 overlay network \"mynet\":\n", 173 | "\n", 174 | "HypriotOS: pi@rpi202 in ~\n", 175 | "$ docker network ls\n", 176 | "NETWORK ID NAME DRIVER\n", 177 | "f73c178c37f9 bridge bridge\n", 178 | "414ed2b10a9d none null\n", 179 | "930b58fd0d33 host host\n", 180 | "1feac8ae9287 mynet overlay\n", 181 | "HypriotOS: pi@rpi202 in ~\n", 182 | "$\n", 183 | "\n", 184 | "\n", 185 | "# Swarm Node 所在的 host中 所看到的 networks,有看到新建的 overlay network \"mynet\":\n", 186 | "\n", 187 | "HypriotOS: pi@rpi201 in ~\n", 188 | "$ docker network ls\n", 189 | "NETWORK ID NAME DRIVER\n", 190 | "1feac8ae9287 mynet overlay\n", 191 | "21bce8ef40ec bridge bridge\n", 192 | "759c7acf7c0a none null\n", 193 | "0954a9483bae host host\n", 194 | "HypriotOS: pi@rpi201 in ~\n", 195 | "$" 196 | ] 197 | }, 198 | { 199 | "cell_type": "markdown", 200 | "metadata": {}, 201 | "source": [ 202 | "### 建立 Celery 所需的 Broker,使用 Redis" 203 | ] 204 | }, 205 | { 206 | "cell_type": "code", 207 | "execution_count": null, 208 | "metadata": { 209 | "collapsed": true 210 | }, 211 | "outputs": [], 212 | "source": [ 213 | "HypriotOS: pi@rpi202 in ~\n", 214 | "$ docker run -d -p 6379:6379 --net=mynet --name=redis --volume=/data:/data hypriot/rpi-redis\n", 215 | 
"a2abf9277b5e4818da89ffa282a706506ef288426486cc25b431208564bf6e0f\n", 216 | "\n", 217 | "\n", 218 | "HypriotOS: pi@rpi202 in ~\n", 219 | "$ docker ps\n", 220 | "CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES\n", 221 | "a2abf9277b5e hypriot/rpi-redis \"/entrypoint.sh redis\" 13 hours ago Up About an hour 0.0.0.0:6379->6379/tcp redis\n", 222 | "f0ce33ca1152 hypriot/rpi-swarm \"/swarm join --advert\" 6 days ago Up 6 days 2375/tcp swarm-agent\n", 223 | "b7def5d9af98 hypriot/rpi-swarm \"/swarm manage --tlsv\" 6 days ago Up 6 days 0.0.0.0:3376->3376/tcp, 2375/tcp swarm-agent-master\n", 224 | "ad594813f8f0 nimblestratus/rpi-consul \"/bin/start -server -\" 6 days ago Up 6 days 53/udp, 8300-8302/tcp, 8301-8302/udp, 8400/tcp, 0.0.0.0:8500->8500/tcp consul\n", 225 | "HypriotOS: pi@rpi202 in ~\n", 226 | "$" 227 | ] 228 | }, 229 | { 230 | "cell_type": "markdown", 231 | "metadata": {}, 232 | "source": [ 233 | "### 將檔案 celeryconfig.py、start_workers.sh、資料夾 word_count 複製到 兩台 hosts 的 /data/celery_projects 資料夾之下\n", 234 | "\n", 235 | "#### 可以使用 SCP 來達成: http://www.hypexr.org/linux_scp_help.php \n", 236 | "\n", 237 | "例如: \n", 238 | "HypriotOS: pi@rpi202 in ~ \n", 239 | "$ scp -r /data/celery_projects root@rpi201:/data/" 240 | ] 241 | }, 242 | { 243 | "cell_type": "code", 244 | "execution_count": null, 245 | "metadata": { 246 | "collapsed": true 247 | }, 248 | "outputs": [], 249 | "source": [ 250 | "HypriotOS: pi@rpi202 in /data/celery_projects\n", 251 | "$ ll\n", 252 | "total 20\n", 253 | "drwxr-xr-x 3 999 root 4096 Jan 25 23:01 ./\n", 254 | "drwxr-xr-x 3 999 root 4096 Jan 25 23:01 ../\n", 255 | "-rw-r--r-- 1 999 root 1079 Jan 25 21:12 celeryconfig.py\n", 256 | "-rwxr-xr-x 1 999 root 732 Jan 25 22:53 start_workers.sh* <--- 用來啟動 worker containers 的 script,後述。\n", 257 | "drwxr-xr-x 3 root root 4096 Jan 25 23:01 word_count/\n", 258 | "HypriotOS: pi@rpi202 in /data/celery_projects\n", 259 | "$\n", 260 | "\n", 261 | "\n", 262 | "HypriotOS: pi@rpi201 in /data/celery_projects\n", 263 | "$ ll\n", 264 | "total 20\n", 265 | "drwxr-xr-x 3 root root 4096 Jan 25 23:03 ./\n", 266 | "drwxr-xr-x 3 999 root 4096 Jan 25 22:55 ../\n", 267 | "-rw-r--r-- 1 root root 1079 Jan 25 21:12 celeryconfig.py\n", 268 | "-rw-r--r-- 1 root root 732 Jan 25 22:53 start_workers.sh <--- 用來啟動 worker containers 的 script,後述。\n", 269 | "drwxr-xr-x 3 root root 4096 Jan 25 23:03 word_count/\n", 270 | "HypriotOS: pi@rpi201 in /data/celery_projects\n", 271 | "$" 272 | ] 273 | }, 274 | { 275 | "cell_type": "markdown", 276 | "metadata": {}, 277 | "source": [ 278 | "### 建立監控用的 [Flower ](http://docs.celeryproject.org/en/latest/userguide/monitoring.html#flower-real-time-celery-web-monitor) container" 279 | ] 280 | }, 281 | { 282 | "cell_type": "code", 283 | "execution_count": null, 284 | "metadata": { 285 | "collapsed": true 286 | }, 287 | "outputs": [], 288 | "source": [ 289 | "HypriotOS: pi@rpi202 in ~\n", 290 | "$ docker run -d -p 5555:5555 --net=mynet --name=flower --volume=/data/celery_projects:/celery_projects wei1234c/celery_armv7 /bin/sh -c \"cd /celery_projects && celery -A word_count flower\"\n", 291 | "276f00591fd7042139ddf660730d223bcf19e9f8bd369f075de417140b6dfd4a\n", 292 | "HypriotOS: pi@rpi202 in ~\n", 293 | "$\n", 294 | "\n", 295 | "\n", 296 | "HypriotOS: pi@rpi202 in ~\n", 297 | "$ docker ps\n", 298 | "CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES\n", 299 | "276f00591fd7 wei1234c/celery_armv7 \"/bin/sh -c 'cd /cele\" 31 seconds ago Up 27 seconds 0.0.0.0:5555->5555/tcp flower" 300 | ] 301 | }, 302 | { 303 | "cell_type": 
"markdown", 304 | "metadata": {}, 305 | "source": [ 306 | "#### Flower 的 container 起來之後,就可以用瀏覽器查看,不過目前都還是空的\n", 307 | "![](./jpgs/flower1.jpg)" 308 | ] 309 | }, 310 | { 311 | "cell_type": "markdown", 312 | "metadata": {}, 313 | "source": [ 314 | "### 透過 Swarm Manager 建立並佈署 Celery worker containers" 315 | ] 316 | }, 317 | { 318 | "cell_type": "markdown", 319 | "metadata": {}, 320 | "source": [ 321 | "#### 用來建立 worker containers 的 script\n", 322 | "start_workers.sh" 323 | ] 324 | }, 325 | { 326 | "cell_type": "code", 327 | "execution_count": null, 328 | "metadata": { 329 | "collapsed": true 330 | }, 331 | "outputs": [], 332 | "source": [ 333 | "# ./start_workers.sh\n", 334 | "\n", 335 | "echo \"Starting Celery cluster containers _________________________________________________\"\n", 336 | "\n", 337 | "eval $(docker-machine env --swarm master01)\n", 338 | "\n", 339 | "PROJECT=$1 # project 名稱\n", 340 | "WORKER_START_ID=$2 # worker container 編號 第一個\n", 341 | "WORKER_LAST_ID=$3 # worker container 編號 最後一個\n", 342 | "CONCURRENCY=$4 # 每個 worker 可以有幾個 subprocesses\n", 343 | "\n", 344 | "for (( i=${WORKER_START_ID}; i<=${WORKER_LAST_ID}; i=i+1 ))\n", 345 | "do\n", 346 | " docker run -d --name=${PROJECT}_celery${i} --hostname=${PROJECT}_celery${i} --net=mynet --volume=/data/celery_projects:/celery_projects wei1234c/celery_armv7 /bin/sh -c \"cd /celery_projects && celery -A ${PROJECT} worker -n worker${i}.%h --concurrency=${CONCURRENCY} --loglevel=INFO\"\n", 347 | "done" 348 | ] 349 | }, 350 | { 351 | "cell_type": "markdown", 352 | "metadata": {}, 353 | "source": [ 354 | "#### 初始建立 Celery worker containers\n", 355 | "建立了 4 個 containers,每個 container 有一個 Celery worker,每個 worker 可以使用 5 個 subprocesses。 \n", 356 | "因為這個 Swarm 採取 spread 策略,所以 4 個都被 schedule 到 container 數量偏少的那一台 Docker machine 上面。" 357 | ] 358 | }, 359 | { 360 | "cell_type": "code", 361 | "execution_count": null, 362 | "metadata": { 363 | "collapsed": false 364 | }, 365 | "outputs": [], 366 | "source": [ 367 | "# CLI 參數說明:\n", 368 | "# $1 # project 名稱\n", 369 | "# $2 # worker container 編號 第一個\n", 370 | "# $3 # worker container 編號 最後一個\n", 371 | "# $4 # 每個 worker 可以有幾個 subprocesses\n", 372 | "\n", 373 | "HypriotOS: pi@rpi202 in /data/celery_projects\n", 374 | "$ ./start_workers.sh word_count 1 4 5\n", 375 | "Starting Celery cluster containers _________________________________________________\n", 376 | "a22b08a0818b3246f90511ad21cb2a0ab37a4e72661bf559ade7e320db030505\n", 377 | "77eabded27e4ea3aaa640480c088fa7b4b9818fc3e40fb66636cc9abe8a78e69\n", 378 | "df05a7204f40470cfd8eee21a06be45f5a306ea32df0196f3d004beac5d2f82d\n", 379 | "e67d39740ace5c2a5b9a05e6ca1adc73c5e5944e62302d02391d37f7ee6aa479\n", 380 | "\n", 381 | "\n", 382 | "\n", 383 | "# 建立了 4 個 containers,每個 container 有一個 Celery worker,每個 worker 可以使用 5 個 subprocesses。\n", 384 | "\n", 385 | "# 因為採取 spread 策略,所以 4 個都被 schedule 到 container 數量偏少的一台 Docker machine。\n", 386 | "\n", 387 | "HypriotOS: pi@rpi202 in /data/celery_projects\n", 388 | "$ docker ps\n", 389 | "CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES\n", 390 | "e67d39740ace wei1234c/celery_armv7 \"/bin/sh -c 'cd /cele\" About a minute ago Up About a minute 5555/tcp node01/word_count_celery4\n", 391 | "df05a7204f40 wei1234c/celery_armv7 \"/bin/sh -c 'cd /cele\" About a minute ago Up About a minute 5555/tcp node01/word_count_celery3\n", 392 | "77eabded27e4 wei1234c/celery_armv7 \"/bin/sh -c 'cd /cele\" About a minute ago Up About a minute 5555/tcp node01/word_count_celery2\n", 393 | "a22b08a0818b wei1234c/celery_armv7 \"/bin/sh 
-c 'cd /cele\" About a minute ago Up About a minute 5555/tcp node01/word_count_celery1\n", 394 | "276f00591fd7 wei1234c/celery_armv7 \"/bin/sh -c 'cd /cele\" 37 minutes ago Up 5 minutes 192.168.0.114:5555->5555/tcp master01/flower\n", 395 | "a2abf9277b5e hypriot/rpi-redis \"/entrypoint.sh redis\" 13 hours ago Up About an hour 192.168.0.114:6379->6379/tcp master01/redis\n", 396 | "980161d10fc4 hypriot/rpi-swarm \"/swarm join --advert\" 6 days ago Up 6 days 2375/tcp node01/swarm-agent\n", 397 | "f0ce33ca1152 hypriot/rpi-swarm \"/swarm join --advert\" 6 days ago Up 6 days 2375/tcp master01/swarm-agent\n", 398 | "b7def5d9af98 hypriot/rpi-swarm \"/swarm manage --tlsv\" 6 days ago Up 6 days 2375/tcp, 192.168.0.114:3376->3376/tcp master01/swarm-agent-master\n", 399 | "ad594813f8f0 nimblestratus/rpi-consul \"/bin/start -server -\" 6 days ago Up 6 days 53/udp, 8300-8302/tcp, 8301-8302/udp, 8400/tcp, 192.168.0.114:8500->8500/tcp master01/consul\n", 400 | "HypriotOS: pi@rpi202 in /data/celery_projects\n", 401 | "$" 402 | ] 403 | }, 404 | { 405 | "cell_type": "markdown", 406 | "metadata": {}, 407 | "source": [ 408 | "#### Flower 中就會顯示 workers,目前有4個 workers\n", 409 | "- celery@worker1.word_count_celery1\n", 410 | "- celery@worker2.word_count_celery2\n", 411 | "- celery@worker3.word_count_celery3\n", 412 | "- celery@worker4.word_count_celery4 \n", 413 | "\n", 414 | "![](./jpgs/flower22.jpg)" 415 | ] 416 | }, 417 | { 418 | "cell_type": "markdown", 419 | "metadata": {}, 420 | "source": [ 421 | "#### 擴增 Celery worker containers\n", 422 | "可以隨時動態的擴增 Celery 的 scale 以因應效能的需求。 \n", 423 | "這次再增建了 4 個 containers,每個 container 有一個 Celery worker,每個 worker 可以使用 5 個 subprocesses。 \n", 424 | "因為這個 Swarm 採取 spread 策略,而且當下兩台 Docker machines 上的 containers 數量相當,所以第二次擴增的這 4 個 containers 被平均地 schedule 到兩台 Docker machines 上面。" 425 | ] 426 | }, 427 | { 428 | "cell_type": "code", 429 | "execution_count": null, 430 | "metadata": { 431 | "collapsed": true 432 | }, 433 | "outputs": [], 434 | "source": [ 435 | "# $1 # project 名稱\n", 436 | "# $2 # worker container 編號 第一個\n", 437 | "# $3 # worker container 編號 最後一個\n", 438 | "# $4 # 每個 worker 可以有幾個 subprocesses\n", 439 | "\n", 440 | "HypriotOS: pi@rpi202 in /data/celery_projects\n", 441 | "$ ./start_workers.sh word_count 5 8 5\n", 442 | "Starting Celery cluster containers _________________________________________________\n", 443 | "a4de4967fd6211266cbad04fecfc357aa81789063cca3042388019adab2a6c71\n", 444 | "7066ba6021de870f1332858c6f96673a159d7e5031a5682d3853fa6bd8fe2252\n", 445 | "79792c823fbf2769e4983c525598c30ba3758c23697ef66a78a54163374d3233\n", 446 | "25c02d07ed6f1217ee68dc486a6586262ca2e3ed01a2a8690eaa2a783ad07d73\n", 447 | "\n", 448 | "\n", 449 | "HypriotOS: pi@rpi202 in /data/celery_projects\n", 450 | "$ docker ps\n", 451 | "CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES\n", 452 | "25c02d07ed6f wei1234c/celery_armv7 \"/bin/sh -c 'cd /cele\" 18 seconds ago Up 12 seconds 5555/tcp node01/word_count_celery8\n", 453 | "79792c823fbf wei1234c/celery_armv7 \"/bin/sh -c 'cd /cele\" 39 seconds ago Up 25 seconds 5555/tcp master01/word_count_celery7\n", 454 | "7066ba6021de wei1234c/celery_armv7 \"/bin/sh -c 'cd /cele\" About a minute ago Up 45 seconds 5555/tcp node01/word_count_celery6\n", 455 | "a4de4967fd62 wei1234c/celery_armv7 \"/bin/sh -c 'cd /cele\" About a minute ago Up About a minute 5555/tcp master01/word_count_celery5\n", 456 | "e67d39740ace wei1234c/celery_armv7 \"/bin/sh -c 'cd /cele\" 15 minutes ago Up 15 minutes 5555/tcp node01/word_count_celery4\n", 457 | 
"df05a7204f40 wei1234c/celery_armv7 \"/bin/sh -c 'cd /cele\" 15 minutes ago Up 15 minutes 5555/tcp node01/word_count_celery3\n", 458 | "77eabded27e4 wei1234c/celery_armv7 \"/bin/sh -c 'cd /cele\" 15 minutes ago Up 15 minutes 5555/tcp node01/word_count_celery2\n", 459 | "a22b08a0818b wei1234c/celery_armv7 \"/bin/sh -c 'cd /cele\" 16 minutes ago Up 16 minutes 5555/tcp node01/word_count_celery1\n", 460 | "276f00591fd7 wei1234c/celery_armv7 \"/bin/sh -c 'cd /cele\" 51 minutes ago Up 19 minutes 192.168.0.114:5555->5555/tcp master01/flower\n", 461 | "a2abf9277b5e hypriot/rpi-redis \"/entrypoint.sh redis\" 14 hours ago Up 2 hours 192.168.0.114:6379->6379/tcp master01/redis\n", 462 | "980161d10fc4 hypriot/rpi-swarm \"/swarm join --advert\" 6 days ago Up 6 days 2375/tcp node01/swarm-agent\n", 463 | "f0ce33ca1152 hypriot/rpi-swarm \"/swarm join --advert\" 6 days ago Up 6 days 2375/tcp master01/swarm-agent\n", 464 | "b7def5d9af98 hypriot/rpi-swarm \"/swarm manage --tlsv\" 6 days ago Up 6 days 2375/tcp, 192.168.0.114:3376->3376/tcp master01/swarm-agent-master\n", 465 | "ad594813f8f0 nimblestratus/rpi-consul \"/bin/start -server -\" 6 days ago Up 6 days 53/udp, 8300-8302/tcp, 8301-8302/udp, 8400/tcp, 192.168.0.114:8500->8500/tcp master01/consul\n", 466 | "HypriotOS: pi@rpi202 in /data/celery_projects\n", 467 | "$" 468 | ] 469 | }, 470 | { 471 | "cell_type": "markdown", 472 | "metadata": {}, 473 | "source": [ 474 | "#### 在 Swarm node 上也可以看到透過 Swarm manager 遠端建立的 containers" 475 | ] 476 | }, 477 | { 478 | "cell_type": "code", 479 | "execution_count": null, 480 | "metadata": { 481 | "collapsed": true 482 | }, 483 | "outputs": [], 484 | "source": [ 485 | "HypriotOS: pi@rpi201 in /data/celery_projects\n", 486 | "$ docker ps\n", 487 | "CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES\n", 488 | "25c02d07ed6f wei1234c/celery_armv7 \"/bin/sh -c 'cd /cele\" 16 minutes ago Up 16 minutes 5555/tcp word_count_celery8\n", 489 | "7066ba6021de wei1234c/celery_armv7 \"/bin/sh -c 'cd /cele\" 17 minutes ago Up 17 minutes 5555/tcp word_count_celery6\n", 490 | "e67d39740ace wei1234c/celery_armv7 \"/bin/sh -c 'cd /cele\" 32 minutes ago Up 31 minutes 5555/tcp word_count_celery4\n", 491 | "df05a7204f40 wei1234c/celery_armv7 \"/bin/sh -c 'cd /cele\" 32 minutes ago Up 32 minutes 5555/tcp word_count_celery3\n", 492 | "77eabded27e4 wei1234c/celery_armv7 \"/bin/sh -c 'cd /cele\" 32 minutes ago Up 32 minutes 5555/tcp word_count_celery2\n", 493 | "a22b08a0818b wei1234c/celery_armv7 \"/bin/sh -c 'cd /cele\" 32 minutes ago Up 32 minutes 5555/tcp word_count_celery1\n", 494 | "980161d10fc4 hypriot/rpi-swarm \"/swarm join --advert\" 6 days ago Up 6 days 2375/tcp swarm-agent\n", 495 | "HypriotOS: pi@rpi201 in /data/celery_projects\n", 496 | "$" 497 | ] 498 | }, 499 | { 500 | "cell_type": "markdown", 501 | "metadata": {}, 502 | "source": [ 503 | "#### Flower 中就會顯示 目前兩台 machines 中共有8個 workers = 8 x 5 = 40 個 processes 可共使用\n", 504 | "\n", 505 | "![](./jpgs/flower3.jpg)" 506 | ] 507 | }, 508 | { 509 | "cell_type": "markdown", 510 | "metadata": {}, 511 | "source": [ 512 | "### 用來測試的程式碼" 513 | ] 514 | }, 515 | { 516 | "cell_type": "code", 517 | "execution_count": 18, 518 | "metadata": { 519 | "collapsed": true 520 | }, 521 | "outputs": [], 522 | "source": [ 523 | "from word_count.tasks import * \n", 524 | " \n", 525 | "# 將 text 檔案拆解成 list of words \n", 526 | "def getWordsFromText(file = '.\\\\text\\\\test.txt'):\n", 527 | " with open(file) as f:\n", 528 | " lines = f.readlines() \n", 529 | " return ' '.join(lines).replace(',', 
'').replace('.', '').split()\n", 530 | "\n", 531 | "\n", 532 | "def reduce(word_counts): \n", 533 | " wordCounts = {}\n", 534 | " \n", 535 | " for word_count in word_counts:\n", 536 | " if word_count is not None: \n", 537 | " wordCounts[word_count[0]] = wordCounts.get(word_count[0], 0) + word_count[1]\n", 538 | " \n", 539 | " result = sorted(list(wordCounts.items()), \n", 540 | " key = lambda x: (x[1], x[0]), \n", 541 | " reverse = True) \n", 542 | " return result" 543 | ] 544 | }, 545 | { 546 | "cell_type": "code", 547 | "execution_count": 19, 548 | "metadata": { 549 | "collapsed": false 550 | }, 551 | "outputs": [ 552 | { 553 | "data": { 554 | "text/plain": [ 555 | "[\"Aesop's\", 'Fables', 'Translated']" 556 | ] 557 | }, 558 | "execution_count": 19, 559 | "metadata": {}, 560 | "output_type": "execute_result" 561 | } 562 | ], 563 | "source": [ 564 | "# list of words\n", 565 | "words = getWordsFromText()\n", 566 | "words[:3]" 567 | ] 568 | }, 569 | { 570 | "cell_type": "code", 571 | "execution_count": 20, 572 | "metadata": { 573 | "collapsed": false 574 | }, 575 | "outputs": [ 576 | { 577 | "data": { 578 | "text/plain": [ 579 | "2190" 580 | ] 581 | }, 582 | "execution_count": 20, 583 | "metadata": {}, 584 | "output_type": "execute_result" 585 | } 586 | ], 587 | "source": [ 588 | "# 字數總計\n", 589 | "len(words)" 590 | ] 591 | }, 592 | { 593 | "cell_type": "markdown", 594 | "metadata": {}, 595 | "source": [ 596 | "#### ./word_count/tasks.py 中所定義的 mapper 函數:\n", 597 | " from celery import group\n", 598 | " from word_count.celery import app \n", 599 | " \n", 600 | " @app.task\n", 601 | " def mapper(word):\n", 602 | " return (word, 1) if len(word) >= 5 else None # 過濾掉太短的word" 603 | ] 604 | }, 605 | { 606 | "cell_type": "markdown", 607 | "metadata": {}, 608 | "source": [ 609 | "### 發送 message 要求 Celery workers 做平行運算並回應結果" 610 | ] 611 | }, 612 | { 613 | "cell_type": "code", 614 | "execution_count": 21, 615 | "metadata": { 616 | "collapsed": false 617 | }, 618 | "outputs": [ 619 | { 620 | "name": "stdout", 621 | "output_type": "stream", 622 | "text": [ 623 | "Wall time: 3min 23s\n" 624 | ] 625 | }, 626 | { 627 | "data": { 628 | "text/plain": [ 629 | "[('would', 12), ('which', 8), ('their', 8), ('caught', 6), ('Farmer', 6)]" 630 | ] 631 | }, 632 | "execution_count": 21, 633 | "metadata": {}, 634 | "output_type": "execute_result" 635 | } 636 | ], 637 | "source": [ 638 | "def count_celery(words):\n", 639 | " \n", 640 | " # 發送給 Celery 執行\n", 641 | " asyncResults = [mapper.s(word).delay() for word in words] # mapper 是定義在 ./word_count/tasks.py 中\n", 642 | " results = [asyncResult.get() for asyncResult in asyncResults if asyncResult.get() is not None]\n", 643 | "\n", 644 | " return reduce(results) \n", 645 | "\n", 646 | "%time counts = count_celery(words)\n", 647 | "counts[:5]" 648 | ] 649 | }, 650 | { 651 | "cell_type": "markdown", 652 | "metadata": {}, 653 | "source": [ 654 | "#### 訊息一送出去,Flower 中就會顯示執行的狀況\n", 655 | "![](./jpgs/flower4.jpg) \n", 656 | "\n", 657 | "#### 執行完畢\n", 658 | "![](./jpgs/flower5.jpg)\n", 659 | "\n", 660 | "#### 過程中 CPUs 並不是非常忙碌,還可以再壓榨一些\n", 661 | "![](./jpgs/htop1.jpg)" 662 | ] 663 | }, 664 | { 665 | "cell_type": "markdown", 666 | "metadata": {}, 667 | "source": [ 668 | "## Summary:\n", 669 | "雖然Bluemix的功能完整性相對的高很多,但是個人覺得兩者的原理是類似的。考量資料安全性與成本,若我們可以使用 Docker Swarm 和 Message Queue 快速地建構私有的 IoT 平台 也許是一個可以考慮的選擇。 \n", 670 | "\n", 671 | "本次實驗先驗證 Celery 可以在 Docker Swarm 上面運作 並獲得 scalability,後續再設計一個 IoT 網路來模擬 devices 之間的互動。" 672 | ] 673 | } 674 | ], 675 | "metadata": { 676 | "kernelspec": 
{ 677 | "display_name": "Python 3", 678 | "language": "python", 679 | "name": "python3" 680 | }, 681 | "language_info": { 682 | "codemirror_mode": { 683 | "name": "ipython", 684 | "version": 3 685 | }, 686 | "file_extension": ".py", 687 | "mimetype": "text/x-python", 688 | "name": "python", 689 | "nbconvert_exporter": "python", 690 | "pygments_lexer": "ipython3", 691 | "version": "3.5.1" 692 | } 693 | }, 694 | "nbformat": 4, 695 | "nbformat_minor": 0 696 | } 697 | --------------------------------------------------------------------------------