├── ftrigger
│   ├── __init__.py
│   ├── kafka.py
│   └── trigger.py
├── requirements.txt
├── setup.py
├── Dockerfile
├── .gitignore
├── docker-compose.yml
├── README.md
└── wait-for-it.sh
/ftrigger/__init__.py:
--------------------------------------------------------------------------------
1 | 
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | -e .
2 | 
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | from setuptools import find_packages
2 | from setuptools import setup
3 | 
4 | 
5 | install_requires = [
6 |     'confluent-kafka',
7 |     'pyjq',
8 |     'requests',
9 | ]
10 | 
11 | 
12 | dependency_links = [
13 | 
14 | ]
15 | 
16 | 
17 | setup(
18 |     name='ftrigger',
19 |     version='0.1',
20 |     description='Triggers for FaaS functions',
21 |     author='King Chung Huang',
22 |     author_email='kchuang@ucalgary.ca',
23 |     url='https://github.com/ucalgary/ftrigger',
24 |     packages=find_packages(),
25 |     package_data={
26 |     },
27 |     install_requires=install_requires,
28 |     dependency_links=dependency_links,
29 |     entry_points="""
30 |     [console_scripts]
31 |     kafka-trigger=ftrigger.kafka:main
32 |     """,
33 |     zip_safe=True
34 | )
35 | 
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM ucalgary/python-librdkafka:3.7.0-0.11.6
2 | 
3 | RUN mkdir -p /usr/src/app
4 | WORKDIR /usr/src/app
5 | 
6 | COPY setup.py /usr/src/app
7 | COPY ftrigger /usr/src/app/ftrigger
8 | COPY wait-for-it.sh /usr/local/bin/wait-for-it
9 | ARG SETUP_COMMAND=install
10 | RUN apk add --no-cache --virtual .build-deps \
11 |         autoconf \
12 |         automake \
13 |         gcc \
14 |         git \
15 |         libtool \
16 |         make \
17 |         musl-dev && \
18 |     python setup.py ${SETUP_COMMAND} && \
19 |     apk del .build-deps
20 | 
21 | LABEL maintainer="King Chung Huang <kchuang@ucalgary.ca>" \
22 |       org.label-schema.schema-version="1.0" \
23 |       org.label-schema.name="Function Triggers" \
24 |       org.label-schema.vcs-url="https://github.com/ucalgary/ftrigger"
25 | 
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 | 
6 | # C extensions
7 | *.so
8 | 
9 | # Distribution / packaging
10 | .Python
11 | env/
12 | build/
13 | develop-eggs/
14 | dist/
15 | downloads/
16 | eggs/
17 | .eggs/
18 | lib/
19 | lib64/
20 | parts/
21 | sdist/
22 | var/
23 | wheels/
24 | *.egg-info/
25 | .installed.cfg
26 | *.egg
27 | 
28 | # PyInstaller
29 | #  Usually these files are written by a python script from a template
30 | #  before PyInstaller builds the exe, so as to inject date/other infos into it.
31 | *.manifest
32 | *.spec
33 | 
34 | # Installer logs
35 | pip-log.txt
36 | pip-delete-this-directory.txt
37 | 
38 | # Unit test / coverage reports
39 | htmlcov/
40 | .tox/
41 | .coverage
42 | .coverage.*
43 | .cache
44 | nosetests.xml
45 | coverage.xml
46 | *,cover
47 | .hypothesis/
48 | 
49 | # Translations
50 | *.mo
51 | *.pot
52 | 
53 | # Django stuff:
54 | *.log
55 | local_settings.py
56 | 
57 | # Flask stuff:
58 | instance/
59 | .webassets-cache
60 | 
61 | # Scrapy stuff:
62 | .scrapy
63 | 
64 | # Sphinx documentation
65 | docs/_build/
66 | 
67 | # PyBuilder
68 | target/
69 | 
70 | # Jupyter Notebook
71 | .ipynb_checkpoints
72 | 
73 | # pyenv
74 | .python-version
75 | 
76 | # celery beat schedule file
77 | celerybeat-schedule
78 | 
79 | # SageMath parsed files
80 | *.sage.py
81 | 
82 | # dotenv
83 | .env
84 | 
85 | # virtualenv
86 | .venv
87 | venv/
88 | ENV/
89 | 
90 | # Spyder project settings
91 | .spyderproject
92 | .spyproject
93 | 
94 | # Rope project settings
95 | .ropeproject
96 | 
97 | # mkdocs documentation
98 | /site
--------------------------------------------------------------------------------
/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: '3.2'
2 | 
3 | services:
4 |   gateway:
5 |     image: functions/gateway:0.6.15
6 |     volumes:
7 |       - /var/run/docker.sock:/var/run/docker.sock
8 |     environment:
9 |       dnsrr: 'true'
10 |     deploy:
11 |       placement:
12 |         constraints:
13 |           - 'node.role == manager'
14 |           - 'node.platform.os == linux'
15 |     ports:
16 |       - 8080:8080
17 |     networks:
18 |       - functions
19 | 
20 |   ftrigger-kafka:
21 |     image: ucalgary/ftrigger:master
22 |     command: kafka-trigger
23 |     networks:
24 |       - functions
25 |       - streaming
26 | 
27 |   echoit:
28 |     image: functions/alpine:health
29 |     labels:
30 |       function: "true"
31 |     environment:
32 |       fprocess: "cat"
33 |       no_proxy: "gateway"
34 |       https_proxy: $https_proxy
35 |     deploy:
36 |       placement:
37 |         constraints:
38 |           - 'node.platform.os == linux'
39 |       labels:
40 |         ftrigger.kafka: 'true'
41 |         ftrigger.kafka.topic: 'echo'
42 |     networks:
43 |       - functions
44 | 
45 |   prometheus:
46 |     image: functions/prometheus:latest
47 |     command: "-config.file=/etc/prometheus/prometheus.yml -storage.local.path=/prometheus -storage.local.memory-chunks=10000 --alertmanager.url=http://alertmanager:9093"
48 |     ports:
49 |       - 9090:9090
50 |     environment:
51 |       no_proxy: "gateway"
52 |     networks:
53 |       - functions
54 |     deploy:
55 |       placement:
56 |         constraints:
57 |           - 'node.role == manager'
58 |           - 'node.platform.os == linux'
59 | 
60 |   zookeeper:
61 |     image: confluentinc/cp-zookeeper:${CONFLUENT_PLATFORM_VERSION:-3.2.1}
62 |     hostname: zookeeper
63 |     environment:
64 |       - ZOOKEEPER_CLIENT_PORT=2181
65 |     networks:
66 |       - streaming
67 | 
68 |   kafka:
69 |     image: confluentinc/cp-kafka:${CONFLUENT_PLATFORM_VERSION:-3.2.1}
70 |     hostname: kafka
71 |     environment:
72 |       - KAFKA_ZOOKEEPER_CONNECT=zookeeper:2181
73 |       - KAFKA_ADVERTISED_LISTENERS=PLAINTEXT://kafka:9092
74 |     networks:
75 |       - streaming
76 | 
77 |   kafka-rest:
78 |     image: confluentinc/cp-kafka-rest:${CONFLUENT_PLATFORM_VERSION:-3.2.1}
79 |     hostname: kafka-rest
80 |     environment:
81 |       - ACCESS_CONTROL_ALLOW_ORIGIN_DEFAULT="*"
82 |       - KAFKA_REST_SCHEMA_REGISTRY_URL=http://schema-registry:8081
83 |       - KAFKA_REST_ZOOKEEPER_CONNECT=zookeeper:2181
84 |       - KAFKA_REST_HOST_NAME=kafka-rest
85 |       - KAFKA_REST_LISTENERS=http://kafka-rest:8082
86 |       - KAFKA_REST_BOOTSTRAP_SERVERS=PLAINTEXT://kafka:9092
87 |     ports:
88 |       - 8082:8082
89 |     networks:
90 |       - streaming
91 | 
92 |   schema-registry:
93 |     image: confluentinc/cp-schema-registry:${CONFLUENT_PLATFORM_VERSION:-3.2.1}
94 |     hostname: schema-registry
95 |     environment:
96 |       - SCHEMA_REGISTRY_KAFKASTORE_CONNECTION_URL=zookeeper:2181
97 |       - SCHEMA_REGISTRY_HOST_NAME=schema-registry
98 |       - SCHEMA_REGISTRY_LISTENERS=http://schema-registry:8081
99 |     networks:
100 |       - streaming
101 | 
102 |   kafka-topics-ui:
103 |     image: landoop/kafka-topics-ui:0.9.2
104 |     environment:
105 |       - KAFKA_REST_PROXY_URL=http://kafka-rest:8082
106 |       - PROXY=true
107 |     ports:
108 |       - 8000:8000
109 |     networks:
110 |       - streaming
111 | 
112 | networks:
113 |   functions:
114 |   streaming:
--------------------------------------------------------------------------------
/ftrigger/kafka.py:
--------------------------------------------------------------------------------
1 | import atexit
2 | import collections
3 | import logging
4 | import os
5 | 
6 | try:
7 |     import ujson as json
8 | except ImportError:
9 |     import json
10 | import pyjq
11 | from confluent_kafka import Consumer
12 | 
13 | from .trigger import Functions
14 | 
15 | 
16 | log = logging.getLogger(__name__)
17 | 
18 | 
19 | class KafkaTrigger(object):
20 | 
21 |     def __init__(self, label='ftrigger', name='kafka', refresh_interval=5,
22 |                  kafka='kafka:9092'):
23 |         # Pass the constructor arguments through instead of hard-coding them
24 |         self.functions = Functions(label=label, name=name,
25 |                                    refresh_interval=refresh_interval)
26 |         self.config = {
27 |             'bootstrap.servers': os.getenv('KAFKA_BOOTSTRAP_SERVERS', kafka),
28 |             'group.id': os.getenv('KAFKA_CONSUMER_GROUP', self.functions._register_label),
29 |             'default.topic.config': {
30 |                 'auto.offset.reset': 'largest',
31 |                 'auto.commit.interval.ms': 5000
32 |             }
33 |         }
34 | 
35 |     def run(self):
36 |         consumer = Consumer(self.config)
37 |         callbacks = collections.defaultdict(list)
38 |         functions = self.functions
39 | 
40 |         def close():
41 |             log.info('Closing consumer')
42 |             consumer.close()
43 |         atexit.register(close)
44 | 
45 |         while True:
46 |             add, update, remove = functions.refresh()
47 |             if add or update or remove:
48 |                 existing_topics = set(callbacks.keys())
49 | 
50 |                 for f in add:
51 |                     callbacks[functions.arguments(f).get('topic')].append(f)
52 |                 for f in update:
53 |                     pass
54 |                 for f in remove:
55 |                     callbacks[functions.arguments(f).get('topic')].remove(f)
56 | 
57 |                 interested_topics = set(callbacks.keys())
58 | 
59 |                 if existing_topics.symmetric_difference(interested_topics):
60 |                     log.debug(f'Subscribing to {interested_topics}')
61 |                     consumer.subscribe(list(interested_topics))
62 | 
63 |             message = consumer.poll(timeout=functions.refresh_interval)
64 |             if message is None:
65 |                 log.debug('No message received before the poll timeout')
66 |             elif not message.error():
67 |                 topic, key, value = message.topic(), \
68 |                                     message.key(), \
69 |                                     message.value()
70 |                 try:
71 |                     key = message.key().decode('utf-8')
72 |                 except Exception:
73 |                     pass
74 |                 try:
75 |                     value = json.loads(value)
76 |                 except Exception:
77 |                     pass
78 |                 for function in callbacks[topic]:
79 |                     jq_filter = functions.arguments(function).get('filter')
80 |                     try:
81 |                         if jq_filter and not pyjq.first(jq_filter, value):
82 |                             continue
83 |                     except Exception:
84 |                         log.error(f'Could not filter message value with {jq_filter}')
85 |                     data = self.function_data(function, topic, key, value)
86 |                     functions.gateway.post(functions._gateway_base + f'/function/{function["name"]}', data=data)
87 |             else:
88 |                 # Includes benign events such as partition EOF
89 |                 log.debug(f'Consumer event/error: {message.error()}')
90 | 
91 |     def function_data(self, function, topic, key, value):
92 |         # Default to sending the message value as the request body, matching
93 |         # the behavior documented in the README; 'key-value' sends both.
94 |         data_opt = self.functions.arguments(function).get('data', 'value')
95 | 
96 |         if data_opt == 'key-value':
97 |             return json.dumps({'key': key, 'value': value})
98 |         else:
99 |             # value may have been JSON-decoded above; re-serialize non-strings
100 |             return value if isinstance(value, (str, bytes)) else json.dumps(value)
101 | 
102 | 
103 | def main():
104 |     trigger = KafkaTrigger()
105 |     trigger.run()
106 | 
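107 | # Added for convenience (not in the original module): allow running this
108 | # module directly with `python -m ftrigger.kafka`, in addition to the
109 | # kafka-trigger console script declared in setup.py.
110 | if __name__ == '__main__':
111 |     main()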
--------------------------------------------------------------------------------
/ftrigger/trigger.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import os
3 | import re
4 | import time
5 | from collections import ChainMap
6 | 
7 | import requests
8 | from requests.adapters import HTTPAdapter
9 | from requests.packages.urllib3.util.retry import Retry
10 | 
11 | 
12 | log = logging.getLogger(__name__)
13 | 
14 | 
15 | class Functions(object):
16 | 
17 |     def __init__(self, label='ftrigger', name=None, refresh_interval=5, gateway='http://gateway:8080'):
18 |         self.refresh_interval = int(os.getenv('TRIGGER_REFRESH_INTERVAL', refresh_interval))
19 |         self.last_refresh = 0
20 |         self._functions = {}
21 |         self._stack_namespace = os.getenv('STACK_NAMESPACE', None)
22 |         self._label = os.getenv('TRIGGER_LABEL', label)
23 |         self._name = os.getenv('TRIGGER_NAME', name)
24 |         # Build the register label and argument pattern from the resolved
25 |         # values so the TRIGGER_LABEL/TRIGGER_NAME overrides take effect
26 |         self._register_label = f'{self._label}.{self._name}'
27 |         self._argument_pattern = re.compile(f'^{re.escape(self._label)}\\.{re.escape(self._name)}\\.([^.]+)$')
28 |         self._gateway_base = gateway.rstrip('/')
29 |         self.gateway = requests.Session()
30 |         self.gateway.mount(self._gateway_base, HTTPAdapter(max_retries=Retry(
31 |             total=None,
32 |             connect=int(os.getenv('GATEWAY_RETRY', 10)),
33 |             read=10,
34 |             redirect=10,
35 |             backoff_factor=0.1,
36 |             method_whitelist=frozenset(['HEAD', 'GET', 'POST', 'PUT', 'DELETE', 'OPTIONS', 'TRACE'])
37 |         )))
38 | 
39 |     @property
40 |     def label(self):
41 |         return self._label
42 | 
43 |     @property
44 |     def name(self):
45 |         return self._name
46 | 
47 |     def refresh(self, force=False):
48 |         if not force and time.time() - self.last_refresh < self.refresh_interval:
49 |             return [], [], []
50 | 
51 |         add_functions = []
52 |         update_functions = []
53 |         remove_functions = []
54 | 
55 |         functions = self.gateway.get(self._gateway_base + '/system/functions').json()
56 |         if self._stack_namespace:
57 |             functions = filter(lambda f: f.get('labels', {}).get('com.docker.stack.namespace') == self._stack_namespace, functions)
58 |         functions = list(filter(lambda f: self._register_label in ChainMap((f.get('labels') or {}), (f.get('annotations') or {})), functions))
59 | 
60 |         # Scan for new and updated functions
61 |         for function in functions:
62 |             existing_function = self._functions.get(function['name'])
63 | 
64 |             if not existing_function:
65 |                 # register a new function
66 |                 log.debug(f'Add function: {function["name"]}')
67 |                 add_functions.append(function)
68 |                 self._functions[function['name']] = function
69 |             elif False:
70 |                 # TODO: update detection is disabled; the gateway API does not
71 |                 # expose an update timestamp to compare against, e.g.
72 |                 # function['service'].attrs['UpdatedAt'] > existing_function['service'].attrs['UpdatedAt']
73 |                 log.debug(f'Update function: {function["name"]}')
74 |                 update_functions.append(function)
75 |                 self._functions[function['name']] = function
76 | 
77 |         # Scan for removed functions
78 |         for function_name in set(self._functions.keys()) - set([f['name'] for f in functions]):
79 |             function = self._functions.pop(function_name)
80 |             log.debug(f'Remove function: {function["name"]}')
81 |             remove_functions.append(function)
82 | 
83 |         self.last_refresh = time.time()
84 |         return add_functions, update_functions, remove_functions
85 | 
86 |     def arguments(self, function):
87 |         labels = ChainMap((function.get('labels') or {}), (function.get('annotations') or {}))
88 |         if self._register_label not in labels:
89 |             return None
90 | 
91 |         args = {m.group(1): v for m, v
92 |                 in [(self._argument_pattern.match(k), v) for k, v in labels.items()] if m}
93 |         log.debug(f'{function["name"]} arguments: {args}')
94 |         return args
95 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Kafka Triggers for OpenFaaS
2 | 
3 | OpenFaaS (Functions as a Service) is a framework by Alex Ellis for making serverless functions simple on Docker Swarm or Kubernetes. This repo provides a tool called `ftrigger` that sits alongside an OpenFaaS API gateway, observes labels on function services, and automatically triggers functions based on the conditions described in those labels.
4 | 
5 | The only supported trigger at the moment is for Kafka topics. A time-based trigger (something like cron) will be developed next.
6 | 
7 | ## Configuring Function Services to Respond to Kafka Messages
8 | 
9 | OpenFaaS functions can be declared as Docker services. If a service is labeled with `function: 'true'`, it is automatically registered with a running gateway and made available.
10 | 
11 | ```
12 | echoit:
13 |   image: functions/alpine:health
14 |   labels:
15 |     function: "true"
16 |   environment:
17 |     fprocess: "cat"
18 | ```
19 | 
20 | ftrigger watches a running gateway and scans functions for service labels matching the pattern `ftrigger.*`, where the wildcard is the name of a trigger service (only `kafka` is available). Each trigger is then configured with labels extending from that prefix, such as `ftrigger.kafka.*` for the kafka trigger.
21 | 
22 | In the following example, the `echoit` function is labeled to respond to Kafka messages on the `echo` topic.
23 | 
24 | ```
25 | echoit:
26 |   image: functions/alpine:health
27 |   labels:
28 |     function: "true"
29 |   environment:
30 |     fprocess: "cat"
31 |   deploy:
32 |     labels:
33 |       ftrigger.kafka: 'true'
34 |       ftrigger.kafka.topic: 'echo'
35 | ```
36 | 
37 | By default, the kafka trigger sends the Kafka message value as the function request body. This can be changed to a JSON object containing both the message key and value by setting `ftrigger.kafka.data` to `key-value`.
38 | 
39 | ```
40 | echoit:
41 |   ...
42 |   deploy:
43 |     labels:
44 |       ftrigger.kafka: 'true'
45 |       ftrigger.kafka.topic: 'echo'
46 |       ftrigger.kafka.data: 'key-value'
47 | ```
48 | 
49 | Messages can also be filtered with the optional `ftrigger.kafka.filter` label, which takes a [jq](https://stedolan.github.io/jq/) expression. When the label is set, a message triggers the function only if the expression evaluates to a truthy value against the JSON-decoded message value. For example, `ftrigger.kafka.filter: '.status == "active"'` would restrict the trigger to messages whose value has a `status` field equal to `active`. There are no other options at this time.
50 | 
51 | ## Test Drive
52 | 
53 | You can quickly deploy ftrigger on Play with Docker, a community-run Docker playground, by clicking the following button.
54 | 
55 | [![Try in PWD](https://cdn.rawgit.com/play-with-docker/stacks/cff22438/assets/images/button.png)](http://play-with-docker.com?stack=https://raw.githubusercontent.com/ucalgary/ftrigger/master/docker-compose.yml&stack_name=ftrigger)
56 | 
57 | The demo stack includes an `echoit` function configured to respond to messages on the Kafka topic named `echo`. You can produce messages on the topic by running `kafka-console-producer` in the kafka service's container, then typing into the console. Every line is published as a message on the topic.
58 | 
59 | ```
60 | SERVICE_NAME=ftrigger_kafka
61 | TASK_ID=$(docker service ps --filter 'desired-state=running' $SERVICE_NAME -q)
62 | CONTAINER_ID=$(docker inspect --format '{{ .Status.ContainerStatus.ContainerID }}' $TASK_ID)
63 | docker exec -it $CONTAINER_ID kafka-console-producer --broker-list kafka:9092 --topic echo
64 | (type messages here, one per line)
65 | ```
66 | 
67 | The gateway logs will show that the function is being called for every message. Here is an example run in Play with Docker: two messages (“abc” and “def”) are produced to the topic, and the gateway logs show the `echoit` function being called twice in response.
68 | 
69 | ```
70 | [node1] (local) root@10.0.2.3 ~
71 | $ SERVICE_NAME=ftrigger_kafka
72 | [node1] (local) root@10.0.2.3 ~
73 | $ TASK_ID=$(docker service ps --filter 'desired-state=running' $SERVICE_NAME -q)
74 | [node1] (local) root@10.0.2.3 ~
75 | $ CONTAINER_ID=$(docker inspect --format '{{ .Status.ContainerStatus.ContainerID }}' $TASK_ID)
76 | [node1] (local) root@10.0.2.3 ~
77 | $ docker exec -it $CONTAINER_ID kafka-console-producer --broker-list kafka:9092 --topic echo
78 | abc
79 | def
80 | ^C[node1] (local) root@10.0.2.3 ~
81 | $ docker service logs ftrigger_gateway
82 | ftrigger_gateway.1.p86ucxfnkubk@node1 | 2017/10/11 22:36:18 HTTP Read Timeout: 8s
83 | ftrigger_gateway.1.p86ucxfnkubk@node1 | 2017/10/11 22:36:18 HTTP Write Timeout: 8s
84 | ftrigger_gateway.1.p86ucxfnkubk@node1 | 2017/10/11 22:36:18 Docker API version: 1.32, 17.09.0-ce
85 | ftrigger_gateway.1.p86ucxfnkubk@node1 | Resolving: 'ftrigger_echoit'
86 | ftrigger_gateway.1.p86ucxfnkubk@node1 | [1507761443] Forwarding request [] to: http://10.0.1.5:8080/
87 | ftrigger_gateway.1.p86ucxfnkubk@node1 | [1507761443] took 0.009424 seconds
88 | ftrigger_gateway.1.p86ucxfnkubk@node1 | Resolving: 'ftrigger_echoit'
89 | ftrigger_gateway.1.p86ucxfnkubk@node1 | [1507761445] Forwarding request [] to: http://10.0.1.5:8080/
90 | ftrigger_gateway.1.p86ucxfnkubk@node1 | [1507761445] took 0.002649 seconds
91 | ```
92 | 
93 | ## Maintenance
94 | 
95 | This repository and image are currently maintained by the Research Management Systems project at the [University of Calgary](http://www.ucalgary.ca/).
96 | 
--------------------------------------------------------------------------------
/wait-for-it.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | # Use this script to test if a given TCP host/port or URL are available
3 | 
4 | cmdname=$(basename "$0")
5 | 
6 | echoerr() { if [ ${QUIET} -ne 1 ]; then echo "$@" 1>&2; fi }
7 | 
8 | usage()
9 | {
10 |     cat << USAGE >&2
11 | Usage:
12 |     ${cmdname} [-h host -p port | -u url] [-s] [-t timeout] [-- command args]
13 |     -h HOST          Host or IP under test
14 |     -p PORT          TCP port under test
15 |     -u URL           URL to test for success
16 |     -U username      If -u given, the username to authenticate with.
17 |     -P password      If -u given, the password to authenticate with.
18 |     -r retry_time    Length of time between retries. Default 5.
19 |     -s               Only execute subcommand if the test succeeds
20 |     -S               Execute subcommand regardless of whether the test succeeds (default)
21 |     -q               Do not output any status messages
22 |     -t TIMEOUT       Timeout in seconds, zero for no timeout
23 |     -- COMMAND ARGS  Execute command with args after the test finishes
24 | USAGE
25 |     exit 1
26 | }
27 | 
28 | wait_for()
29 | {
30 |     if [ -z "$URL" ]; then
31 |         if [ ${TIMEOUT} -gt 0 ]; then
32 |             echoerr "${cmdname}: waiting ${TIMEOUT} seconds for ${HOST}:${PORT}"
33 |         else
34 |             echoerr "${cmdname}: waiting without a timeout for ${HOST}:${PORT}"
35 |         fi
36 |         start_ts=$(date +%s)
37 |         while :; do
38 |             if [ ${ISBUSY} -eq 1 ]; then
39 |                 nc -z ${HOST} ${PORT}
40 |                 result=$?
41 |             else
42 |                 (echo > /dev/tcp/${HOST}/${PORT}) >/dev/null 2>&1
43 |                 result=$?
44 |             fi
45 |             if [ ${result} -eq 0 ]; then
46 |                 end_ts=$(date +%s)
47 |                 echoerr "${cmdname}: ${HOST}:${PORT} is available after $((end_ts - start_ts)) seconds"
48 |                 break
49 |             fi
50 |             sleep ${RETRY_TIME}
51 |         done
52 |     else
53 |         if [ ${TIMEOUT} -gt 0 ]; then
54 |             echoerr "${cmdname}: waiting ${TIMEOUT} seconds for ${URL}"
55 |         else
56 |             echoerr "${cmdname}: waiting without a timeout for ${URL}"
57 |         fi
58 |         start_ts=$(date +%s)
59 |         if [ -n "${USERNAME}" ]; then
60 |             AUTH="-u ${USERNAME}:${PASSWORD}"
61 |         fi
62 |         result=1
63 |         while :; do
64 |             response_code=$(curl -s -o /dev/null -H 'Cache-Control: no-cache' -k -w '%{response_code}' ${AUTH} ${URL})
65 |             if [ ${response_code} -eq 200 ]; then
66 |                 result=0
67 |                 end_ts=$(date +%s)
68 |                 echoerr "${cmdname}: ${URL} is available after $((end_ts - start_ts)) seconds"
69 |                 break
70 |             fi
71 |             sleep ${RETRY_TIME}
72 |         done
73 |     fi
74 |     return ${result}
75 | }
76 | 
77 | wait_for_wrapper()
78 | {
79 |     # In order to support SIGINT during timeout: http://unix.stackexchange.com/a/57692
80 |     if [ ${QUIET} -eq 1 ]; then
81 |         ARGS="$debug -q"
82 |     else
83 |         ARGS="$debug"
84 |     fi
85 |     if [ -n "$URL" ]; then
86 |         ARGS="$ARGS -u ${URL} ${USERNAME:+-U ${USERNAME}} ${PASSWORD:+-P ${PASSWORD}}"
87 |     else
88 |         ARGS="$ARGS -h ${HOST} -p ${PORT}"
89 |     fi
90 | 
91 |     if [ -n "$(type -t timeout)" ]; then
92 |         timeout ${BUSYTIMEFLAG} ${TIMEOUT} $0 ${ARGS} -c -t ${TIMEOUT} &
93 |     else
94 |         gtimeout ${TIMEOUT} $0 ${ARGS} -c -t ${TIMEOUT} &
95 |     fi
96 | 
97 |     PID=$!
98 |     trap "kill -INT -${PID}" INT
99 |     wait ${PID}
100 |     RESULT=$?
101 |     if [ ${RESULT} -ne 0 ]; then
102 |         if [ -n "$URL" ]; then
103 |             echoerr "${cmdname}: timeout occurred after waiting ${TIMEOUT} seconds for ${URL}"
104 |         else
105 |             echoerr "${cmdname}: timeout occurred after waiting ${TIMEOUT} seconds for ${HOST}:${PORT}"
106 |         fi
107 |     fi
108 |     return ${RESULT}
109 | }
110 | 
111 | URL=""
112 | USERNAME=""
113 | PASSWORD=""
114 | RETRY_TIME=5
115 | TIMEOUT=15
116 | STRICT=0
117 | CHILD=0
118 | QUIET=0
119 | debug=""
120 | while getopts csSqr:t:h:p:u:U:P:x OPT; do
121 |     case "${OPT}" in
122 |         c) CHILD=1;;
123 |         s) STRICT=1;;
124 |         S) STRICT=0;;
125 |         q) QUIET=1;;
126 |         r) RETRY_TIME=${OPTARG};;
127 |         t) TIMEOUT=${OPTARG};;
128 |         h) HOST=${OPTARG};;
129 |         p) PORT=${OPTARG};;
130 |         u) URL=${OPTARG};;
131 |         P) PASSWORD=${OPTARG};;
132 |         U) USERNAME=${OPTARG};;
133 |         x) set -x; debug="-x";;
134 |     esac
135 | done
136 | 
137 | shift $((OPTIND - 1))
138 | CLI="$@"
139 | 
140 | if [ -z "$URL" -a \( -z "${HOST}" -o -z "${PORT}" \) ]; then
141 |     echoerr "Error: you need to provide a URL or a host and port to test."
142 |     usage
143 | fi
144 | 
145 | # Check whether the available timeout implementation is the busybox applet,
146 | # which needs its timeout value passed via the -t flag
147 | if [ "$(type -t timeout)" != builtin ]; then
148 |     ISBUSY=1
149 |     BUSYTIMEFLAG="-t"
150 | else
151 |     ISBUSY=0
152 |     BUSYTIMEFLAG=""
153 | fi
154 | 
155 | if [ ${CHILD} -gt 0 ]; then
156 |     wait_for
157 |     RESULT=$?
158 |     exit ${RESULT}
159 | else
160 |     if [ ${TIMEOUT} -gt 0 ]; then
161 |         wait_for_wrapper
162 |         RESULT=$?
163 |     else
164 |         wait_for
165 |         RESULT=$?
166 |     fi
167 | fi
168 | 
169 | if [ -n "${CLI}" ]; then
170 |     if [ ${RESULT} -ne 0 -a ${STRICT} -eq 1 ]; then
171 |         echoerr "${cmdname}: strict mode, refusing to execute subprocess"
172 |         exit ${RESULT}
173 |     fi
174 |     exec ${CLI}
175 | else
176 |     exit ${RESULT}
177 | fi
178 | 
--------------------------------------------------------------------------------