├── hdfs_fuse_mount_monitor.sh ├── spinner.py ├── copyMerge.py ├── dbus.py ├── keytab.py └── znote.py /hdfs_fuse_mount_monitor.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/bash 2 | 3 | FUSE_MOUNT="/hdfs_mount" 4 | 5 | access_fuse() { 6 | ls $FUSE_MOUNT >/dev/null 2>&1 7 | } 8 | 9 | fix_fuse() { 10 | 11 | { 12 | echo "Bouncing hdfs fuse mount $FUSE_MOUNT" 13 | 14 | sudo fusermount -uz $FUSE_MOUNT 15 | sleep 1 16 | sudo mount $FUSE_MOUNT 17 | 18 | } | mailx -s "HDFS fuse mount bounced on `hostname -s`" rdautkhanov@epsilon.com 19 | 20 | } 21 | 22 | access_fuse & 23 | access_fuse_pid=$! 24 | 25 | waited_seconds=0 26 | max_wait_seconds=20 27 | 28 | while kill -0 "$access_fuse_pid" >/dev/null 2>&1; do 29 | ## echo "PROCESS pid=$access_fuse_pid IS RUNNING" 30 | 31 | if [ $waited_seconds -gt $max_wait_seconds ] 32 | then 33 | fix_fuse 34 | 35 | kill $access_fuse_pid 36 | sleep 1 37 | kill -9 $access_fuse_pid 38 | 39 | exit 1 40 | fi 41 | 42 | sleep 1 43 | ((waited_seconds++)) 44 | done 45 | 46 | ## echo "PROCESS $access_fuse_pid TERMINATED" 47 | exit 0 48 | 49 | ## TODO: 50 | ## 1. the script assumes HDFS fuse mount is mounted, but could be hanging; 51 | ## improve by checking if the mount isn't mounted at all. 52 | 53 | -------------------------------------------------------------------------------- /spinner.py: -------------------------------------------------------------------------------- 1 | 2 | 3 | import sys 4 | import threading 5 | import itertools 6 | 7 | 8 | class Spinner: 9 | 10 | def __init__(self, message, delay=0.1): 11 | self.spinner = itertools.cycle(['-', '/', '|', '\\']) 12 | self.delay = delay 13 | self.busy = False 14 | self.spinner_visible = False 15 | sys.stdout.write(message) 16 | 17 | def write_next(self): 18 | with self._screen_lock: 19 | if not self.spinner_visible: 20 | sys.stdout.write(next(self.spinner)) 21 | self.spinner_visible = True 22 | sys.stdout.flush() 23 | 24 | def remove_spinner(self, cleanup=False): 25 | with self._screen_lock: 26 | if self.spinner_visible: 27 | sys.stdout.write('\b') 28 | self.spinner_visible = False 29 | if cleanup: 30 | sys.stdout.write(' ') # overwrite spinner with blank 31 | sys.stdout.write('\r') # move to next line 32 | sys.stdout.flush() 33 | 34 | def spinner_task(self): 35 | while self.busy: 36 | self.write_next() 37 | time.sleep(self.delay) 38 | self.remove_spinner() 39 | 40 | def __enter__(self): 41 | if sys.stdout.isatty(): 42 | self._screen_lock = threading.Lock() 43 | self.busy = True 44 | self.thread = threading.Thread(target=self.spinner_task) 45 | self.thread.start() 46 | 47 | def __exit__(self, exc_type, exc_val, exc_traceback): 48 | if sys.stdout.isatty(): 49 | self.busy = False 50 | self.remove_spinner(cleanup=True) 51 | else: 52 | sys.stdout.write('\r') 53 | 54 | 55 | 56 | ### usage example: 57 | # 58 | # with Spinner("just waiting a bit.. "): 59 | # 60 | # time.sleep(3) 61 | -------------------------------------------------------------------------------- /copyMerge.py: -------------------------------------------------------------------------------- 1 | 2 | # This function implements copyMerge from Hadoop API 3 | # copyMerge will be deprecated in Hadoop 3.0 4 | # This can be used in a pySpark application (assumes `sc` variable exists) 5 | 6 | def copyMerge (src_dir, dst_file, overwrite=False, deleteSource=False, debug=False): 7 | 8 | # this function has been migrated to https://github.com/Tagar/abalon Python package 9 | 10 | hadoop = sc._jvm.org.apache.hadoop 11 | conf = hadoop.conf.Configuration() 12 | fs = hadoop.fs.FileSystem.get(conf) 13 | 14 | # check files that will be merged 15 | files = [] 16 | for f in fs.listStatus(hadoop.fs.Path(src_dir)): 17 | if f.isFile(): 18 | files.append(f.getPath()) 19 | if not files: 20 | raise ValueError("Source directory {} is empty".format(src_dir)) 21 | files.sort(key=lambda f: str(f)) 22 | 23 | # dst_permission = hadoop.fs.permission.FsPermission.valueOf(permission) # , permission='-rw-r-----' 24 | out_stream = fs.create(hadoop.fs.Path(dst_file), overwrite) 25 | 26 | try: 27 | # loop over files in alphabetical order and append them one by one to the target file 28 | for file in files: 29 | if debug: 30 | print("Appending file {} into {}".format(file, dst_file)) 31 | 32 | in_stream = fs.open(file) # InputStream object 33 | try: 34 | hadoop.io.IOUtils.copyBytes(in_stream, out_stream, conf, False) # False means don't close out_stream 35 | finally: 36 | in_stream.close() 37 | finally: 38 | out_stream.close() 39 | 40 | if deleteSource: 41 | fs.delete(hadoop.fs.Path(src_dir), True) # True=recursive 42 | if debug: 43 | print("Source directory {} removed.".format(src_dir)) 44 | 45 | copyMerge('/user/rdautkha/testdir', '/user/rdautkha/test_merge.txt', debug=True, overwrite=True, deleteSource=True) 46 | 47 | -------------------------------------------------------------------------------- /dbus.py: -------------------------------------------------------------------------------- 1 | #!python 2 | 3 | import sys 4 | import re 5 | from time import mktime, strptime, strftime 6 | 7 | 8 | class DbuParser: 9 | 10 | log4j_time_format = "%y/%m/%d %H:%M:%S" 11 | nice_time_format = "%a, %d %b %Y %H:%M:%S" 12 | 13 | @staticmethod 14 | def parse_time(timestr: str): 15 | return strptime(timestr, DbuParser.log4j_time_format) 16 | 17 | @staticmethod 18 | def parse_ts(timestr: str): 19 | return mktime(strptime(timestr, DbuParser.log4j_time_format)) 20 | 21 | def __init__(self): 22 | 23 | self.re_parser_time = re.compile(r"^(\S+ \S+) INFO ") 24 | self.re_parser = re.compile(r"^(\S+ \S+) INFO .+ Executor updated: .+ is now (\S+)") 25 | 26 | self.started_at = 0 27 | 28 | self.current_executors = 0 29 | self.max_executors = 0 30 | 31 | self.previous_checkpoint_ts = 0 32 | self.previous_line_time = None 33 | 34 | self.integral_seconds = 0 # total worker-seconds 35 | self.stopped_at = self.total_runtime = self.avg_executors = None 36 | 37 | def match_process(self, when_ts, what): 38 | 39 | self.integral_seconds += (when_ts - self.previous_checkpoint_ts) * self.current_executors 40 | 41 | if what == 'RUNNING': 42 | self.current_executors += 1 43 | if self.max_executors < self.current_executors: 44 | self.max_executors = self.current_executors 45 | elif what == 'LOST': 46 | self.current_executors -= 1 47 | assert self.current_executors >= 0, "Number of active executors can't be negative" 48 | elif what == 'END': 49 | self.current_executors = 0 50 | 51 | self.previous_checkpoint_ts = when_ts 52 | 53 | # print(f"{when_ts}: {self.current_executors} active executors; " 54 | # f"accumulated {self.integral_seconds / 60:.01f} executor-minutes") 55 | 56 | def first_line(self, line): 57 | self.previous_line_time = DbuParser.parse_time(line[0:17]) 58 | self.started_at = mktime(self.previous_line_time) 59 | print(f"""Job started at {strftime(self.nice_time_format, self.previous_line_time)}""") 60 | 61 | def print_graph(self, line): 62 | if not self.re_parser_time.match(line): 63 | return 64 | current_time = DbuParser.parse_time(line[0:17]) 65 | if current_time.tm_min != self.previous_line_time.tm_min: 66 | print(f"""{strftime("%H:%M", current_time)} {'*' * self.current_executors}""") 67 | self.previous_line_time = current_time 68 | 69 | def try_match(self, line): 70 | match = self.re_parser.match(line) 71 | if not match: 72 | return 73 | 74 | (when, what) = match.groups() # ts, running/lost 75 | when_ts = DbuParser.parse_ts(when) 76 | 77 | self.match_process(when_ts, what) 78 | 79 | def finalize(self, line): 80 | self.stopped_at = DbuParser.parse_ts(line[0:17]) 81 | self.total_runtime = int(self.stopped_at - self.started_at) 82 | print(f"""Job finished at {strftime(self.nice_time_format, DbuParser.parse_time(line[0:17]))}""") 83 | 84 | self.match_process(self.stopped_at, 'END') 85 | 86 | print(f"Script runtime {int(self.total_runtime / 60)}m {self.total_runtime % 60}s," 87 | f" or {self.total_runtime / 60:.01f} driver-minutes") 88 | 89 | self.avg_executors = self.integral_seconds / self.total_runtime 90 | print(f"Max.executors: {self.max_executors}; Avg.executors: {self.avg_executors:.01f}; " 91 | f"total {self.integral_seconds / 60:.01f} worker-minutes") 92 | 93 | 94 | def main(filename: str, print_executors_graph: bool = True): 95 | 96 | print(f"Processing {filename}") 97 | 98 | linecount = 0 99 | 100 | parser = DbuParser() 101 | 102 | with open(filename, "r") as f: 103 | for line in f: 104 | if not linecount: 105 | parser.first_line(line) 106 | 107 | linecount += 1 108 | 109 | if print_executors_graph: 110 | # optional graph - number of executors 111 | parser.print_graph(line) 112 | 113 | parser.try_match(line) 114 | 115 | print(f"{linecount:,} lines processed.") 116 | 117 | parser.finalize(line) 118 | 119 | 120 | if __name__ == '__main__': 121 | if len(sys.argv) <= 1: 122 | print(f"{sys.argv[0]} ") 123 | sys.exit(2) 124 | 125 | main(sys.argv[1]) 126 | -------------------------------------------------------------------------------- /keytab.py: -------------------------------------------------------------------------------- 1 | #!python 2 | 3 | from getpass import getuser, getpass 4 | 5 | default_keytab = '/home/%s/.kt' % getuser() 6 | default_domain = 'CORP.EPSILON.COM' 7 | ktutil = '/usr/bin/ktutil' 8 | 9 | __doc__ = """Keytab file maintenance utility. 10 | 11 | Usage: 12 | keytab.py [-u | --update] [--domain=realm] [--keytab=filename] 13 | [--and-test] [--algorithms=list] [--kvno=entry] 14 | [-d | --debug] 15 | keytab.py test [--domain=realm] [--keytab=filename] 16 | keytab.py (-h | --help) 17 | 18 | Commands: 19 | (default) Creates/overwrites Keytab file 20 | test Use generated keytab with kinit to test creating Kerberos ticket. 21 | 22 | Arguments: 23 | Is your Windows / Active Directory login name 24 | (and not UNIX login, in case if it's different from AD login). 25 | 26 | Options: 27 | -h --help Show this screen 28 | --update Overwrites just --kvno keytab entry and leaves other entries the same. 29 | --domain=realm Kerberos domain / AD realm [default: %s] 30 | --keytab=filename Keytab location [default: %s] 31 | --and-test After keytab is created/updated, try to use it by creating a Kerberos ticket 32 | -d --debug Print debug information. Note: password is visible in the log output with -d 33 | --algorithms=list List of algorithm(s) used for each keytab entry. 34 | The list has to be comma-separated [default: rc4-hmac,aes256-cts] 35 | --kvno=entry Key entry in keytab, passed as -k kvno argument to 36 | ktutil's addent command [default: 1] 37 | 38 | Assumptions: 39 | 1. This script expects MIT Kerberos compatible ktutil command 40 | to be available as %s. 41 | Script is known not to work with Heimdal Kerberos compatible ktutil. 42 | 2. docopt, pexpect Python modules should be available. 43 | 44 | History: 45 | 01/16/2017 rdautkhanov@epsilon.com - 1.0 Initial version 46 | """ % \ 47 | (default_domain, default_keytab, ktutil) 48 | 49 | 50 | ################################################################################################### 51 | ################################################################################################### 52 | 53 | 54 | try: 55 | import pexpect 56 | from docopt import docopt 57 | except ImportError: 58 | exit('Not found doctopt or pexpect Python module. Both are required') 59 | import sys 60 | 61 | 62 | # Parse command-line arguments 63 | args = docopt(__doc__) 64 | 65 | Debug = args['--debug'] 66 | keytab = args['--keytab'] 67 | principal = args[''] +'@'+ args['--domain'] 68 | 69 | if Debug: print(args) 70 | 71 | def kinit_test (): 72 | """ 73 | Runs kinit to create a Kerberos ticket using (just) generated keytab file. 74 | Returns kinit's return code (0 == OK) 75 | """ 76 | 77 | from subprocess import call 78 | retcode = call(['/usr/bin/kinit', '-kt', keytab, principal], stdout=sys.stdout) 79 | if retcode == 0: 80 | print("kinit successfully created Kerberos ticket using this keytab.") 81 | else: 82 | print("kinit wasn't able to create Kerberos ticket using this keytab.") 83 | return retcode 84 | 85 | if args['test']: 86 | sys.exit(kinit_test()) 87 | 88 | 89 | # 0. Start ktutil command as a child process 90 | child = pexpect.spawn(ktutil) 91 | default_prompt = 'ktutil: ' 92 | 93 | def wait (prompt=default_prompt): 94 | ''' Wait for ktutil's prompt 95 | Returns true if ktutil's cli command produced output (error message) or unexpected prompt 96 | ''' 97 | 98 | # always wait for default prompt too in case of error, so no timeout exception 99 | i = child.expect([prompt, default_prompt], timeout=3) 100 | 101 | lines = child.before.strip().split('\n') 102 | problem = ( len(lines) > 1 # if there is an error message 103 | or (i == 1) # or ktutil gives default prompt when another prompt expected 104 | ) 105 | if problem: 106 | print('ktutil error: ' + lines[1]) 107 | return problem 108 | 109 | # wait for ktutil to show its first prompt 110 | wait() 111 | if Debug: 112 | child.logfile = sys.stdout 113 | print('Spawned ktutil successfully.') 114 | 115 | # 1. if it's an update, then read in keytab first 116 | wkt_action = 'save' 117 | if args['--update']: 118 | wkt_action = 'update' 119 | child.sendline('read_kt ' + keytab) 120 | if wait(): 121 | print("Couldn't read keytab file %s\nNew file will be created instead" % keytab) 122 | # TODO: if KVNO already exists, ktutil may duplicate records in that entry 123 | else: 124 | # else - try removing existing keytab 125 | from os import remove 126 | try: 127 | remove(keytab) 128 | if Debug: 129 | print('Existing keytab %s removed.' % keytab) 130 | except OSError: 131 | pass # assuming e.errno==ENOENT - file doesn't exist 132 | 133 | # 2. Prompt user for Principal's password 134 | password = getpass('Active Directory user %s password: ' % principal) 135 | 136 | # 3. For each algorithm, call ktutil's addent command 137 | for algorithm in args['--algorithms'].split(','): 138 | 139 | child.sendline('addent -password -p %s -k %s -e %s' 140 | % (principal, args['--kvno'], algorithm) 141 | ) 142 | if wait('Password for ' + principal): 143 | exit('Unexpected ktutil error while waiting for password prompt') 144 | 145 | child.sendline(password) 146 | if wait(): 147 | exit('Unexpected ktutil error after addent command') 148 | 149 | # 4. Now we can save keytab file 150 | child.sendline('write_kt ' + keytab) 151 | if wait(): 152 | exit("Couldn't write keytab file " + keytab) 153 | print("Keytab file %s %sd." % (keytab, wkt_action)) 154 | 155 | # 5. exit from ktutil 156 | child.sendline('quit') 157 | child.close() # termintate ktutil (if it's not closed already) 158 | 159 | # 6. Optionally test newly created/update keytab 160 | if args['--and-test']: 161 | kinit_test() 162 | 163 | 164 | -------------------------------------------------------------------------------- /znote.py: -------------------------------------------------------------------------------- 1 | #!python 2 | 3 | from getpass import getuser 4 | 5 | default_notebooks_dir = '/home/%s/zeppelin/notebooks' % getuser() 6 | 7 | __doc__ = """ Zeppelin notebooks helper tool. 8 | 9 | Usage: 10 | znote.py extract [--fetch=interpreter] [--notebooks-dir=directory] 11 | [--skip-select] [--skip-noop] [--no-comments] [--get-disabled] 12 | [--default-interpreter=interpreter] [--sqlc-var=sqlc] [--imports] 13 | znote.py clean-output [--notebooks-dir=directory] 14 | znote.py (-h | --help) 15 | 16 | Commands: 17 | extract Extract code of type --type (defaults to Python) 18 | clean-output Removes output from all paragraphs 19 | (good to do before source rep commits and code reviews) 20 | 21 | Examples: 22 | python ./znote.py extract 2CAWV29G2 23 | python ./znote.py clean-output 2C9CTUGT3 24 | 25 | Arguments: 26 | Note ID - see directories under %s 27 | (also visible in URI when that note is open in Zeppelin) 28 | 29 | Options: 30 | --fetch=interpreter Defines which language/interpreter to fetch. [default: pyspark] 31 | --notebooks-dir=dir Directory that stores all Zeppelin notes. 32 | [default: %s] 33 | --no-comments Don't add comments formed from paragraph titles 34 | and markdown paragraphs 35 | --get-disabled Produce code even for disabled paragraphs 36 | --skip-selects Adds heuristics that detect spark-sql paragraphs that 37 | just selects data (and not for example alters tables), and skips those 38 | --skip-noop Adds heuristics that detect no-op paragraphs (like to display data 39 | or schema) and skip those paragraphs 40 | --default-interpreter=interpreter 41 | Sets default interpreter type as configured in Zeppelin. 42 | Because it's possible to not set paragraph type each time for default 43 | interpreter type, this script has to know what's default [default: pyspark] 44 | --sqlc-var=sqlc SQL Context variable [default: sqlc] 45 | --imports Add a header to embed necessary Spark import 46 | -h --help Show this screen 47 | 48 | Know bugs / todo-s: 49 | 1. This script should work with --fetch other than pyspark, but it was tested only with pyspark. 50 | Some options like --skip-noop should be tweaked for other languages other than pyspark 51 | 2. Script will be fed to stdout - make an optional parameter to write/append to a file. 52 | 53 | History: 54 | 03/07/2017 rdautkhanov@epsilon.com - 1.0 Initial version 55 | """ % \ 56 | (default_notebooks_dir, default_notebooks_dir) 57 | 58 | ################################################################################################### 59 | ################################################################################################### 60 | 61 | 62 | import json 63 | import re 64 | from os.path import isdir, isfile 65 | 66 | try: 67 | from docopt import docopt # docopt isn't part of Anaconda - so checking just this pkg 68 | except ImportError: 69 | exit('doctopt Python package not found.') 70 | 71 | 72 | # Parse command-line arguments using above pattern/description block 73 | args = docopt(__doc__) 74 | ## print(args) 75 | 76 | noteid = args[''] 77 | 78 | notebooks_dir = args['--notebooks-dir'] 79 | note_dir = notebooks_dir + '/' + noteid 80 | json_name = 'note.json' 81 | json_file = note_dir + '/' + json_name 82 | 83 | if not isdir(notebooks_dir): exit("Directory specified in --notebooks-dir doesn't exist (%s)" % notebooks_dir) 84 | if not isdir(note_dir): exit("Note directory doesn't exist (%s). Check if NoteID is correct" % note_dir) 85 | if not isfile(json_file): exit("%s file doesn't exist where expected: %s" % (json_name, json_file)) 86 | 87 | with open(json_file) as data_file: 88 | data = json.load(data_file) 89 | 90 | assert data['id'] == noteid, "Unexpected note id in " + json_file 91 | 92 | if args['extract']: 93 | 94 | fetch_intp = args['--fetch'] 95 | default_intp = args['--default-interpreter'] 96 | sqlc = args['--sqlc-var'] 97 | comments = not args['--no-comments'] 98 | 99 | if args['--imports']: 100 | from textwrap import dedent 101 | print(dedent(''' 102 | from pyspark.sql import HiveContext 103 | from pyspark import SparkConf, SparkContext 104 | 105 | # start spark application context with application name = note title 106 | conf = SparkConf().setAppName('%s') 107 | sc = SparkContext(conf=conf) 108 | 109 | # after Spark context started, reduce logging level to ERROR: 110 | log4j = sc._jvm.org.apache.log4j 111 | log4j.LogManager.getRootLogger().setLevel(log4j.Level.ERROR) 112 | 113 | ## sqlc = HiveContext(sc) # this should be part of the notebook 114 | 115 | ''' % data['name'])) 116 | 117 | print "## Fetching %s code from Zeppelin note '%s', id %s" % (fetch_intp, data['name'], noteid) 118 | 119 | for p in data['paragraphs']: 120 | 121 | print 122 | 123 | if comments: 124 | print '## Paragraph %s:' % p['id'] 125 | 126 | # Check if paragraph is disabled 127 | enabled = p['config'].get('enabled', True) # by default assume enabled 128 | if not enabled: 129 | if args['--get-disabled']: 130 | if comments: print "## paragraph '%s' is disabled but will run because of --get-disabled" % p['id'] 131 | else: 132 | if comments: print "## paragraph '%s' is disabled and will be skipped" % p['id'] 133 | continue 134 | 135 | # print title 136 | title = p.get('title', '') 137 | if title and comments: 138 | print "## **** %s ****" % title 139 | 140 | text = p.get('text', '') # empty text '' is the default 141 | 142 | # Zeppelin thing - 'scala' is default irrespective of default interpreter setting 143 | # para_type = p['config'].get('editorMode', 'ace/mode/scala').split('/')[2] # takes last word 144 | # So editorMode is misleading and below code will infer type from the text instead.. 145 | 146 | # Detect paragraph type 147 | para_type_re = '^\s*%(\w+)\s+' 148 | m = re.match(para_type_re, text, flags=re.MULTILINE) 149 | if m: 150 | para_type = m.group(1) 151 | text = re.sub(para_type_re, '', text, count=1, flags=re.MULTILINE) # remove para type from the code too 152 | else: 153 | para_type = default_intp 154 | 155 | # if comments: print "## (paragraph type is %s)" % para_type 156 | 157 | if re.match('^\s*$', text): 158 | if comments: print "## (no code)" 159 | continue 160 | 161 | # Now we can append code of the current paragraph 162 | if para_type == fetch_intp: 163 | 164 | # first, check if it's a no-op command that we should skip 165 | if args['--skip-noop'] and fetch_intp=='pyspark' and re.match( 166 | r''' \A \s * # any whitespace before code 167 | ( \w + \. (show|printSchema) \( \) # SKIP dataframe.show() or printSchema 168 | | z \. show \( . + \) # OR z.show( dataframe ) 169 | | print \s * \(? .+ \)? # or a print statement 170 | ) 171 | \s * \Z # any whitespace before end of the code 172 | ''' 173 | , text 174 | , flags=re.VERBOSE 175 | ): 176 | if comments: print("## skipping no-op code because of --skip-noop") 177 | continue # skip to next paragraph 178 | 179 | print text 180 | 181 | elif para_type == 'sql': 182 | 183 | # first, check if it's a pure select statement that we should skip 184 | if args['--skip-select'] and re.match( 185 | r'''^( \s* # optional comment block: 186 | -- # sql syntax comment - double hyphen 187 | .* \n # commented line up to newline is ignored 188 | | \s* \n # OR empty line - since commented and empty lines can interleave 189 | )* # 0 or more commented or empty lines 190 | \s* # ignore spaces before `select` keyword 191 | (SELECT|explain) \s+ # `select` or `explain` keyword followed by whitespace 192 | ''' 193 | , text 194 | , flags=re.IGNORECASE|re.VERBOSE 195 | ): 196 | if comments: print("## skipping SELECT/explain statement because of --skip-select") 197 | continue # skip to next paragraph 198 | 199 | print "%s.sql(''' %s ''')" % (sqlc, text) 200 | 201 | elif para_type == 'md': 202 | 203 | text = re.sub('^', '# ', text, flags=re.MULTILINE) # show MarkDown as-is, just commented 204 | print text 205 | 206 | elif para_type == 'sh': 207 | 208 | # TODO: 209 | 210 | continue 211 | 212 | else: 213 | if comments: print("## skipping non-target language '%s'" % para_type) 214 | 215 | exit() 216 | --------------------------------------------------------------------------------