├── .gitignore ├── tests │   ├── __init__.py │   ├── test_depsgraph.py │   ├── test_depstrace.py │   └── test_parsers.py ├── README ├── COPYING ├── depstrace.py └── depslint.py /.gitignore: -------------------------------------------------------------------------------- 1 | *.pyc 2 | .files 3 | GPATH 4 | GRTAGS 5 | GTAGS 6 | ID 7 | TAGS 8 | tags 9 | -------------------------------------------------------------------------------- /tests/__init__.py: -------------------------------------------------------------------------------- 1 | from .test_depstrace import * 2 | from .test_parsers import * 3 | from .test_depsgraph import * 4 | -------------------------------------------------------------------------------- /README: -------------------------------------------------------------------------------- 1 | Introduction 2 | ============ 3 | 4 | Depslint is a tool for analyzing and verifying build dependencies. It 5 | was written to assist in switching a large project's build system from 6 | an existing complex legacy solution to ninja. 7 | 8 | The tool works by running a complete build under 'strace', 9 | recording the relevant system calls to discover the real dependencies, 10 | and then verifying that the dependencies defined in the ninja manifest 11 | and the corresponding depfiles are correct. 12 | 13 | Purpose 14 | ======= 15 | Verify a build manifest for: 16 | 17 | * Correctness of the 'first build' (e.g., building following a 18 | pristine checkout, before any dep files are available). This is 19 | mostly useful for projects with autogenerated headers, to verify 20 | that build rules ensure autogenerated files are created before the 21 | source files that include them are compiled. 22 | 23 | * Correctness of the 'incremental build' (e.g., a rebuild following a 24 | modification of any file under the build tree when the tree 25 | already contains some of the build products and dep files). 26 | 27 | * Redundant dependency specifications in the manifest that make 28 | incremental rebuilds longer than necessary. 29 | 30 | * More to come. 31 | 32 | Quick usage guide 33 | ================= 34 | cd ~/myproject 35 | ninja -t clean 36 | git clean -fdX 37 | 38 | # You can substitute the above with any suitable command that cleans 39 | # your build tree. 40 | 41 | ~/depslint/depstrace.py ninja 42 | 43 | # The above will build your project under 'strace', parse the output and 44 | # store the discovered dependency information in 'deps.lst'. The complete 45 | # strace output will also be stored in 'strace_log.txt' for debugging, 46 | # if necessary. 47 | 48 | ~/depslint/depslint.py --stats=all 49 | 50 | # Parses build.ninja, loads depfiles, builds the dependency graph and 51 | # validates it against the 'real' dependency information in 'deps.lst'. 52 | # If a '.depslint' file is available, it can be used to set 53 | # 'IGNORED_SUFFICES' and 'IMPLICIT_DEPS_MATCHERS' to ignore certain 54 | # dependency issues (see the appendix at the end of this file). Tool 55 | # output with extra details is stored in 'depslint.log'. 56 | 57 | Documentation 58 | ============= 59 | The documentation will eventually appear under doc/. 60 | 61 | Hacking & Testing 62 | ================= 63 | 64 | Unit tests are available under the 'tests' directory. You can run 65 | them as 'python -m unittest tests' from the project root. If you 66 | wish to contribute changes, it would be best if you add supporting 67 | tests as well. 
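
Appendix: examples
==================

For reference, depstrace.py writes one record per detected build rule to
'deps.lst' as a Python-style dictionary literal, one per line (see
process_results() in depstrace.py), for example:

  {'OUT': ['tst', 'tst.d'], 'IN': ['gcc', 'tst.c', 'tst.h'], 'LINE': 5, 'PID': '5086'}

The sketch below shows what a '.depslint' file might contain. This is an
illustrative assumption only: the variable names come from the usage guide
above, but the exact file format and the value types are defined by
depslint.py, which remains the authoritative reference.

  # Hypothetical '.depslint' sketch -- values are illustrative, not canonical.
  IGNORED_SUFFICES = ['.pyc', '.d']
  IMPLICIT_DEPS_MATCHERS = [r'.*\.h$']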
68 | -------------------------------------------------------------------------------- /tests/test_depsgraph.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | # 3 | # Copyright 2013 Maxim Kalaev 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 7 | # You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | 17 | import unittest 18 | import depslint 19 | 20 | from depslint import BuildRule as BR 21 | 22 | # Silence all output 23 | depslint._verbose = -1 24 | 25 | def e2t(edges): 26 | """Edges to targets""" 27 | return depslint.sets_union(e.provides for e in edges) 28 | 29 | class DepsGraphTestBasics(unittest.TestCase): 30 | build_rules = [ 31 | BR(['outA'], ['A1', 'A2'], order_only_deps=['unused1']), 32 | BR(['outB'], ['B1', 'AB'], order_only_deps=['unused2']), 33 | BR(['A2'], ['AB']), 34 | ] 35 | def setUp(self): 36 | self.g = depslint.Graph(self.build_rules, [], is_clean_build_graph = False) 37 | 38 | def testBuild(self): 39 | g = self.g 40 | self.assertItemsEqual(g.target2edge.keys(), ["outA", "outB", "A2"]) 41 | self.assertItemsEqual(g.source2edges.keys(), ["A1", "A2", "B1", "AB"]) 42 | self.assertItemsEqual(g.duplicate_target_rules, []) 43 | 44 | def testTopTargets(self): 45 | g = self.g 46 | self.assertItemsEqual(g.top_targets, ["outA", "outB"]) 47 | 48 | def testRanks(self): 49 | g = self.g 50 | self.assertItemsEqual(g.targets_by_ranks.keys(), [0, 1, 2]) 51 | self.assertItemsEqual(g.targets_by_ranks[0], ["A1", "B1", "AB"]) 52 | self.assertItemsEqual(g.targets_by_ranks[1], ["A2", "outB"]) 53 | self.assertItemsEqual(g.targets_by_ranks[2], ["outA"]) 54 | 55 | def testIsStatic(self): 56 | g = self.g 57 | self.assertTrue(g.is_static_target('A1')) 58 | self.assertTrue(g.is_static_target('B1')) 59 | self.assertTrue(g.is_static_target('AB')) 60 | self.assertFalse(g.is_static_target('A2')) 61 | self.assertFalse(g.is_static_target('outA')) 62 | self.assertFalse(g.is_static_target('outB')) 63 | 64 | def testGetPathToTop(self): 65 | g = self.g 66 | self.assertEqual(g.get_any_path_to_top('A1'), ['outA']) 67 | self.assertEqual(g.get_any_path_to_top('A2'), ['outA']) 68 | self.assertEqual(g.get_any_path_to_top('B1'), ['outB']) 69 | self.assertIn(g.get_any_path_to_top('AB'), (['outB'], ['A2','outA'])) 70 | 71 | def testDepsClosure(self): 72 | g = self.g 73 | self.assertItemsEqual(g.get_deps_closure('AB'), []) 74 | self.assertItemsEqual(g.get_deps_closure('A1'), []) 75 | self.assertItemsEqual(g.get_deps_closure('B1'), []) 76 | self.assertItemsEqual(g.get_deps_closure('A2'), ["AB"]) 77 | self.assertItemsEqual(g.get_deps_closure('outA'), ["A1", "A2", "AB"]) 78 | self.assertItemsEqual(g.get_deps_closure('outB'), ["B1", "AB"]) 79 | 80 | def testProductsClosures(self): 81 | g = self.g 82 | self.assertItemsEqual(e2t(g.get_product_rules_closure('A1')), ['outA']) 83 | self.assertItemsEqual(e2t(g.get_product_rules_closure('A2')), ['outA']) 84 | self.assertItemsEqual(e2t(g.get_product_rules_closure('B1')), ['outB']) 85 | 
self.assertItemsEqual(e2t(g.get_product_rules_closure('AB')), ['outB', 'A2', 'outA']) 86 | 87 | class DepsGraphTestWanted(unittest.TestCase): 88 | build_rules = [ 89 | BR(['outA'], ['A1', 'A2']), 90 | BR(['outB'], ['B1', 'AB']), 91 | BR(['A2'], ['AB']), 92 | ] 93 | def setUp(self): 94 | self.g = depslint.Graph(self.build_rules, targets_wanted=['outA'], is_clean_build_graph = False) 95 | 96 | def testBuild(self): 97 | g = self.g 98 | self.assertItemsEqual(g.target2edge.keys(), ["outA", "outB", "A2"]) 99 | self.assertItemsEqual(g.source2edges.keys(), ["A1", "A2", "B1", "AB"]) 100 | self.assertItemsEqual(g.duplicate_target_rules, []) 101 | 102 | def testTopTargets(self): 103 | g = self.g 104 | self.assertItemsEqual(g.top_targets, ["outA"]) 105 | 106 | def testRanks(self): 107 | g = self.g 108 | self.assertItemsEqual(g.targets_by_ranks.keys(), [0, 1, 2]) 109 | self.assertItemsEqual(g.targets_by_ranks[0], ["A1", "AB"]) 110 | self.assertItemsEqual(g.targets_by_ranks[1], ["A2"]) 111 | self.assertItemsEqual(g.targets_by_ranks[2], ["outA"]) 112 | 113 | def testIsStatic(self): 114 | g = self.g 115 | self.assertTrue(g.is_static_target('A1')) 116 | self.assertTrue(g.is_static_target('AB')) 117 | self.assertFalse(g.is_static_target('A2')) 118 | self.assertFalse(g.is_static_target('outA')) 119 | # Not wanted 120 | self.assertFalse(g.is_static_target('outB')) # Not static & not wanted 121 | self.assertFalse(g.is_static_target('B1')) # Static, not wanted 122 | 123 | def testGetPathToTop(self): 124 | g = self.g 125 | self.assertEqual(g.get_any_path_to_top('A1'), ['outA']) 126 | self.assertEqual(g.get_any_path_to_top('A2'), ['outA']) 127 | self.assertEqual(g.get_any_path_to_top('AB'), ['A2', 'outA']) 128 | # Not wanted 129 | with self.assertRaises(Exception): 130 | g.get_any_path_to_top('B1') 131 | 132 | def testDepsClosure(self): 133 | g = self.g 134 | self.assertItemsEqual(g.get_deps_closure('AB'), []) 135 | self.assertItemsEqual(g.get_deps_closure('A1'), []) 136 | self.assertItemsEqual(g.get_deps_closure('A2'), ["AB"]) 137 | self.assertItemsEqual(g.get_deps_closure('outA'), ["A1", "A2", "AB"]) 138 | # Not wanted 139 | with self.assertRaises(Exception): 140 | g.get_deps_closure('outB') 141 | with self.assertRaises(Exception): 142 | g.get_deps_closure('B1') 143 | 144 | def testProductsClosures(self): 145 | g = self.g 146 | self.assertItemsEqual(e2t(g.get_product_rules_closure('A1')), ['outA']) 147 | self.assertItemsEqual(e2t(g.get_product_rules_closure('A2')), ['outA']) 148 | self.assertItemsEqual(e2t(g.get_product_rules_closure('AB')), ['A2', 'outA']) 149 | # Not wanted 150 | with self.assertRaises(Exception): 151 | g.get_product_rules_closure('B1') 152 | 153 | class DepsGraphWithPhony(unittest.TestCase): 154 | build_rules = [ 155 | BR(['outA'], ['A1', 'A2']), 156 | BR(['A1'], [], rule_name="phony"), 157 | BR(['A2'], ['AB']), 158 | BR(['AB'], [], rule_name="phony"), 159 | BR(['aliasA2'], ['A2'], rule_name="phony"), 160 | ] 161 | def setUp(self): 162 | self.g = depslint.Graph(self.build_rules, targets_wanted=[], is_clean_build_graph = False) 163 | 164 | def testBuild(self): 165 | g = self.g 166 | self.assertItemsEqual(g.target2edge.keys(), ["outA", "aliasA2", "A2", "A1", "AB"]) 167 | self.assertItemsEqual(g.source2edges.keys(), ["A1", "A2", "AB"]) 168 | self.assertItemsEqual(g.duplicate_target_rules, []) 169 | 170 | def testTopTargets(self): 171 | g = self.g 172 | self.assertItemsEqual(g.top_targets, ["outA", "aliasA2"]) 173 | 174 | def testRanks(self): 175 | g = self.g 176 | 
self.assertItemsEqual(g.targets_by_ranks.keys(), [0, 1, 2]) 177 | self.assertItemsEqual(g.targets_by_ranks[0], ["A1", "AB"]) 178 | self.assertItemsEqual(g.targets_by_ranks[1], ["A2", "aliasA2"]) 179 | self.assertItemsEqual(g.targets_by_ranks[2], ["outA"]) 180 | 181 | def testIsStatic(self): 182 | g = self.g 183 | self.assertTrue(g.is_static_target('A1')) 184 | self.assertTrue(g.is_static_target('AB')) 185 | self.assertFalse(g.is_static_target('A2')) 186 | self.assertFalse(g.is_static_target('outA')) 187 | self.assertFalse(g.is_static_target('aliasA2')) 188 | 189 | def testResolvePhony(self): 190 | g = self.g 191 | self.assertItemsEqual(g.resolve_phony(['A1']), ['A1']) 192 | self.assertItemsEqual(g.resolve_phony(['aliasA2', 'A1']), ['A2', 'A1']) 193 | self.assertItemsEqual(g.resolve_phony(['aliasA2', 'A1']), ['A2', 'A1']) 194 | 195 | def testGetPathToTop(self): 196 | g = self.g 197 | self.assertIn(g.get_any_path_to_top('A1'), (['outA'],)) 198 | self.assertIn(g.get_any_path_to_top('A2'), (['outA'],['aliasA2'])) 199 | self.assertIn(g.get_any_path_to_top('AB'), (['A2', 'outA'], ['A2', 'aliasA2'])) 200 | self.assertEqual(g.get_any_path_to_top('outA'), []) 201 | self.assertEqual(g.get_any_path_to_top('aliasA2'), []) 202 | 203 | def testDepsClosure(self): 204 | g = self.g 205 | self.assertItemsEqual(g.get_deps_closure('AB'), []) 206 | self.assertItemsEqual(g.get_deps_closure('A1'), []) 207 | self.assertItemsEqual(g.get_deps_closure('A2'), ['AB']) 208 | self.assertItemsEqual(g.get_deps_closure('outA'), ['A1', 'A2', 'AB']) 209 | self.assertItemsEqual(g.get_deps_closure('aliasA2'), ['A2', 'AB']) 210 | 211 | def testProductsClosures(self): 212 | g = self.g 213 | self.assertItemsEqual(e2t(g.get_product_rules_closure('A1')), ['outA']) 214 | self.assertItemsEqual(e2t(g.get_product_rules_closure('A2')), ['outA', 'aliasA2']) 215 | self.assertItemsEqual(e2t(g.get_product_rules_closure('AB')), ['A2', 'outA', 'aliasA2']) 216 | 217 | class DepsGraphMultipleOutputs(unittest.TestCase): 218 | # TODO: highly relevant to trace graphs 219 | pass 220 | -------------------------------------------------------------------------------- /tests/test_depstrace.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | # 3 | # Copyright 2013 Maxim Kalaev 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 7 | # You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 
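#
# What these tests cover:
#  - testDepStraceLogIterator exercises DepsTracer._strace_log_iter, the strace-log
#    tokenizer that joins interrupted syscall lines with their '<... resumed>'
#    continuations and yields (pid, op, ret, arg1, arg2) tuples.
#  - The remaining tests drive DepsTracer.parse_trace end to end, checking how traced
#    syscalls are grouped into per-rule deps/outputs/pids and how malformed or
#    truncated trace lines are reported via DepsTracer.unmatched_lines.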
16 | 17 | import unittest 18 | import depstrace 19 | 20 | # Silence all output 21 | depstrace._verbose = -1 22 | 23 | class DepsTraceTests(unittest.TestCase): 24 | def testDepStraceLogIterator(self): 25 | tracefile = """5082 execve("/bin/ninja", [...], [/* 45 vars */]) = 0 26 | 5082 chdir("/home/build/") = 0 27 | 5082 rename("version_info/contained_commits~", "version_info/contained_commits") = 0 28 | 5082 open("tst.c", O_RDONLY) = 5 29 | 5082 open("tst.o", O_WRONLY|O_CREAT|O_TRUNC|O_LARGEFILE) = 6 30 | 5082 symlink(".build/install", "install") = 0 31 | 5086 execve("gcc", [...], [/* 45 vars */] 32 | 5085 open("tst.h", O_RDONLY|O_NOCTTY|O_LARGEFILE 33 | 5082 clone(child_stack=0, flags=CLONE_CHILD_CLEARTID|CLONE_CHILD_SETTID|SIGCHLD, child_tidptr=0xb77c8768) = 5555 34 | 5082 clone( 35 | 4294 vfork( 36 | 5086 <... execve resumed> ) = 0 37 | 5085 <... open resumed> ) = 7 38 | 4294 <... vfork resumed> ) = 4295 39 | 5082 <... clone resumed> child_stack=0, flags=CLONE_CHILD_CLEARTID|CLONE_CHILD_SETTID|SIGCHLD, child_tidptr=0x7f1483d089f0) = ? ERESTARTNOINTR (To be restarted) 40 | 4120 wait4(-1, 41 | 4120 <... wait4 resumed> [{WIFEXITED(s) && WEXITSTATUS(s) == 0}], WNOHANG, NULL) = 4180 42 | 4181 stat("/bin/ls", {st_mode=S_IFREG|0755, st_size=97736, ...}) = 0 43 | 5085 exit_group(0) = ? 44 | """ 45 | tracer = depstrace.DepsTracer() 46 | tokens = list(tracer._strace_log_iter(tracefile.splitlines(True))) 47 | self.assertItemsEqual(tracer.unmatched_lines, []) 48 | 49 | # execve 50 | self.assertEqual(tokens.pop(0), ('5082', 'execve', '0', '/bin/ninja', '[...]')) 51 | # chdir 52 | self.assertEqual(tokens.pop(0), ('5082', 'chdir', '0', '/home/build/', None)) 53 | # rename 54 | self.assertEqual(tokens.pop(0), ('5082', 'rename', '0', 'version_info/contained_commits~', 55 | 'version_info/contained_commits')) 56 | # open one flag 57 | self.assertEqual(tokens.pop(0), ('5082', 'open', '5', 'tst.c', 'O_RDONLY')) 58 | # open flags OR-ed 59 | self.assertEqual(tokens.pop(0), ('5082', 'open', '6', 'tst.o', 'O_WRONLY|O_CREAT|O_TRUNC|O_LARGEFILE')) 60 | # symlink 61 | self.assertEqual(tokens.pop(0), ('5082', 'symlink', '0', '.build/install', 'install')) 62 | # clone 63 | self.assertEqual(tokens.pop(0), ('5082', 'clone', '5555', 'child_stack=0', 64 | 'flags=CLONE_CHILD_CLEARTID|CLONE_CHILD_SETTID|SIGCHLD')) 65 | # execve unfinished and then resumed 66 | self.assertEqual(tokens.pop(0), ('5086', 'execve', '0', 'gcc', '[...]')) 67 | # open unfinished and then resumed 68 | self.assertEqual(tokens.pop(0), ('5085', 'open', '7', 'tst.h', 'O_RDONLY|O_NOCTTY|O_LARGEFILE')) 69 | # vfork unfinished and then resumed 70 | self.assertEqual(tokens.pop(0), ('4294', 'vfork', '4295', None, None)) 71 | # clone non-return (ERESTARTNOINTR) 72 | self.assertEqual(tokens.pop(0), ('5082', 'clone', '?', 'child_stack=0', 73 | 'flags=CLONE_CHILD_CLEARTID|CLONE_CHILD_SETTID|SIGCHLD')) 74 | 75 | # Complex parameter expression 76 | self.assertEqual(tokens.pop(0), ('4120', 'wait4', '4180', '-1', '[{WIFEXITED(s) && WEXITSTATUS(s) == 0}]')) 77 | 78 | # Another complex parameter expression 79 | self.assertEqual(tokens.pop(0), ('4181', 'stat', '0', '/bin/ls', '{st_mode=S_IFREG|0755, st_size=97736, ...}')) 80 | 81 | # non-return 82 | self.assertEqual(tokens.pop(0), ('5085', 'exit_group', '?', '0', None)) 83 | 84 | # Ensure all the tockens were fetched 85 | self.assertEqual(0, len(tokens)) 86 | 87 | def testDepStraceTrackTwoRules(self): 88 | tracefile = """5082 execve("/home/maximk/programs/bin/ninja", [...], [/* 45 vars */]) = 0 89 | 5082 open("tst.d", 
O_RDONLY) = 5 90 | 5082 open("tst2.d", O_RDONLY) = 5 91 | 5082 clone(child_stack=0, flags=CLONE_CHILD_CLEARTID|CLONE_CHILD_SETTID|SIGCHLD, child_tidptr=0xb77c8768) = 5085 92 | 5082 clone(child_stack=0, flags=CLONE_CHILD_CLEARTID|CLONE_CHILD_SETTID|SIGCHLD, child_tidptr=0xb77c8768) = 5086 93 | 5085 execve("/usr/bin/gcc", [...], [/* 45 vars */] 94 | 5086 execve("gcc", [...], [/* 45 vars */] 95 | 5085 <... execve resumed> ) = 0 96 | 5086 <... execve resumed> ) = 0 97 | 5085 open("tst.c", O_RDONLY|O_NOCTTY|O_LARGEFILE 98 | 5086 open("tst.c", O_RDONLY|O_NOCTTY|O_LARGEFILE 99 | 5085 <... open resumed> ) = 4 100 | 5086 <... open resumed> ) = 4 101 | 5085 open("tst.h", O_RDONLY|O_NOCTTY|O_LARGEFILE 102 | 5086 open("tst.h", O_RDONLY|O_NOCTTY|O_LARGEFILE 103 | 5085 <... open resumed> ) = 5 104 | 5086 <... open resumed> ) = 5 105 | 5085 open("tst2.d", O_WRONLY|O_CREAT|O_TRUNC|O_LARGEFILE, 0666) = 4 106 | 5086 open("tst.d", O_WRONLY|O_CREAT|O_TRUNC|O_LARGEFILE, 0666) = 4 107 | 5086 open("tst", O_RDWR|O_CREAT|O_TRUNC|O_LARGEFILE, 0666 108 | 5086 <... open resumed> ) = 5 109 | 5085 open("tst2", O_RDWR|O_CREAT|O_TRUNC|O_LARGEFILE, 0666) = 4 110 | 5086 open("tst", O_RDWR|O_LARGEFILE) = 13 111 | 5085 open("tst2", O_RDWR|O_LARGEFILE) = 13 112 | """ 113 | tracer = depstrace.DepsTracer(None) 114 | rules = tracer.parse_trace(tracefile.splitlines(True)) 115 | self.assertEqual(2, len(rules)) 116 | 117 | r = rules.pop(0) 118 | self.assertItemsEqual(r.deps, ['tst.c', 'tst.h']) 119 | self.assertItemsEqual(r.outputs, ['tst2', 'tst2.d']) 120 | self.assertItemsEqual(r.pids, ['5085']) 121 | self.assertEqual(r.lineno, 4) 122 | 123 | r = rules.pop(0) 124 | self.assertItemsEqual(r.deps, ['tst.c', 'tst.h', 'gcc']) 125 | self.assertItemsEqual(r.outputs, ['tst', 'tst.d']) 126 | self.assertItemsEqual(r.pids, ['5086']) 127 | self.assertEqual(r.lineno, 5) 128 | 129 | self.assertItemsEqual(tracer.unmatched_lines, []) 130 | 131 | def testDepStraceUnmatched(self): 132 | tracefile = """5082 execve("/home/maximk/programs/bin/ninja", [...], [/* 45 vars */]) = 0 133 | 5082 clone(child_stack=0, flags=CLONE_CHILD_CLEARTID|CLONE_CHILD_SETTID|SIGCHLD, child_tidptr=0xb77c8768) = 5085 134 | 5085 execve("tst.sh", [...], [/* 45 vars */]) = 0 135 | 5085 open("tst", O_RDWR) = 4 136 | 5085 unmatchedop("tst.c") = 5 137 | """ 138 | tracer = depstrace.DepsTracer() 139 | rules = tracer.parse_trace(tracefile.splitlines(True)) 140 | self.assertEqual(1, len(rules)) 141 | 142 | r = rules.pop(0) 143 | self.assertItemsEqual(r.deps, ['tst.sh']) 144 | self.assertItemsEqual(r.outputs, ['tst']) 145 | self.assertItemsEqual(r.pids, ['5085']) 146 | self.assertEqual(r.lineno, 2) 147 | 148 | self.assertItemsEqual(tracer.unmatched_lines, ['5085 unmatchedop("tst.c") = 5']) 149 | 150 | def testDepStraceDoubleResumedBug(self): 151 | tracefile = """5082 execve("/home/maximk/programs/bin/ninja", [...], [/* 45 vars */]) = 0 152 | 5082 clone(child_stack=0, flags=CLONE_CHILD_CLEARTID|CLONE_CHILD_SETTID|SIGCHLD, child_tidptr=0xb77c8768) = 21316 153 | 21316 execve("/usr/lib/gcc/x86_64-linux-gnu/4.4.3/cc1plus", [...], [/* 63 vars */] 154 | 21316 <... execve resumed> ) = 0 155 | 21316 open("../../wtf/Assertions.h", O_RDONLY|O_NOCTTY 156 | 21316 <... open resumed> ) = 4 157 | 21316 <... 
open resumed> ) = 0 158 | """ 159 | tracer = depstrace.DepsTracer(build_dir="/xxx/yyy") 160 | rules = tracer.parse_trace(tracefile.splitlines(True)) 161 | self.assertEqual(1, len(rules)) 162 | 163 | r = rules.pop(0) 164 | self.assertItemsEqual(r.deps, ['../../wtf/Assertions.h']) 165 | self.assertItemsEqual(r.outputs, []) 166 | self.assertItemsEqual(r.pids, ['21316']) 167 | self.assertEqual(r.lineno, 2) 168 | 169 | self.assertItemsEqual(tracer.unmatched_lines, ['21316 <... open resumed> ) = 0']) 170 | 171 | def testDepStraceDoubleUnfinishedBug(self): 172 | tracefile = """5082 execve("/home/maximk/programs/bin/ninja", [...], [/* 45 vars */]) = 0 173 | 5082 clone(child_stack=0, flags=CLONE_CHILD_CLEARTID|CLONE_CHILD_SETTID|SIGCHLD, child_tidptr=0xb77c8768) = 21316 174 | 21316 execve("/usr/lib/gcc/x86_64-linux-gnu/4.4.3/cc1plus", [...], [/* 63 vars */] 175 | 21316 <... execve resumed> ) = 0 176 | 21316 open("../../wtf/Assertions.h", O_RDONLY|O_NOCTTY 177 | 21316 open("Unexpected.h", O_RDONLY|O_NOCTTY 178 | 21316 <... open resumed> ) = 4 179 | """ 180 | tracer = depstrace.DepsTracer(build_dir="/xxx/yyy") 181 | rules = tracer.parse_trace(tracefile.splitlines(True)) 182 | self.assertEqual(1, len(rules)) 183 | 184 | r = rules.pop(0) 185 | self.assertIn(r.deps.pop(), ['Unexpected.h', '../../wtf/Assertions.h']) 186 | self.assertItemsEqual(r.outputs, []) 187 | self.assertItemsEqual(r.pids, ['21316']) 188 | self.assertEqual(r.lineno, 2) 189 | 190 | self.assertItemsEqual(tracer.unmatched_lines, ['21316 open("Unexpected.h", O_RDONLY|O_NOCTTY ']) 191 | 192 | def testDepStraceExcessiveUnfinishedBug(self): 193 | #TODO 194 | tracefile = """5082 execve("/home/maximk/programs/bin/ninja", [...], [/* 45 vars */]) = 0 195 | 5082 clone(child_stack=0, flags=CLONE_CHILD_CLEARTID|CLONE_CHILD_SETTID|SIGCHLD, child_tidptr=0xb77c8768) = 21316 196 | 21316 execve("/usr/lib/gcc/x86_64-linux-gnu/4.4.3/cc1plus", [...], [/* 63 vars */] 197 | 21316 <... execve resumed> ) = 0 198 | 21316 open("../../wtf/Assertions.h", O_RDONLY|O_NOCTTY 199 | """ 200 | tracer = depstrace.DepsTracer(build_dir="/xxx/yyy") 201 | rules = tracer.parse_trace(tracefile.splitlines(True)) 202 | self.assertEqual(1, len(rules)) 203 | 204 | r = rules.pop(0) 205 | self.assertItemsEqual(r.deps, []) 206 | self.assertItemsEqual(r.outputs, []) 207 | self.assertItemsEqual(r.pids, ['21316']) 208 | self.assertEqual(r.lineno, 2) 209 | 210 | self.assertItemsEqual(tracer.unmatched_lines, ['21316 open("../../wtf/Assertions.h", O_RDONLY|O_NOCTTY ']) 211 | 212 | def testDepStraceTrackChdir(self): 213 | #TODO 214 | tracefile = """""" 215 | -------------------------------------------------------------------------------- /COPYING: -------------------------------------------------------------------------------- 1 | 2 | Apache License 3 | Version 2.0, January 2010 4 | http://www.apache.org/licenses/ 5 | 6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 7 | 8 | 1. Definitions. 9 | 10 | "License" shall mean the terms and conditions for use, reproduction, 11 | and distribution as defined by Sections 1 through 9 of this document. 12 | 13 | "Licensor" shall mean the copyright owner or entity authorized by 14 | the copyright owner that is granting the License. 15 | 16 | "Legal Entity" shall mean the union of the acting entity and all 17 | other entities that control, are controlled by, or are under common 18 | control with that entity. 
For the purposes of this definition, 19 | "control" means (i) the power, direct or indirect, to cause the 20 | direction or management of such entity, whether by contract or 21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 22 | outstanding shares, or (iii) beneficial ownership of such entity. 23 | 24 | "You" (or "Your") shall mean an individual or Legal Entity 25 | exercising permissions granted by this License. 26 | 27 | "Source" form shall mean the preferred form for making modifications, 28 | including but not limited to software source code, documentation 29 | source, and configuration files. 30 | 31 | "Object" form shall mean any form resulting from mechanical 32 | transformation or translation of a Source form, including but 33 | not limited to compiled object code, generated documentation, 34 | and conversions to other media types. 35 | 36 | "Work" shall mean the work of authorship, whether in Source or 37 | Object form, made available under the License, as indicated by a 38 | copyright notice that is included in or attached to the work 39 | (an example is provided in the Appendix below). 40 | 41 | "Derivative Works" shall mean any work, whether in Source or Object 42 | form, that is based on (or derived from) the Work and for which the 43 | editorial revisions, annotations, elaborations, or other modifications 44 | represent, as a whole, an original work of authorship. For the purposes 45 | of this License, Derivative Works shall not include works that remain 46 | separable from, or merely link (or bind by name) to the interfaces of, 47 | the Work and Derivative Works thereof. 48 | 49 | "Contribution" shall mean any work of authorship, including 50 | the original version of the Work and any modifications or additions 51 | to that Work or Derivative Works thereof, that is intentionally 52 | submitted to Licensor for inclusion in the Work by the copyright owner 53 | or by an individual or Legal Entity authorized to submit on behalf of 54 | the copyright owner. For the purposes of this definition, "submitted" 55 | means any form of electronic, verbal, or written communication sent 56 | to the Licensor or its representatives, including but not limited to 57 | communication on electronic mailing lists, source code control systems, 58 | and issue tracking systems that are managed by, or on behalf of, the 59 | Licensor for the purpose of discussing and improving the Work, but 60 | excluding communication that is conspicuously marked or otherwise 61 | designated in writing by the copyright owner as "Not a Contribution." 62 | 63 | "Contributor" shall mean Licensor and any individual or Legal Entity 64 | on behalf of whom a Contribution has been received by Licensor and 65 | subsequently incorporated within the Work. 66 | 67 | 2. Grant of Copyright License. Subject to the terms and conditions of 68 | this License, each Contributor hereby grants to You a perpetual, 69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 70 | copyright license to reproduce, prepare Derivative Works of, 71 | publicly display, publicly perform, sublicense, and distribute the 72 | Work and such Derivative Works in Source or Object form. 73 | 74 | 3. Grant of Patent License. 
Subject to the terms and conditions of 75 | this License, each Contributor hereby grants to You a perpetual, 76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 77 | (except as stated in this section) patent license to make, have made, 78 | use, offer to sell, sell, import, and otherwise transfer the Work, 79 | where such license applies only to those patent claims licensable 80 | by such Contributor that are necessarily infringed by their 81 | Contribution(s) alone or by combination of their Contribution(s) 82 | with the Work to which such Contribution(s) was submitted. If You 83 | institute patent litigation against any entity (including a 84 | cross-claim or counterclaim in a lawsuit) alleging that the Work 85 | or a Contribution incorporated within the Work constitutes direct 86 | or contributory patent infringement, then any patent licenses 87 | granted to You under this License for that Work shall terminate 88 | as of the date such litigation is filed. 89 | 90 | 4. Redistribution. You may reproduce and distribute copies of the 91 | Work or Derivative Works thereof in any medium, with or without 92 | modifications, and in Source or Object form, provided that You 93 | meet the following conditions: 94 | 95 | (a) You must give any other recipients of the Work or 96 | Derivative Works a copy of this License; and 97 | 98 | (b) You must cause any modified files to carry prominent notices 99 | stating that You changed the files; and 100 | 101 | (c) You must retain, in the Source form of any Derivative Works 102 | that You distribute, all copyright, patent, trademark, and 103 | attribution notices from the Source form of the Work, 104 | excluding those notices that do not pertain to any part of 105 | the Derivative Works; and 106 | 107 | (d) If the Work includes a "NOTICE" text file as part of its 108 | distribution, then any Derivative Works that You distribute must 109 | include a readable copy of the attribution notices contained 110 | within such NOTICE file, excluding those notices that do not 111 | pertain to any part of the Derivative Works, in at least one 112 | of the following places: within a NOTICE text file distributed 113 | as part of the Derivative Works; within the Source form or 114 | documentation, if provided along with the Derivative Works; or, 115 | within a display generated by the Derivative Works, if and 116 | wherever such third-party notices normally appear. The contents 117 | of the NOTICE file are for informational purposes only and 118 | do not modify the License. You may add Your own attribution 119 | notices within Derivative Works that You distribute, alongside 120 | or as an addendum to the NOTICE text from the Work, provided 121 | that such additional attribution notices cannot be construed 122 | as modifying the License. 123 | 124 | You may add Your own copyright statement to Your modifications and 125 | may provide additional or different license terms and conditions 126 | for use, reproduction, or distribution of Your modifications, or 127 | for any such Derivative Works as a whole, provided Your use, 128 | reproduction, and distribution of the Work otherwise complies with 129 | the conditions stated in this License. 130 | 131 | 5. Submission of Contributions. Unless You explicitly state otherwise, 132 | any Contribution intentionally submitted for inclusion in the Work 133 | by You to the Licensor shall be under the terms and conditions of 134 | this License, without any additional terms or conditions. 
135 | Notwithstanding the above, nothing herein shall supersede or modify 136 | the terms of any separate license agreement you may have executed 137 | with Licensor regarding such Contributions. 138 | 139 | 6. Trademarks. This License does not grant permission to use the trade 140 | names, trademarks, service marks, or product names of the Licensor, 141 | except as required for reasonable and customary use in describing the 142 | origin of the Work and reproducing the content of the NOTICE file. 143 | 144 | 7. Disclaimer of Warranty. Unless required by applicable law or 145 | agreed to in writing, Licensor provides the Work (and each 146 | Contributor provides its Contributions) on an "AS IS" BASIS, 147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 148 | implied, including, without limitation, any warranties or conditions 149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 150 | PARTICULAR PURPOSE. You are solely responsible for determining the 151 | appropriateness of using or redistributing the Work and assume any 152 | risks associated with Your exercise of permissions under this License. 153 | 154 | 8. Limitation of Liability. In no event and under no legal theory, 155 | whether in tort (including negligence), contract, or otherwise, 156 | unless required by applicable law (such as deliberate and grossly 157 | negligent acts) or agreed to in writing, shall any Contributor be 158 | liable to You for damages, including any direct, indirect, special, 159 | incidental, or consequential damages of any character arising as a 160 | result of this License or out of the use or inability to use the 161 | Work (including but not limited to damages for loss of goodwill, 162 | work stoppage, computer failure or malfunction, or any and all 163 | other commercial damages or losses), even if such Contributor 164 | has been advised of the possibility of such damages. 165 | 166 | 9. Accepting Warranty or Additional Liability. While redistributing 167 | the Work or Derivative Works thereof, You may choose to offer, 168 | and charge a fee for, acceptance of support, warranty, indemnity, 169 | or other liability obligations and/or rights consistent with this 170 | License. However, in accepting such obligations, You may act only 171 | on Your own behalf and on Your sole responsibility, not on behalf 172 | of any other Contributor, and only if You agree to indemnify, 173 | defend, and hold each Contributor harmless for any liability 174 | incurred by, or claims asserted against, such Contributor by reason 175 | of your accepting any such warranty or additional liability. 176 | 177 | END OF TERMS AND CONDITIONS 178 | 179 | APPENDIX: How to apply the Apache License to your work. 180 | 181 | To apply the Apache License to your work, attach the following 182 | boilerplate notice, with the fields enclosed by brackets "[]" 183 | replaced with your own identifying information. (Don't include 184 | the brackets!) The text should be enclosed in the appropriate 185 | comment syntax for the file format. We also recommend that a 186 | file or class name and description of purpose be included on the 187 | same "printed page" as the copyright notice for easier 188 | identification within third-party archives. 189 | 190 | Copyright [yyyy] [name of copyright owner] 191 | 192 | Licensed under the Apache License, Version 2.0 (the "License"); 193 | you may not use this file except in compliance with the License. 
194 | You may obtain a copy of the License at 195 | 196 | http://www.apache.org/licenses/LICENSE-2.0 197 | 198 | Unless required by applicable law or agreed to in writing, software 199 | distributed under the License is distributed on an "AS IS" BASIS, 200 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 201 | See the License for the specific language governing permissions and 202 | limitations under the License. 203 | -------------------------------------------------------------------------------- /tests/test_parsers.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | # 3 | # Copyright 2013 Maxim Kalaev 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 7 | # You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | 17 | import unittest 18 | import depslint 19 | 20 | # Silence all output 21 | depslint._verbose = -1 22 | 23 | class NinjaManifestParserIOHooked(depslint.NinjaManifestParser): 24 | def __init__(self, manifest, files_dict={}): 25 | self.files = files_dict 26 | super(NinjaManifestParserIOHooked, self).__init__(manifest) 27 | 28 | def _read_depfile(self, path): 29 | return self.files.get(path) 30 | 31 | class NinjaManifestParserTests(unittest.TestCase): 32 | def testBuildRulesParse(self): 33 | manifest = """ 34 | rule RULE 35 | command = cc $in -o $out 36 | 37 | build out1 out2: RULE in1 in2 | ein1 ein2 || oin1 oin2 38 | 39 | build out3: RULE in2 | in1 40 | build out4: RULE || oin3 41 | build out5: RULE | in4 42 | build out6: RULE in5 43 | build out7: RULE | || in6 44 | build out8: RULE | || 45 | build out9: RULE || 46 | build outA: RULE | 47 | """ 48 | parser = NinjaManifestParserIOHooked(manifest.splitlines(True)) 49 | edges = list(parser.iterate_target_rules()) 50 | 51 | e = edges.pop(0) 52 | self.assertItemsEqual(e.targets,["out1", "out2"]) 53 | self.assertItemsEqual(e.deps, ["in1", "in2", "ein1", "ein2"]) 54 | self.assertItemsEqual(e.order_only_deps, ["oin1", "oin2"]) 55 | self.assertEqual(e.rule_name, "RULE") 56 | self.assertEqual(parser._eval_edge_attribute(e, "in"), "in1 in2") 57 | 58 | e = edges.pop(0) 59 | self.assertItemsEqual(e.targets,["out3"]) 60 | self.assertItemsEqual(e.deps, ["in1", "in2"]) 61 | self.assertItemsEqual(e.order_only_deps, []) 62 | self.assertEqual(parser._eval_edge_attribute(e, "in"), "in2") 63 | self.assertEqual(parser._eval_edge_attribute(e, "out"), "out3") 64 | self.assertEqual(e.rule_name, "RULE") 65 | 66 | e = edges.pop(0) 67 | self.assertItemsEqual(e.targets,["out4"]) 68 | self.assertItemsEqual(e.deps, []) 69 | self.assertItemsEqual(e.order_only_deps, ["oin3"]) 70 | self.assertEqual(parser._eval_edge_attribute(e, "in"), "") 71 | self.assertEqual(parser._eval_edge_attribute(e, "out"), "out4") 72 | self.assertEqual(e.rule_name, "RULE") 73 | 74 | e = edges.pop(0) 75 | self.assertItemsEqual(e.targets,["out5"]) 76 | self.assertItemsEqual(e.deps, ["in4"]) 77 | self.assertItemsEqual(e.order_only_deps, []) 78 | self.assertEqual(parser._eval_edge_attribute(e, "in"), "") 79 | 
self.assertEqual(parser._eval_edge_attribute(e, "out"), "out5") 80 | self.assertEqual(e.rule_name, "RULE") 81 | 82 | e = edges.pop(0) 83 | self.assertItemsEqual(e.targets,["out6"]) 84 | self.assertItemsEqual(e.deps, ["in5"]) 85 | self.assertItemsEqual(e.order_only_deps, []) 86 | self.assertEqual(parser._eval_edge_attribute(e, "in"), "in5") 87 | self.assertEqual(parser._eval_edge_attribute(e, "out"), "out6") 88 | self.assertEqual(e.rule_name, "RULE") 89 | 90 | e = edges.pop(0) 91 | self.assertItemsEqual(e.targets,["out7"]) 92 | self.assertItemsEqual(e.deps, []) 93 | self.assertItemsEqual(e.order_only_deps, ["in6"]) 94 | self.assertEqual(parser._eval_edge_attribute(e, "in"), "") 95 | self.assertEqual(parser._eval_edge_attribute(e, "out"), "out7") 96 | self.assertEqual(e.rule_name, "RULE") 97 | 98 | e = edges.pop(0) 99 | self.assertItemsEqual(e.targets,["out8"]) 100 | self.assertItemsEqual(e.deps, []) 101 | self.assertItemsEqual(e.order_only_deps, []) 102 | self.assertEqual(parser._eval_edge_attribute(e, "in"), "") 103 | self.assertEqual(parser._eval_edge_attribute(e, "out"), "out8") 104 | self.assertEqual(e.rule_name, "RULE") 105 | 106 | e = edges.pop(0) 107 | self.assertItemsEqual(e.targets,["out9"]) 108 | self.assertItemsEqual(e.deps, []) 109 | self.assertItemsEqual(e.order_only_deps, []) 110 | self.assertEqual(parser._eval_edge_attribute(e, "in"), "") 111 | self.assertEqual(parser._eval_edge_attribute(e, "out"), "out9") 112 | self.assertEqual(e.rule_name, "RULE") 113 | 114 | e = edges.pop(0) 115 | self.assertItemsEqual(e.targets,["outA"]) 116 | self.assertItemsEqual(e.deps, []) 117 | self.assertItemsEqual(e.order_only_deps, []) 118 | self.assertEqual(parser._eval_edge_attribute(e, "in"), "") 119 | self.assertEqual(parser._eval_edge_attribute(e, "out"), "outA") 120 | self.assertEqual(e.rule_name, "RULE") 121 | 122 | self.assertItemsEqual(edges, list()) 123 | 124 | def testManifestPathsNormalization(self): 125 | manifest = """ 126 | # Paths normalization 127 | build a/../outB: phony ./a/b | a//c || a/../a/d 128 | """ 129 | parser = NinjaManifestParserIOHooked(manifest.splitlines(True)) 130 | edges = list(parser.iterate_target_rules()) 131 | 132 | e = edges.pop(0) 133 | self.assertItemsEqual(e.targets,["outB"]) 134 | self.assertItemsEqual(e.deps, ["a/b", "a/c"]) 135 | self.assertItemsEqual(e.order_only_deps, ["a/d"]) 136 | self.assertEqual(parser._eval_edge_attribute(e, "in"), "a/b") 137 | self.assertEqual(parser._eval_edge_attribute(e, "out"), "outB") 138 | self.assertEqual(e.rule_name, "phony") 139 | 140 | self.assertItemsEqual(edges, list()) 141 | 142 | def testAtributes(self): 143 | manifest = """ 144 | v1 = out 145 | v2 = in 146 | v3 = g 147 | 148 | rule RULE 149 | command = cc $in -o $out 150 | v3 = r 151 | 152 | # Using variables in build contructs, with and w/o {} 153 | # Build scope overrides definitions from global and rule scopes, 154 | # including build block filenames. 
155 | build ${v1}: RULE $v2 156 | v1 = out1 157 | v2 = in1 158 | v4 = $v1/$v2 $v3 159 | 160 | # Embedding varibles in a string 161 | build ${v1}2: RULE prefix$v2.ext 162 | 163 | # Rule scope and global scope 164 | build ${v1}3: phony 165 | """ 166 | parser = NinjaManifestParserIOHooked(manifest.splitlines(True)) 167 | edges = list(parser.iterate_target_rules()) 168 | 169 | e = edges.pop(0) 170 | self.assertItemsEqual(e.targets,["out1"]) 171 | self.assertEqual(parser._eval_edge_attribute(e, "in"), "in1") 172 | self.assertEqual(parser._eval_edge_attribute(e, "v1"), "out1") 173 | self.assertEqual(parser._eval_edge_attribute(e, "v2"), "in1") 174 | self.assertEqual(parser._eval_edge_attribute(e, "v3"), "r") 175 | self.assertEqual(parser._eval_edge_attribute(e, "v4"), "out1/in1 r") 176 | 177 | e = edges.pop(0) 178 | self.assertItemsEqual(e.targets,["out2"]) 179 | self.assertEqual(parser._eval_edge_attribute(e, "in"), "prefixin.ext") 180 | self.assertEqual(parser._eval_edge_attribute(e, "v1"), "out") 181 | self.assertEqual(parser._eval_edge_attribute(e, "v2"), "in") 182 | self.assertEqual(parser._eval_edge_attribute(e, "v3"), "r") 183 | 184 | e = edges.pop(0) 185 | self.assertItemsEqual(e.targets,["out3"]) 186 | self.assertEqual(parser._eval_edge_attribute(e, "in"), "") 187 | self.assertEqual(parser._eval_edge_attribute(e, "v1"), "out") 188 | self.assertEqual(parser._eval_edge_attribute(e, "v2"), "in") 189 | self.assertEqual(parser._eval_edge_attribute(e, "v3"), "g") 190 | 191 | self.assertItemsEqual(edges, list()) 192 | 193 | @unittest.skip("Fixme later") 194 | def testManifestRecursiveAttrs(self): 195 | manifest = """ 196 | # Recursive attribute redefinition 197 | v2 = in 198 | 199 | build out: phony $v2 200 | v2 = $v2.$v2 201 | """ 202 | parser = NinjaManifestParserIOHooked(manifest.splitlines(True)) 203 | edges = list(parser.iterate_target_rules()) 204 | 205 | e = edges.pop(0) 206 | self.assertItemsEqual(e.targets,["out"]) 207 | self.assertEqual(parser._eval_edge_attribute(e, "in"), "in.in") 208 | self.assertEqual(parser._eval_edge_attribute(e, "v2"), "in.in") 209 | 210 | self.assertItemsEqual(edges, list()) 211 | 212 | def testDepfilesLoading(self): 213 | manifest = """ 214 | rule RULE 215 | depfile = $out.d 216 | 217 | build out1: RULE in1 | in2 || oin3 218 | 219 | # Empty depfile 220 | build out2: RULE || oin3 221 | 222 | # Missing depfile 223 | build out3: RULE 224 | """ 225 | files = ({"out1.d":"out1: din1 din2", 226 | "out2.d":"out2: "}) 227 | parser = NinjaManifestParserIOHooked(manifest.splitlines(True), files) 228 | edges = list(parser.iterate_target_rules()) 229 | 230 | e = edges.pop(0) 231 | self.assertItemsEqual(e.targets,["out1"]) 232 | self.assertEqual(parser._eval_edge_attribute(e, "in"), "in1") 233 | self.assertEqual(parser._eval_edge_attribute(e, "depfile"), "out1.d") 234 | self.assertItemsEqual(e.deps, ["in1", "in2"]) 235 | self.assertItemsEqual(e.depfile_deps, ["din1", "din2"]) 236 | self.assertItemsEqual(e.order_only_deps, ["oin3"]) 237 | 238 | e = edges.pop(0) 239 | self.assertItemsEqual(e.targets,["out2"]) 240 | self.assertEqual(parser._eval_edge_attribute(e, "in"), "") 241 | self.assertEqual(parser._eval_edge_attribute(e, "depfile"), "out2.d") 242 | self.assertItemsEqual(e.deps, []) 243 | self.assertItemsEqual(e.depfile_deps, []) 244 | self.assertItemsEqual(e.order_only_deps, ["oin3"]) 245 | 246 | e = edges.pop(0) 247 | self.assertItemsEqual(e.targets,["out3"]) 248 | self.assertEqual(parser._eval_edge_attribute(e, "in"), "") 249 | 
self.assertEqual(parser._eval_edge_attribute(e, "depfile"), "out3.d") 250 | self.assertItemsEqual(e.deps, []) 251 | self.assertItemsEqual(e.depfile_deps, []) 252 | self.assertItemsEqual(e.order_only_deps, []) 253 | 254 | self.assertItemsEqual(edges, list()) 255 | 256 | class NinjaManifestParserEscapesTests(unittest.TestCase): 257 | @unittest.skip("Fixme later") 258 | def testEscapeUSD(self): 259 | manifest = """ 260 | v2 = in 261 | v3 = g 262 | 263 | # Escaping variables 264 | build $${v1}4: phony $$v2 265 | v3 = $$v3 266 | 267 | # Escaping long lines 268 | build out: phony $ 269 | in1 $ 270 | in2 271 | """ 272 | parser = NinjaManifestParserIOHooked(manifest.splitlines(True)) 273 | edges = list(parser.iterate_target_rules()) 274 | 275 | # Fix needed! 276 | e = edges.pop(0) 277 | self.assertItemsEqual(e.targets,["${v1}4"]) 278 | self.assertEqual(parser._eval_edge_attribute(e, "in"), "$v2") 279 | self.assertEqual(parser._eval_edge_attribute(e, "v1"), "out") 280 | self.assertEqual(parser._eval_edge_attribute(e, "v2"), "in") 281 | self.assertEqual(parser._eval_edge_attribute(e, "v3"), "$v3") 282 | 283 | self.assertItemsEqual(edges, list()) 284 | 285 | def testEscapeCR(self): 286 | manifest = """ 287 | # Splitting long lines 288 | build out: phony $ 289 | in1 $ 290 | in2 291 | """ 292 | parser = NinjaManifestParserIOHooked(manifest.splitlines(True)) 293 | edges = list(parser.iterate_target_rules()) 294 | 295 | e = edges.pop(0) 296 | self.assertItemsEqual(e.targets,["out"]) 297 | self.assertItemsEqual(e.deps, ["in1", "in2"]) 298 | self.assertItemsEqual(e.order_only_deps, []) 299 | self.assertEqual(e.rule_name, "phony") 300 | self.assertEqual(parser._eval_edge_attribute(e, "in"), "in1 in2") 301 | self.assertItemsEqual(edges, list()) 302 | 303 | #TODO: rename edges to 'build rules' 304 | class NinjaManifestKeywords(unittest.TestCase): 305 | def setUp(self): 306 | manifest = """ 307 | ninja_required_version = 1.2 308 | 309 | pool mypool 310 | depth = 2 311 | 312 | rule RULE 313 | command = cc $in -o $out 314 | pool = otherpool 315 | 316 | build out1: RULE in1 317 | pool = mypool 318 | 319 | default out1 out$ 3 $ 320 | out-4 321 | """ 322 | self.parser = NinjaManifestParserIOHooked(manifest.splitlines(True)) 323 | self.edges = list(self.parser.iterate_target_rules()) 324 | 325 | def testPoolParse(self): 326 | e = self.edges.pop(0) 327 | self.assertItemsEqual(e.targets,["out1"]) 328 | self.assertItemsEqual(e.deps, ["in1"]) 329 | self.assertEqual(e.rule_name, "RULE") 330 | 331 | # Treating pool selection like a regular attribute. 
332 | self.assertEqual(self.parser._eval_edge_attribute(e, "pool"), "mypool") 333 | 334 | def testDefaultParse(self): 335 | self.assertItemsEqual(self.parser.get_default_targets(), 336 | ['out1', 'out 3', 'out-4']) 337 | 338 | def testNinjaRequiredVersion(self): 339 | self.assertAlmostEqual(self.parser.ninja_required_version, 1.2) 340 | 341 | class DepfilesParsingTests(unittest.TestCase): 342 | def testDepfilesTypicalParse(self): 343 | depfile = """out1:""" 344 | parser = depslint.DepfileParser() 345 | targets, deps = parser.parse_depfile(depfile) 346 | self.assertItemsEqual(targets, ["out1"]) 347 | self.assertItemsEqual(deps, []) 348 | 349 | depfile = """out: in1 in2""" 350 | targets, deps = parser.parse_depfile(depfile) 351 | self.assertItemsEqual(targets, ["out"]) 352 | self.assertItemsEqual(deps, ["in1", "in2"]) 353 | 354 | depfile = r"""out: \ 355 | in1 \ 356 | in2""" 357 | targets, deps = parser.parse_depfile(depfile) 358 | self.assertItemsEqual(targets, ["out"]) 359 | self.assertItemsEqual(deps, ["in1", "in2"]) 360 | 361 | def testDepfilesWhitespacesAndSpecials(self): 362 | depfile = r""" 363 | out\ 1: in\ 1.h in\ 2.h \ 364 | in\3.h in\\4.h c:\ms(x86)\h @conf+-=.h""" 365 | parser = depslint.DepfileParser() 366 | targets, deps = parser.parse_depfile(depfile) 367 | self.assertItemsEqual(targets, ["out 1"]) 368 | self.assertItemsEqual(deps, ["in 1.h", "in 2.h", r"in\3.h", r"in\4.h", 369 | r"c:\ms(x86)\h", 370 | "@conf+-=.h"]) 371 | 372 | def testDepfilesMultitargets(self): 373 | depfile = r"""out\ 1 out\ 2: in\ 1.h in\ 2.h""" 374 | parser = depslint.DepfileParser() 375 | targets, deps = parser.parse_depfile(depfile) 376 | self.assertItemsEqual(targets, ["out 1", "out 2"]) 377 | self.assertItemsEqual(deps, ["in 1.h", "in 2.h"]) 378 | 379 | def testDepfilesPathsNormalization(self): 380 | depfile = r"""out//out1 ./out2: ./../in1.h /a/../in2.h""" 381 | parser = depslint.DepfileParser() 382 | targets, deps = parser.parse_depfile(depfile) 383 | self.assertItemsEqual(targets, ["out/out1", "out2"]) 384 | self.assertItemsEqual(deps, ["../in1.h", "/in2.h"]) 385 | 386 | 387 | class TraceParserTests(unittest.TestCase): 388 | def testIterateTargetRules(self): 389 | input = """{'OUT': ['out1', 'out2'], 'IN': ['in1', 'in2']} 390 | {'OUT': ['out3'], 'IN': []} 391 | """ 392 | parser = depslint.TraceParser(input.splitlines(True)) 393 | parser.iterate_target_rules() 394 | edges = list(parser.iterate_target_rules()) 395 | 396 | e = edges.pop(0) 397 | self.assertItemsEqual(e.targets, ['out1', 'out2']) 398 | self.assertItemsEqual(e.deps, ['in1', 'in2']) 399 | self.assertItemsEqual(e.depfile_deps, []) 400 | self.assertItemsEqual(e.order_only_deps, []) 401 | 402 | e = edges.pop(0) 403 | self.assertItemsEqual(e.targets, ['out3']) 404 | self.assertItemsEqual(e.deps, []) 405 | self.assertItemsEqual(e.depfile_deps, []) 406 | self.assertItemsEqual(e.order_only_deps, []) 407 | 408 | self.assertItemsEqual(edges, list()) 409 | -------------------------------------------------------------------------------- /depstrace.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # 3 | # Copyright 2013 Maxim Kalaev 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 
7 | # You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | 17 | import optparse 18 | import os 19 | import re 20 | import subprocess 21 | import sys 22 | #import tempfile 23 | 24 | _NINJA_PROG_NAME = 'ninja' 25 | _DEFAULT_OUTFILE = 'deps.lst' 26 | _STRACE_LOG = 'strace_log.txt' 27 | _STRACE_FIFO = '/tmp/_strace_log_fifo' # TODO: use tempfile 28 | 29 | _FILEOPS=r'open|(sym)?link|rename|chdir|creat' # TODO: handle |openat? 30 | _PROCOPS=r'clone|execve|v?fork' 31 | _UNUSED=r'l?chown(32)?|[gs]etxattr|fchmodat|rmdir|mkdir|unlinkat|utimensat|getcwd|chmod|statfs(64)?|l?stat(64)?|access|readlink|unlink|exit_group|waitpid|wait4|arch_prctl|utime' 32 | 33 | _ARG = (r'\{[^}]+\}|' + # {st_mode=S_IFREG|0755, st_size=97736, ...} 34 | r'"[^"]+"|' + # "tst.o" 35 | r'\[[^]]+\]|' + # [{WIFEXITED(s) && WEXITSTATUS(s) == 0}] 36 | r'\S+') # O_WRONLY|O_CREAT|O_TRUNC|O_LARGEFILE, et. al. 37 | _OPS = '%s|%s|%s' % (_FILEOPS, _PROCOPS, _UNUSED) 38 | 39 | global _verbose 40 | _verbose = 0 41 | 42 | def V0(*strings): 43 | if _verbose >= 0: 44 | print " ".join(strings) 45 | 46 | def V1(*strings): 47 | if _verbose >= 1: 48 | print " ".join(strings) 49 | 50 | def V2(*strings): 51 | if _verbose >= 2: 52 | print " ".join(strings) 53 | 54 | def V3(*strings): 55 | if _verbose >= 3: 56 | print " ".join(strings) 57 | 58 | def fatal(msg, ret=-1): 59 | if _verbose < 0: 60 | return 61 | sys.stdout.flush() 62 | msg = "FATAL: %s" % msg 63 | print >>sys.stderr, "\033[1;41m%s\033[0m" % msg 64 | sys.exit(ret) 65 | 66 | def warn(msg): 67 | if _verbose < 0: 68 | return 69 | print "\033[1;33mWARNING: %s\033[0m" % msg 70 | 71 | def info(msg): 72 | if _verbose < 0: 73 | return 74 | print "\033[1;32mINFO: %s\033[0m" % msg 75 | 76 | class TracedRule(object): 77 | def __init__(self, lineno): 78 | self.deps = set() 79 | self.outputs = set() 80 | 81 | # Debug info 82 | self.pids = set() 83 | self.lineno = lineno 84 | 85 | def add_dep(self, path): 86 | self.deps.add(path) 87 | 88 | def add_output(self, path): 89 | self.outputs.add(path) 90 | 91 | def add_pid(self, pid): 92 | self.pids.add(pid) 93 | 94 | def get_deps_filtered(self): 95 | # Complex scripts may create intermediate outputs and then 96 | # reconsume these as inputs, therefore we don't consider modifed 97 | # files as dependendencies. 98 | return self.deps - self.outputs 99 | 100 | def get_outputs_filtered(self): 101 | # Filter out outputs which were deleted (consider these 'temporary' files) 102 | # existing_outputs = [f for f in self.outputs if os.path.lexists(f)] 103 | # return existing_outputs 104 | return self.outputs 105 | 106 | class DepsTracer(object): 107 | # Regular expressions for parsing syscall in strace log 108 | # TODO: this is VERY slow. We can easily improve this if anyone cares... 109 | _file_re = re.compile(r'(?P\d+)\s+' + 110 | r'(?P%s)\(' % _OPS + 111 | r'(?P%s)?(, (?P%s))?(, (%s))*' % (_ARG,_ARG,_ARG) + 112 | r'\) = (?P-?\d+|\?)') 113 | 114 | # Regular expressions for joining interrupted lines in strace log 115 | _unfinished_re = re.compile(r'(?P(?P\d+).*)\s+$') 116 | _resumed_re = re.compile(r'(?P\d+)\s+<\.\.\. 
\S+ resumed> (?P.*)') 117 | 118 | def __init__(self, build_dir=None, strict=False): 119 | self._test_strace_version() 120 | self.build_dir = os.path.abspath(build_dir or os.getcwd()) 121 | self.logfile = None 122 | self.unmatched_lines = [] 123 | self.traced_rules = list() 124 | self.cur_lineno = 0 125 | self.pid2rule = dict() # pid -> TracedRule (many to one is allowed) 126 | self.working_dirs = dict() # pid -> cwd 127 | self.strict = False 128 | 129 | def createRule(self, pid): 130 | r = TracedRule(self.cur_lineno) 131 | r.add_pid(pid) 132 | self.traced_rules.append(r) 133 | self.pid2rule[pid] = r 134 | return r 135 | 136 | def norm_path(self, cwd, path): 137 | path = os.path.join(cwd, path) 138 | path = os.path.normpath(path) 139 | 140 | # Make paths relative to build_dir when possible 141 | if os.path.isabs(path) and path.startswith(self.build_dir): 142 | path = path[len(self.build_dir):] 143 | path = path.lstrip(os.path.sep) 144 | 145 | return path 146 | 147 | def add_dep(self, pid, path): 148 | if not self._is_in_buildtree(path): 149 | return 150 | rule = self.pid2rule.get(pid) 151 | if rule: 152 | rule.add_dep(path) 153 | 154 | def add_output(self, pid, path): 155 | if not self._is_in_buildtree(path): 156 | return 157 | rule = self.pid2rule.get(pid) 158 | if rule: 159 | rule.add_output(path) 160 | 161 | def _is_in_buildtree(self, norm_path): 162 | # All paths which are in build tree were converted to relative by here 163 | in_build_tree = not os.path.isabs(norm_path) 164 | return in_build_tree 165 | 166 | def _test_strace_version(self): 167 | try: 168 | subprocess.check_call(['strace', '-o/dev/null','-etrace=file,process', 'true']) 169 | #TODO: actually test strace version... 170 | except subprocess.CalledProcessError: 171 | print >>sys.stderr, "strace is missing or incompatible" 172 | sys.exit(-1) 173 | 174 | def trace(self, cmd): 175 | """ 176 | Run build script cmd under strace as: 'strace ' and factor out a list of 'rules' 177 | with dependencies and outputs (judging by files opened or modified). 178 | 179 | Return (status code, list of rule objects). 180 | """ 181 | # Note (*) we are tracing now all system calls classified as 'file' or 'process' 182 | # and warn if we see something unrecognizable to make sure we don't miss something important. 183 | # TODO: this approach is cpu-expensive, consider alternatives. 184 | self.logfile = file(_STRACE_LOG, "w") 185 | fifopath = _STRACE_FIFO 186 | #os.unlink(fifopath) - TBD + catch exception 187 | os.mkfifo(fifopath) 188 | try: 189 | command = ['strace', 190 | '-o%s' % fifopath, 191 | '-f', # Follow child processes 192 | '-a1', # Only one space before return values 193 | '-s0', # Print non-filename strings really short to keep parser simpler 194 | '-etrace=file,process', # Trace syscals related to file and process operations (*) 195 | '-esignal=none'] + cmd 196 | V1("Running: %r" % command) 197 | 198 | strace_popen = subprocess.Popen(command) 199 | rules = self.parse_trace(file(fifopath)) 200 | finally: 201 | os.unlink(fifopath) 202 | 203 | # Strace return code. 
204 | retcode = strace_popen.wait() 205 | return retcode, rules 206 | 207 | def parse_trace(self, strace_out): 208 | # Init strace log parser 209 | log_iterator = self._strace_log_iter(strace_out) 210 | 211 | # Look for 'ninja' process invocation 212 | ninja_pid = None 213 | for pid, op, ret, arg1, _ in log_iterator: 214 | if op == 'execve' and ret == '0': 215 | path = os.path.normpath(arg1) 216 | if path.endswith(_NINJA_PROG_NAME): 217 | ninja_pid = pid 218 | V1("detected ninja process invocation: '%s'" % self.cur_line.strip()) 219 | break 220 | if ninja_pid is None: 221 | print >>sys.stderr, "Ninja ('%s') process invocation could not be detected" % _NINJA_PROG_NAME 222 | sys.exit(-1) 223 | 224 | # Track processes spawn under 'ninja' and record their inputs/outputs, 225 | # grouped by 'rule'. 'Rule' is considered to be process tree 226 | # parented directly under 'ninja'. 227 | for pid, op, ret, arg1, arg2 in log_iterator: 228 | # Ignore failed syscalls 229 | if ret == '-1': 230 | continue 231 | 232 | # Process successful system calls 233 | cwd = self.working_dirs.get(pid, '.') 234 | if op in ('clone', 'fork', 'vfork') and ret != '?': 235 | new_pid = ret 236 | self.working_dirs[new_pid] = cwd 237 | # Consider all processes forked by ninja directly a 'build rule' process tree 238 | if pid == ninja_pid: 239 | V2("Creating a build rule record for pid %s, line %d in strace log" % (new_pid, self.cur_lineno)) 240 | self.createRule(new_pid) 241 | else: 242 | rul = self.pid2rule.get(pid) 243 | rul.add_pid(new_pid) 244 | self.pid2rule[new_pid] = rul 245 | elif op == 'chdir': 246 | new_cwd = os.path.join(cwd, arg1) 247 | self.working_dirs[pid] = new_cwd 248 | elif op == 'open': 249 | path = self.norm_path(cwd, arg1) 250 | mode = arg2 251 | if 'O_DIRECTORY' in mode: 252 | # Filter out 'opendir'-s.TBD: does this test worth the cycles? 253 | continue 254 | if 'O_RDONLY' in mode: 255 | self.add_dep(pid, path) 256 | else: 257 | self.add_output(pid, path) 258 | elif op == 'execve': 259 | path = self.norm_path(cwd, arg1) 260 | self.add_dep(pid, path) 261 | elif op == 'symlink': 262 | path = self.norm_path(cwd, arg2) 263 | self.add_output(pid, path) 264 | elif op in ('rename', 'link'): 265 | from_path = self.norm_path(cwd, arg1) 266 | to_path = self.norm_path(cwd, arg2) 267 | self.add_dep(pid, from_path) 268 | self.add_output(pid, to_path) 269 | 270 | return self.traced_rules 271 | 272 | def _on_parsing_error(self, msg, line=None): 273 | line = line or self.cur_line 274 | warn("Strace output parsing error: %r" % msg) 275 | V0("........ %r @line: %d)" % (line, self.cur_lineno)) 276 | if self.strict: 277 | fatal("terminating due to a parsing error in strict mode") 278 | V0("........ 
(tracer output may be incomplete)") 279 | self.unmatched_lines.append(line.strip()) 280 | 281 | def _strace_log_iter(self, strace_log): 282 | interrupted_syscalls = {} # pid -> interrupted syscall log beginning 283 | for self.cur_lineno, line in enumerate(strace_log, start=1): 284 | self.cur_line = line 285 | if self.logfile: 286 | self.logfile.write(line) 287 | 288 | # Join unfinished syscall traces to a single line 289 | match = self._unfinished_re.match(line) 290 | if match: 291 | pid, body = match.group('pid'), match.group('body') 292 | if pid in interrupted_syscalls: 293 | self._on_parsing_error("unexpected unfinished syscall") 294 | # Replacing the previous 'unfinished' 295 | interrupted_syscalls[pid] = body 296 | continue 297 | match = self._resumed_re.match(line) 298 | if match: 299 | pid, body = match.group('pid'), match.group('body') 300 | if pid not in interrupted_syscalls: 301 | self._on_parsing_error("unexpected resumed syscall") 302 | continue 303 | line = interrupted_syscalls[pid] + body 304 | del interrupted_syscalls[pid] 305 | 306 | # Parse syscall line 307 | fop = self._file_re.match(line) 308 | if not fop: 309 | self._on_parsing_error("unmatched strace output line", line) 310 | continue 311 | 312 | pid, op, ret = fop.group('pid'), fop.group('op'), fop.group('ret') 313 | arg1, arg2 = fop.group('arg1'), fop.group('arg2') 314 | arg1 = arg1.strip('"') if arg1 else arg1 315 | arg2 = arg2.strip('"') if arg2 else arg2 316 | V2("pid=%s, op='%s', arg1=%s, arg2=%s, ret=%s" % (pid, op, arg1, arg2, ret)) 317 | yield (pid, op, ret, arg1, arg2) 318 | if interrupted_syscalls: 319 | warn("excessive interrupted syscall(s) at the end of trace:") 320 | for k, v in interrupted_syscalls.iteritems(): 321 | V0("........ %s: %r" % (k, v)) 322 | if self.strict: 323 | fatal("terminating due to a parsing error in strict mode") 324 | V0("(probably strace bugs, consider upgrading 'strace')") 325 | V0("(tracer output may be incomplete)") 326 | 327 | 328 | def process_results(options, rules, unmatched_lines): 329 | # Display unmatched lines.. 330 | if unmatched_lines: 331 | warn("Summary of all unmatched lines:") 332 | V0("........ (probably strace bugs, consider upgrading 'strace')") 333 | for l in unmatched_lines: 334 | V0("........ 
unmatched: %r" % l) 335 | 336 | # Log results 337 | info("Detected %d build rules in total, writing log: %s" % (len(rules), options.outfile)) 338 | with file(options.outfile, "w") as f: 339 | for rule in rules: 340 | deps = sorted(rule.get_deps_filtered()) 341 | outputs = sorted(rule.get_outputs_filtered()) 342 | f.write("{'OUT': %r, 'IN': %r, 'LINE': %d, 'PID': %r}\n" % ( 343 | outputs, deps, rule.lineno, "|".join(rule.pids))) 344 | info("Done") 345 | 346 | def tracecmd(options, args): 347 | tracer = DepsTracer(strict=options.strict) 348 | 349 | # Build & trace 350 | status, rules = tracer.trace(cmd=args) 351 | if status: 352 | print >>sys.stderr, "**ERROR**: command execution has failed: %r" % args 353 | print >>sys.stderr, "**ERROR**: cwd:", os.getcwd() 354 | return status 355 | 356 | process_results(options, rules, tracer.unmatched_lines) 357 | return 0 358 | 359 | def parse_tracefile(options): 360 | tracer = DepsTracer(strict=options.strict) 361 | 362 | # Process pre-recorded tracefile 363 | rules = tracer.parse_trace(file(options.from_tracefile, "r")) 364 | process_results(options, rules, tracer.unmatched_lines) 365 | return 0 366 | 367 | if __name__ == '__main__': 368 | parser = optparse.OptionParser(prog='depstrace', 369 | version='%prog: git', 370 | usage="usage: %prog [options] -- [command [arg ...]]") 371 | parser.add_option('-o', '--outfile', default=_DEFAULT_OUTFILE, 372 | help="store output to the specified file [default: %default]") 373 | parser.add_option('-r', '--from_tracefile', 374 | help="parse pre-recorded strace output" 375 | " instead of tracing the command") 376 | parser.add_option('-v', '--verbose', action='count', default=0) 377 | parser.add_option('--strict', action='store_true', default=False, 378 | help="Don't tolerate parsing errors when tracing") 379 | (options, args) = parser.parse_args() 380 | 381 | # Global verbosity settings 382 | _verbose = options.verbose 383 | 384 | if options.from_tracefile: 385 | # Process an existing strace output file instead of 386 | # actually running the command under strace 387 | info("""Processing tracefile: %r""" % options.from_tracefile) 388 | parse_tracefile(options) 389 | sys.exit(0) 390 | 391 | # Run process, trace it and process the traces 392 | if not args: 393 | print >>sys.stderr, "ERROR: invalid command line" 394 | print >>sys.stderr, "Either '-r' or a 'command' should be specified." 395 | sys.exit(-1) 396 | info("""Tracing: %r""" % args) 397 | ret = tracecmd(options, args) 398 | sys.exit(ret) 399 | -------------------------------------------------------------------------------- /depslint.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # 3 | # Copyright 2013 Maxim Kalaev 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 7 | # You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 
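# Note: depslint consumes the trace records that depstrace.py writes to
# 'deps.lst'. Each line there is a Python dict literal, one per traced build
# rule, roughly of the form (illustrative values only):
#
#   {'OUT': ['foo.o'], 'IN': ['foo.c', 'foo.h'], 'LINE': 42, 'PID': '1234|1250'}
#
# so a record can be recovered with something like ast.literal_eval(line)
# if the trace needs to be post-processed outside this tool.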
16 | 17 | import argparse 18 | import os 19 | import re 20 | import sys 21 | import time 22 | from collections import defaultdict 23 | 24 | _DEPSLINT_CFG = '.depslint' 25 | _DEFAULT_TRACEFILE = 'deps.lst' 26 | _DEFAULT_MANIFEST = 'build.ninja' 27 | _SUPPORTED_NINJA_VER = "1.2" 28 | 29 | # Matching targets are silently dropped when loading trace file, as if these 30 | # were never accessed. 31 | _IGNORED_SUFFICES = ['.d', '.pyc', '.rsp'] 32 | 33 | # Implicit dependencies list. Stores pairs of regexeps matching a 34 | # target and its implicit dependencies, and used to discard false/irrelevant 35 | # alarms regrading missing dependencies detected by the tool. 36 | # 37 | # For example, you may have a tool (e.g., calc_crc.sh) in the build tree 38 | # invoked for each target built. It will be considered a dependency by deplint, 39 | # but you couldn't care less. Adding ("", r"calc_crc\.sh") to the list 40 | # will suppress the errors. 41 | 42 | _IMPLICIT_DEPS_MATCHERS = [] 43 | 44 | _module_path = os.path.join(os.getcwd(), __file__) 45 | 46 | global _verbose 47 | _verbose = 0 48 | 49 | global _logfile 50 | _logfile = None 51 | def _set_logger(filename): 52 | if not filename: 53 | filename = '/dev/null' 54 | global _logfile 55 | _logfile = open(filename, "w") 56 | 57 | def log_msg(level, msg, trunc_lines=True, ansi=None, fd=sys.stdout): 58 | if _verbose < 0: 59 | # Testing mode: be silent 60 | return 61 | 62 | if _verbose >= level: 63 | _logfile.write("[%s] %s\n" % (time.asctime(), msg)) 64 | 65 | if trunc_lines and len(msg) > 140: 66 | msg = msg[:137] + "..." 67 | if ansi: 68 | _ANSI_END = '\033[0m' 69 | msg = ansi + msg + _ANSI_END 70 | print >>fd, msg 71 | fd.flush() 72 | return 73 | 74 | # Always log levels 0 and 1 75 | if level in (0, 1): 76 | _logfile.write("[%s] %s\n" % (time.asctime(), msg)) 77 | 78 | def H0(): 79 | log_msg(0, "") 80 | 81 | def V0(msg, trunc_lines=True): 82 | log_msg(0, msg, trunc_lines) 83 | 84 | def V1(msg, trunc_lines=True): 85 | log_msg(1, msg, trunc_lines) 86 | 87 | def V2(msg, trunc_lines=True): 88 | log_msg(2, msg, trunc_lines) 89 | 90 | def V3(msg, trunc_lines=True): 91 | log_msg(3, msg, trunc_lines) 92 | 93 | def fatal(msg, ret=-1): 94 | msg = "FATAL: %s" % msg 95 | log_msg(0, msg, trunc_lines=False, ansi='\033[1;41m', fd=sys.stderr) 96 | sys.exit(ret) 97 | 98 | def error(msg): 99 | msg = "ERROR: %s" % msg 100 | log_msg(0, msg, trunc_lines=False, ansi='\033[1;31m') 101 | 102 | def warn(msg): 103 | msg = "WARNING: %s" % msg 104 | log_msg(0, msg, trunc_lines=False, ansi='\033[1;33m') 105 | 106 | def info(msg): 107 | msg = "INFO: %s" % msg 108 | log_msg(0, msg, ansi='\033[1;32m') 109 | 110 | def debug(msg): 111 | msg = "DEBUG: %s" % msg 112 | log_msg(0, msg, ansi='\033[1;34m') 113 | 114 | def is_ignored(target): 115 | return any(target.endswith(suffix) for suffix in _IGNORED_SUFFICES) 116 | 117 | def match_implicit_dependency(dep, targets): 118 | """Verify if any of paths in 'targets' depends implicitly on 'dep' 119 | to inhibit a 'missing dependency' error.""" 120 | V3("looking for an implicit dependency for any of %r on %r" % (targets, dep)) 121 | for target_re, dep_re in _IMPLICIT_DEPS_MATCHERS: 122 | if not re.match(dep_re, dep): 123 | continue 124 | V3("Found a rule matching %r, checking for any target match.." 
% dep) 125 | for t in targets: 126 | if re.match(target_re, t): 127 | return t 128 | return None 129 | 130 | def trc_filter_ignored(targets): 131 | return [t for t in targets if not is_ignored(t)] 132 | 133 | def norm_paths(paths): 134 | return [os.path.normpath(p) for p in paths] 135 | 136 | def sets_union(iterable_of_sets): 137 | u = set() 138 | for s in iterable_of_sets: 139 | u.update(s) 140 | return u 141 | 142 | class DepfileParser(object): 143 | _depfile_parse_re = re.compile(r'\s*(?P.*?)\s*' 144 | r'(?.*?)\s*$', re.DOTALL) 146 | _depfile_split_re = re.compile(r'(? _SUPPORTED_NINJA_VER: 303 | warn("Ninja version required in manifest is newer than supported (%r vs %r)", 304 | self.ninja_required_version, _SUPPORTED_NINJA_VER) 305 | warn("Trying to continue but the results may be meaningless...") 306 | 307 | _build_re = re.compile(r'build\s+(?P.+)\s*'+ 308 | r'(?\S+)'+ 309 | r'\s*(?P.*)\s*$') 310 | def _handle_build_blk(self, blk): 311 | V3("** Parsing build block: '%s'" % (blk[0], )) 312 | match = re.match(self._build_re, blk[0]) 313 | if not match: 314 | raise Exception("Error parsing manifest at line:%d: '%s'" % (self.lineno-len(blk), blk[0])) 315 | targets, rule, all_deps = match.groups() 316 | ins, implicit, order = self._split_deps(all_deps) 317 | 318 | # TODO: fix to evaluate along the parsing to avoid possible cycles 319 | edge_attrs = dict(self._parse_attributes(blk[1:])) 320 | 321 | # prep attributes scope 322 | scope = self.global_attributes.copy() 323 | scope.update(self._get_rule_attrs(rule)) 324 | scope.update(edge_attrs) 325 | 326 | # evaluate targets and dependencies 327 | targets = norm_paths(self._split_unescape_and_eval(targets, scope)) 328 | ins = norm_paths(self._split_unescape_and_eval(ins, scope)) 329 | implicit = norm_paths(self._split_unescape_and_eval(implicit, scope)) 330 | order = norm_paths(self._split_unescape_and_eval(order, scope)) 331 | 332 | # Add automatic variables 333 | edge_attrs.update({'out':" ".join(targets), 'in':" ".join(ins)}) 334 | 335 | edge = BuildRule(targets=targets, 336 | deps=ins + implicit, 337 | depfile_deps=[], 338 | order_only_deps=order, 339 | rule_name=rule) 340 | V2("** BuildRule** ", str(edge)) 341 | self.edges.append(edge) 342 | self.edges_attributes[edge] = edge_attrs 343 | 344 | _rule_re = re.compile(r'rule\s+(?P.+?)\s*$') 345 | def _handle_rule_blk(self, blk): 346 | match = re.match(self._rule_re, blk[0]) 347 | if not match: 348 | raise Exception("Error parsing manifest at line:%d: '%s'" % (self.lineno-len(blk), blk[0])) 349 | rule = match.group('rule') 350 | attributes = dict(self._parse_attributes(blk[1:])) 351 | self.rules[rule] = dict(attributes=attributes) 352 | 353 | _attr_re = re.compile(r'\s*(?P\w+)\s*=\s*(?P.*?)\s*$') # key = val 354 | def _parse_attributes(self, blk): 355 | #TODO: fix to eval/expand attributes as we parse! 356 | 357 | for line in blk: 358 | match = re.match(self._attr_re, line) 359 | if not match: 360 | raise Exception("Error parsing manifest, expecting key=val, got: '%s'" % line) 361 | yield (match.group('k'), match.group('v')) 362 | 363 | def _iterate_manifest_blocks(self, fh): 364 | blk = [] 365 | for line in self._iterate_manifest_lines(fh): 366 | # After stripping comments and joining escaped EOLs, 367 | # 'block' always starts with a 'header' at position '0', 368 | # then optionally followed by a couple of 'indented key = val' lines. 
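            # For example, a manifest fragment like (illustrative only):
            #
            #   rule cc
            #     command = gcc -MMD -MF $out.d -c $in -o $out
            #   build foo.o: cc foo.c | gen.h || prepare_headers
            #     depfile = foo.o.d
            #
            # is yielded as two blocks: ['rule cc', '  command = ...'] and
            # ['build foo.o: cc foo.c | gen.h || prepare_headers', '  depfile = foo.o.d'].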
369 | if blk and not line[0].isspace(): 370 | yield blk 371 | blk = [] 372 | blk.append(line) 373 | if blk: 374 | yield blk 375 | 376 | def _iterate_manifest_lines(self, fh): 377 | acc = [] 378 | for line in fh: 379 | self.lineno += 1 380 | # Skip empty lines (?) and comments 381 | if not line or line.isspace() or line.startswith('#'): 382 | continue 383 | # Join escaped EOLs 384 | if line.endswith('$\n'): 385 | acc.append(line[:-2]) 386 | continue 387 | yield str.rstrip("".join(acc) + line) 388 | acc = [] 389 | if acc: 390 | raise Exception("Error parsing manifest, unexpected end of file after: %s" % acc[-1]) 391 | 392 | _split_all_deps_re = re.compile(r'(?P.*?)' # Explicit deps 393 | r'((?[^|].*?)?)?' # Unescaped | + implicit deps 394 | r'((?.*))?' # Unescaped || + order deps 395 | r'$') 396 | def _split_deps(self, s): 397 | if not s or s.isspace(): 398 | return ("", "", "") 399 | match = re.match(self._split_all_deps_re, s) 400 | if not match: 401 | raise Exception("Error parsing deps: '%s'" % (s,)) 402 | ins, implicit, order = match.group('in'), match.group('deps'), match.group('ord') 403 | return (ins or "", implicit or "", order or "") 404 | 405 | def _unescape(self, string): 406 | # Unescape '$ ', '$:', '$$' sequences 407 | return re.sub(r'\$([ :$])', r'\1', string) 408 | 409 | _deps_sep_re = re.compile(r'(?> split_unescape_and_eval('%s') -> '%s'" % (s, lst)) 424 | return lst 425 | 426 | _attr_sub_re = re.compile('(?\w+)(?(1)})') # $attr or ${attr} 427 | def _eval_attribute(self, scope, attribute): 428 | # TBD: use empty strings for undefined attributes or raise? 429 | V3(">>> evaluating attribute: '%s'" % attribute) 430 | def evaluator(match): 431 | V3(">>> Evaluating replacement for attr:", match.group('attr')) 432 | attribute_val = scope.get(match.group('attr'), "") 433 | return self._eval_attribute(scope, attribute_val) 434 | evaluated_attr = re.sub(self._attr_sub_re, evaluator, attribute) 435 | V3(">>> evaluated attribute: '%s' -> '%s'" % (attribute, evaluated_attr)) 436 | return evaluated_attr 437 | 438 | # TODO: fix variables substituation according to ninja's rules. E.g., need to eval as we parse. 439 | def _eval_edge_attribute(self, edge, attribute): 440 | scope = self.global_attributes.copy() 441 | scope.update(self._get_rule_attrs(edge.rule_name)) 442 | scope.update(self._get_edge_attrs(edge)) 443 | V3(">> eval_edge_attribute('%s': '%s')" % (scope, attribute)) 444 | attribute_val = scope.get(attribute, "") 445 | return self._unescape(self._eval_attribute(scope, attribute_val)) 446 | 447 | class Edge(object): 448 | def __init__(self, provides, requires, is_phony): 449 | self.provides = frozenset(provides) 450 | self.requires = frozenset(requires) 451 | self.is_phony = is_phony 452 | self.rank = None 453 | 454 | class Graph(object): 455 | def __init__(self, from_brules, targets_wanted, is_clean_build_graph): 456 | self.target2edge = dict() 457 | self.source2edges = defaultdict(set) 458 | 459 | self.duplicate_target_rules = set() 460 | 461 | self.top_targets = list(targets_wanted) 462 | self.targets_by_ranks = defaultdict(set) 463 | 464 | self.target_deps_closure = dict() 465 | self.target_products_closure = dict() 466 | 467 | for brule in from_brules: 468 | # Clean build graph - as everything is rebuilt, only build 469 | # order matters. Depfiles do not exist. Incremental build 470 | # - depfiles exist, and order rules can be neglected 471 | # assuming that clean order build is correct (and a 472 | # missing depfile triggers target rebuild). 
Implicit and 473 | # explicit dependencies from manifest always play. 474 | deps = brule.deps + (brule.order_only_deps if is_clean_build_graph else brule.depfile_deps) 475 | edge = Edge(provides=brule.targets, 476 | requires=deps, 477 | is_phony=(brule.rule_name == "phony")) 478 | self._add_edge(edge) 479 | 480 | self._eval_graph_properties() 481 | 482 | def _add_edge(self, edge): 483 | # Populate targets dictionary, take note of duplicate target 484 | # rules. 485 | for t in edge.provides: 486 | if self.target2edge.get(t): 487 | self.duplicate_target_rules.add(t) 488 | self.target2edge[t] = edge 489 | for s in edge.requires: 490 | self.source2edges[s].add(edge) 491 | 492 | def _eval_graph_properties(self): 493 | if not self.top_targets: 494 | V1("Finding all terminal targets...") 495 | self.top_targets = list(self._find_top_targets()) 496 | #TODO: filter out top_targets w/o an incoming edge? 497 | 498 | V1("Terminal targets (up to first 5): %r" % self.top_targets[:5]) 499 | V1("Calculating deps closures and nodes ranks...") 500 | for target in self.top_targets: 501 | self._calc_deps_closure_in_tree(target) 502 | 503 | V1("Calculating targets product closures...") 504 | self._calc_products_closure_in_tree() 505 | V1("Done") 506 | 507 | def get_edge(self, target): 508 | """Returns an edge corresponding to target build rule, or 509 | 'None' if there is no rule to build the target (e.g., a static target).""" 510 | return self.target2edge.get(target, None) 511 | 512 | def is_phony_target(self, target): 513 | edge = self.target2edge.get(target) 514 | if not edge: 515 | return False 516 | return edge.is_phony 517 | 518 | def is_static_target(self, target): 519 | """Returns 'True' if the target is 'wanted' and unless the 520 | target is a product of the non-all-phony edges path. 'False' 521 | otherwise.""" 522 | return target in self.targets_by_ranks[0] 523 | 524 | def get_any_path_to_top(self, target): 525 | out_edges = self.source2edges.get(target, []) 526 | out_edges = [e for e in out_edges if self._is_wanted(e)] 527 | if not out_edges and target not in self.top_targets: 528 | # If the queried target was 'wanted', there should be a 529 | # path to a 'wanted' top target. 
530 | raise Exception("Unknown or unwanted target: %r" % target) 531 | 532 | for edge in out_edges: 533 | for out in edge.provides: 534 | return [out] + self.get_any_path_to_top(out) 535 | 536 | # No outgoing edges, reached the top of the 'wanted' sub-graph 537 | return [] 538 | 539 | def get_deps_closure(self, target): 540 | try: 541 | return self.target_deps_closure[target] 542 | except KeyError: 543 | raise Exception("Unknown or unwanted target: %r" % target) 544 | 545 | def get_product_rules_closure(self, target): 546 | try: 547 | return self.target_products_closure[target] 548 | except KeyError: 549 | raise Exception("Unknown or unwanted target: %r" % target) 550 | 551 | def resolve_phony(self, targets): 552 | """Substitute phone targets by non-phony dependecies, unless 553 | target dependencies list is empty.""" 554 | resolved = [] 555 | for t in targets: 556 | edge = self.target2edge.get(t) 557 | if not edge or not edge.is_phony or not edge.requires: 558 | resolved.append(t) 559 | continue 560 | resolved.extend(self.resolve_phony(edge.requires)) 561 | return resolved 562 | 563 | def iterate_targets_by_rank(self, include_static_targets): 564 | for rank in sorted(self.targets_by_ranks.keys()): 565 | if rank == 0 and not include_static_targets: 566 | continue 567 | for tpath in self.targets_by_ranks[rank]: 568 | #TODO: sort by significance 569 | yield tpath 570 | 571 | def sorted_by_products_num(self, targets, reverse=False): 572 | def by_products(x,y): 573 | return cmp(len(self.get_product_rules_closure(x)), len(self.get_product_rules_closure(y))) 574 | return sorted(targets, cmp=by_products, reverse=reverse) 575 | 576 | def _find_top_targets(self): 577 | # Top targets are not required by any other target in the build graph 578 | top_targets_set = set(self.target2edge.keys()) - set(self.source2edges.keys()) 579 | if not top_targets_set and self.target2edge: 580 | raise Exception("ERROR: could not isolate top targets, check inputs for dependency loops") 581 | return sorted(top_targets_set) 582 | 583 | def _calc_deps_closure_in_tree(self, target): 584 | visited = list() 585 | return self._do_calc_deps_closure(target, visited) 586 | 587 | def _do_calc_deps_closure(self, target, visited): 588 | # Cycles detection 589 | if target in visited: 590 | raise Exception("Dependencies loop detected: %r" % (visited + [target],)) 591 | 592 | edge = self.target2edge.get(target, None) 593 | if not edge: 594 | # Static source 595 | self.targets_by_ranks[0].add(target) 596 | self.target_deps_closure[target] = set() 597 | return 0 598 | 599 | # Already processed? 
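        # (edge.rank is None until the closure below has been computed, so a
        # non-None rank doubles as the 'visited/processed' marker; _is_wanted()
        # relies on the same convention.)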
600 | if edge.rank is not None: 601 | return edge.rank 602 | 603 | max_children_rank = 0 604 | closure = set(edge.requires) 605 | for p in edge.requires: 606 | max_children_rank = max(self._do_calc_deps_closure(p, visited + [target]), max_children_rank) 607 | closure.update(self.target_deps_closure[p]) 608 | 609 | # Note: phony targets don't climb ranks, 610 | # non-static target w/no dependencies are ranked '1' 611 | rank = edge.rank = max_children_rank + (0 if edge.is_phony else 1) 612 | for t in edge.provides: 613 | self.target_deps_closure[t] = closure 614 | self.targets_by_ranks[rank].add(t) 615 | return rank 616 | 617 | def _is_wanted(self, edge): 618 | return edge.rank is not None 619 | 620 | def _calc_products_closure_in_tree(self): 621 | reachable_sources = self.targets_by_ranks[0] 622 | visited = list() 623 | # DFS-traverse the DAG bottom-up 624 | for source in reachable_sources: 625 | self._do_calc_products_closure(source, visited) 626 | 627 | def _do_calc_products_closure(self, source, visited): 628 | # TODO: rewrite to use top-down BFS (to limit scope to the specified targets), 629 | # see then if it is possible to do 'deps' closure calculation 'on the way back'? 630 | 631 | # Cycles detection (just in case) 632 | if source in visited: 633 | raise Exception("Dependencies loop detected: %r" % (visited + [source],)) 634 | 635 | # Already calculated? 636 | my_products = self.target_products_closure.get(source, set()) 637 | if my_products: 638 | return my_products 639 | 640 | for out_edge in self.source2edges.get(source, []): 641 | if not self._is_wanted(out_edge): 642 | continue 643 | for p in out_edge.provides: 644 | products = self._do_calc_products_closure(p, visited + [source]) 645 | my_products.update(products) 646 | my_products.add(out_edge) 647 | self.target_products_closure[source] = my_products 648 | return my_products 649 | 650 | def create_graph(path, parser, targets=[], clean_build_graph=False): 651 | info("Building %s graph for '%s'.." 
% (
652 |         "order-only" if clean_build_graph else "dependency", path))
653 |     g = Graph(parser.iterate_target_rules(), targets, clean_build_graph)
654 |     return g
655 | 
656 | def load_config(path):
657 |     if not os.path.isfile(path):
658 |         V2("Note: no custom configuration file at: %r" % path)
659 |         return None
660 | 
661 |     try:
662 |         conf = {}
663 |         execfile(path, conf)
664 |     except Exception, e:
665 |         # TODO: give more helpful errors
666 |         fatal("Error loading configuration file: %r" % e)
667 | 
668 |     info("Loaded configuration file: %r" % path)
669 |     if conf.get('IGNORED_SUFFICES'):
670 |         global _IGNORED_SUFFICES
671 |         _IGNORED_SUFFICES = list(conf.get('IGNORED_SUFFICES'))
672 |         V1("Set ignored suffices to: %r" % _IGNORED_SUFFICES)
673 |     if conf.get('IMPLICIT_DEPS_MATCHERS'):
674 |         impl_deps = conf.get('IMPLICIT_DEPS_MATCHERS')
675 |         global _IMPLICIT_DEPS_MATCHERS
676 |         _IMPLICIT_DEPS_MATCHERS = list((re.compile(t), re.compile(s)) for t, s in impl_deps)
677 |         V1("Set implicit matchers to: %r" % impl_deps)
678 | 
679 |     return conf
680 | 
681 | def compare_dependencies(trace_graph, manifest_graph, clean_build):
682 |     missing = defaultdict(list)
683 |     ignored_missing = defaultdict(list)
684 |     for tp in manifest_graph.iterate_targets_by_rank(include_static_targets=False):
685 |         if manifest_graph.is_phony_target(tp):
686 |             V2("Skipping phony: %s" % tp)
687 |             continue
688 | 
689 |         edge_in_trace = trace_graph.get_edge(tp)
690 |         if not edge_in_trace:
691 |             warn("manifest target '%s' is not present in the strace graph" % tp)
692 |             continue
693 | 
694 |         for dep in edge_in_trace.requires - manifest_graph.get_deps_closure(tp):
695 |             if clean_build and trace_graph.is_static_target(dep):
696 |                 # Only dependencies on non-static targets are critical
697 |                 # for a clean build.
698 |                 continue
699 | 
700 |             if match_implicit_dependency(dep, [tp] + list(manifest_graph.get_deps_closure(tp))):
701 |                 # The dependency of 'tp' on 'dep' IS missing from the manifest graph,
702 |                 # but the implicit dependency rules cover it. Inhibit the warning.
703 |                 ignored_missing[tp].append(dep)
704 |                 continue
705 |             missing[tp].append(dep)
706 |     return missing, ignored_missing
707 | 
708 | def print_excessive_manifest_dependencies(manifest_graph, trace_graph):
709 |     manifest_targets = set(manifest_graph.target_deps_closure.iterkeys())
710 |     traced_targets = set(trace_graph.target_deps_closure.iterkeys())
711 |     excessive_by_targets = defaultdict(list)
712 |     for x in sorted(manifest_targets - traced_targets):
713 |         if manifest_graph.is_phony_target(x):
714 |             continue
715 |         path_to_top = manifest_graph.get_any_path_to_top(x)
716 |         immediate_parent = path_to_top[0]
717 |         excessive_by_targets[immediate_parent].append(x)
718 | 
719 |     if not excessive_by_targets:
720 |         info("No issues!")
721 |         return []
722 | 
723 |     warn("Targets with excessive dependencies: %d" % len(excessive_by_targets))
724 |     for t in manifest_graph.sorted_by_products_num(excessive_by_targets.keys(), reverse=True):
725 |         deps = excessive_by_targets[t]
726 |         V1("%s (%d excessive deps): %r {> '%s'}" % (t, len(deps), deps, "' > '".join(manifest_graph.get_any_path_to_top(t))))
727 |     return excessive_by_targets
728 | 
729 | def print_missing_dependencies(manifest_graph, missing, ignored_missing, clean_build):
730 |     dtype = "ORDER " if clean_build else ""
731 |     for t, t_deps in missing.iteritems():
732 |         error("target '%s' is missing %sdependencies on: %r" % (t, dtype, t_deps))
733 |         #TODO: print path from t to top in verbose mode?
734 | 
735 |     if ignored_missing:
736 |         warn("%sDependency errors inhibited for %d targets due to implicit dependency rules" % (dtype, len(ignored_missing)))
737 |         for t, t_ignored_deps in ignored_missing.iteritems():
738 |             V1("Ignoring missing %sdependencies of '%s' on %r" % (dtype, t, t_ignored_deps))
739 |             #TODO: print path from t to top in verbose mode?
740 | 
741 | def print_targets_by_ranks(graph):
742 |     for rank in sorted(graph.targets_by_ranks.keys(), reverse=True):
743 |         lst = ", ".join(graph.targets_by_ranks[rank])
744 |         V0("Rank %2d: %5d targets [%s]" % (rank, len(graph.targets_by_ranks[rank]), lst))
745 | 
746 | def print_targets_by_depending_products(graph):
747 |     static_wanted_sources = graph.targets_by_ranks[0]
748 |     nonstatic_wanted_rules = list(e for e in graph.target2edge.itervalues() if e.rank)
749 |     bins = [list() for x in xrange(0, 10)]
750 |     for t in graph.sorted_by_products_num(static_wanted_sources, reverse=True):
751 |         score = len(graph.get_product_rules_closure(t))
752 |         prct = score * 100.0 / len(nonstatic_wanted_rules)
753 |         bin = min(int(prct / 10), 9)  # clamp 100% into the last bin
754 |         bins[bin].append((t, score, prct))
755 |         V2("%5d (%2.0f%%): %r" % (score, prct, t))
756 |     for i, bin in enumerate(bins):
757 |         if not bin:
758 |             continue
759 |         V0("[%2d-%2d%%]: %5d targets [%s]" % (
760 |             10*i, 10*i+10, len(bin),
761 |             ", ".join("%s(%d%%)" % (b[0], b[2]) for b in bin)))
762 |     return bins
763 | 
764 | if __name__ == '__main__':
765 |     parser = argparse.ArgumentParser(prog='depslint')
766 |     parser.add_argument('-C', dest='dir', help='change to DIR before doing anything else')
767 |     parser.add_argument('-f', dest='manifest', default=_DEFAULT_MANIFEST, help='specify input ninja manifest')
768 |     parser.add_argument('-r', dest='tracefile', default=_DEFAULT_TRACEFILE, help='specify input trace file')
769 |     parser.add_argument('--conf', help='load custom configuration from CONF')
770 |     parser.add_argument('--stats', choices=['all'], help='evaluate and print build tree statistics')
771 |     parser.add_argument('-v', dest='verbose', action='count', default=0, help='increase verbosity level')
772 |     parser.add_argument('--version', action='version', version='%(prog)s: git')
773 |     parser.add_argument('targets', nargs='*', help='specify targets to verify, as passed to ninja when traced')
774 |     args = parser.parse_args()
775 | 
776 |     # Set global verbosity level
777 |     _verbose = args.verbose
778 |     _set_logger(filename='depslint.log')
779 | 
780 |     # Process "-C"
781 |     if args.dir:
782 |         V1("Changing working dir to: %r" % args.dir)
783 |         os.chdir(args.dir)
784 | 
785 |     # TODO: validate files existence (manifest, traces & config)
786 |     # TODO: validate top targets (verify exists, convert phony, etc)
787 | 
788 |     # Attempt loading a custom configuration file
789 |     # with custom 'IGNORED_SUFFICES' and 'IMPLICIT_DEPS_MATCHERS' lists.
790 |     if args.conf:
791 |         if load_config(args.conf) is None:
792 |             fatal("Couldn't load configuration file: %r" % args.conf)
793 |     else:
794 |         config_path = os.path.join(os.path.dirname(args.manifest), _DEPSLINT_CFG)
795 |         load_config(config_path)
796 | 
797 |     ### Parsing inputs
798 |     info("Parsing Ninja manifest..")
799 |     ninja_parser = NinjaManifestParser(file(args.manifest, "r"))
800 |     info("Parsing Trace log..")
801 |     trace_parser = TraceParser(file(args.tracefile, "r"))
802 | 
803 |     # If 'default' was specified in the ninja manifest, use it if no targets
804 |     # were selected on the command line.
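    # (a ninja manifest declares these with 'default <target> ...' statements,
    # which NinjaManifestParser is expected to surface via get_default_targets().)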
805 | wanted = args.targets or ninja_parser.get_default_targets() 806 | 807 | ### Build graphs 808 | ninja_clean_build_graph = create_graph(args.manifest, ninja_parser, wanted, clean_build_graph=True) 809 | ninja_incremental_graph = create_graph(args.manifest, ninja_parser, wanted, clean_build_graph=False) 810 | # Note: for now, always build a complete (e.g., all-targets-wanted) trace-graph 811 | trace_graph = create_graph(args.tracefile, trace_parser, targets=[]) 812 | 813 | ### Verification passes 814 | H0() 815 | info("=== Pass #1: checking clean build order constraints ===") 816 | info("=== (may lead to clean build failure or, rarely, to incorrect builds) ===") 817 | missing, ignored = compare_dependencies(trace_graph, ninja_clean_build_graph, clean_build=True) 818 | if missing or ignored: 819 | info("Errors: %d, Ignored: %d" % (len(missing), len(ignored))) 820 | print_missing_dependencies(ninja_clean_build_graph, missing, ignored, clean_build=True) 821 | else: 822 | info("No issues!") 823 | 824 | H0() 825 | info("=== Pass #2: checking for missing dependencies ===") 826 | info("=== (may lead to incomlete incremental builds if any) ===") 827 | missing, ignored = compare_dependencies(trace_graph, ninja_incremental_graph, clean_build=False) 828 | if missing or ignored: 829 | info("Errors: %d, Ignored: %d" % (len(missing), len(ignored))) 830 | print_missing_dependencies(ninja_incremental_graph, missing, ignored, clean_build=False) 831 | else: 832 | info("No issues!") 833 | 834 | ### Statistics passes 835 | if args.stats: 836 | H0() 837 | info("=== Statistics ===") 838 | 839 | info("=== Listing targets in manifest, not in the traces ===") 840 | info("=== (these are adding an unnecessary overhead on the build system) ===") 841 | info("=== (or indicating incomplete trace file - have you traced a clean build?) ===") 842 | print_excessive_manifest_dependencies(ninja_incremental_graph, trace_graph) 843 | 844 | H0() 845 | info("=== Targets rank histograms ===") 846 | info("=== ('rank' is target distance from the bottom of the graph) ===") 847 | info("=== (e.g., a minimal number of sequential tasks to rebuild a target) ===") 848 | V0("=== Targets from '%s' by order-dependencies rank ===" % args.manifest) 849 | print_targets_by_ranks(ninja_clean_build_graph) 850 | 851 | V0("=== Targets from '%s' by rebuild-dependencies rank ===" % args.manifest) 852 | print_targets_by_ranks(ninja_incremental_graph) 853 | 854 | V0("=== Targets from TRACE by rank ===") 855 | print_targets_by_ranks(trace_graph) 856 | 857 | #TODO: warn about any duplicate target build detected when tracing. 858 | #TODO: factor out 'reload' targets 859 | # warn("Detected multiple rules modifying targets:") 860 | # print trace_graph.duplicate_target_rules() 861 | 862 | H0() 863 | info("=== Targets by number of products ===") 864 | info("=== (e.g., how many targets are rebuilt if 'x' is touched) ===") 865 | print_targets_by_depending_products(ninja_incremental_graph) 866 | 867 | info("=== That's all! ===") 868 | sys.exit(0) 869 | 870 | # TODO: try-except.. 871 | --------------------------------------------------------------------------------