43 |
44 |
45 |
46 |
47 |
48 |
--------------------------------------------------------------------------------
/tests/test_fixit.py:
--------------------------------------------------------------------------------
1 | import os
2 | import unittest
3 | from pathlib import Path
4 |
5 | from fixit.config import collect_rules, parse_rule
6 | from fixit.ftypes import Config
7 | from fixit.testing import generate_lint_rule_test_cases
8 |
9 | from oca_pre_commit_hooks import checks_odoo_module_fixit
10 |
11 |
12 | class FixitTest(unittest.TestCase):
13 | def test_fixit(self):
14 | """Run 'fixit test' based on fixit.cli.test method"""
15 | os.environ["FIXIT_ODOO_VERSION"] = "18.0"
16 | os.environ["FIXIT_AUTOFIX"] = "True"
17 |
18 | rule = parse_rule(
19 | ".checks_odoo_module_fixit",
20 | Path(os.path.dirname(os.path.dirname(os.path.abspath(checks_odoo_module_fixit.__file__)))),
21 | )
22 | lint_rules = collect_rules(Config(enable=[rule], disable=[], python_version=None))
23 | test_cases = generate_lint_rule_test_cases(lint_rules)
24 | print("")
25 | for test_case_class in test_cases:
26 | with self.subTest(rule_class=test_case_class.__name__):
27 | suite = unittest.defaultTestLoader.loadTestsFromTestCase(test_case_class)
28 | for test in suite:
29 | test_result = unittest.TestResult()
30 | test.run(test_result)
31 | if test_result.failures or test_result.errors:
32 | print(f"❌ Failed: {test.id()}")
33 | for _fail, traceback in test_result.failures + test_result.errors:
34 | print(traceback)
35 | self.fail(f"Subtest {test.id()} failed")
36 | else:
37 | print(f"✅ Subtest {test.id()} passed")
38 |
--------------------------------------------------------------------------------
/.pylintrc:
--------------------------------------------------------------------------------
1 |
2 |
3 | [MASTER]
4 | load-plugins=pylint.extensions.docstyle, pylint.extensions.mccabe
5 | score=n
6 |
7 | [MESSAGES CONTROL]
8 | enable=all
9 | # Enable all then relax disable instead of disable all and enable one-by-one
10 | disable=c-extension-no-member,
11 | fixme,
12 | import-error,
13 | inconsistent-return-statements,
14 | line-too-long,
15 | locally-disabled,
16 | logging-too-many-args,
17 | missing-class-docstring,
18 | missing-function-docstring,
19 | missing-module-docstring,
20 | suppressed-message,
21 | too-few-public-methods,
22 | too-many-ancestors,
23 | too-many-arguments,
24 | too-many-boolean-expressions,
25 | too-many-branches,
27 | too-many-format-args,
28 | too-many-function-args,
29 | too-many-instance-attributes,
31 | too-many-lines,
32 | too-many-locals,
34 | too-many-nested-blocks,
35 | too-many-public-methods,
36 | too-many-return-statements,
37 | too-many-star-expressions,
38 | too-many-statements,
39 |
40 | [REPORTS]
41 | msg-template={path}:{line}: [{msg_id}({symbol}), {obj}] {msg}
42 | output-format=colorized
43 | reports=no
44 |
45 | [SIMILARITIES]
46 | min-similarity-lines=14
47 | ignore-comments=yes
48 | ignore-docstrings=yes
49 |
50 | [DESIGN]
51 | # McCabe complexity cyclomatic threshold for too-complex check
52 | # Value defined from https://en.wikipedia.org/wiki/Cyclomatic_complexity
53 | # - The figure of 10 had received substantial corroborating evidence,
54 | # but that in some circumstances it may be appropriate to relax the restriction
55 | # and permit modules with a complexity as high as 15
56 | max-complexity=15
57 |
--------------------------------------------------------------------------------
/pytest.ini:
--------------------------------------------------------------------------------
1 | [pytest]
2 | norecursedirs =
3 | .git
4 | .tox
5 | .env
6 | dist
7 | build
8 | migrations
9 |
10 | python_files =
11 | test_*.py
12 | *_test.py
13 | tests.py
14 | addopts =
15 | -ra
16 | --strict-markers
17 | --ignore=docs/conf.py
18 | --ignore=setup.py
19 | --ignore=ci
20 | --ignore=.eggs
21 | --doctest-modules
22 | --doctest-glob=*.rst
23 | --tb=short
24 | --pyargs
25 | # The order of these options matters. testpaths comes after addopts so that
26 | # oca_pre_commit_hooks in testpaths is interpreted as
27 | # --pyargs oca_pre_commit_hooks.
28 | # Any tests in the src/ directory (that is, tests installed with the package)
29 | # can be run by any user with pytest --pyargs oca_pre_commit_hooks.
30 | # Packages that are sensitive to the host machine, most famously NumPy,
31 | # include tests with the installed package so that any user can check
32 | # at any time that everything is working properly.
33 | # If you do choose to make installable tests, this will run the installed
34 | # tests as they are actually installed (same principle as when we ensure that
35 | # we always test the installed version of the package).
36 | # If you have no need for this (and your src/ directory is very large),
37 | # you can save a few milliseconds on testing by telling pytest not to search
38 | # the src/ directory by removing
39 | # --pyargs and oca_pre_commit_hooks from the options here.
40 | testpaths =
41 | src/oca_pre_commit_hooks
42 | tests/
43 |
44 | # Idea from: https://til.simonwillison.net/pytest/treat-warnings-as-errors
45 | filterwarnings =
46 | error
47 | # You can add exclusions, some examples:
48 | # ignore:'oca_pre_commit_hooks' defines default_app_config:PendingDeprecationWarning::
49 | # ignore:The {{% if:::
50 | # ignore:Coverage disabled via --no-cov switch!
51 |
--------------------------------------------------------------------------------
/test_repo/test_module/except_pass.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """Test Except Pass usage"""
3 |
4 |
5 | class TestExceptPass(object):
6 | """Test Except Pass class """
7 |
8 | def test_method(self):
9 | try:
10 | raise Exception('Exception')
11 | except Exception: # except-pass
12 | pass
13 |
14 | def test_2_method(self):
15 | """This pass is skip for body of except has more than one line """
16 | try:
17 | raise Exception('Exception')
18 | except Exception:
19 | pass
20 | print('Exception')
21 |
22 | def test_3_method(self):
23 | """This pass is skip for the exception is assigned"""
24 | exception = False
25 | try:
26 | raise Exception('Exception')
27 | except Exception:
28 | pass
29 | if exception:
30 | pass
31 |
32 | def test_4_method(self):
33 | userError = False
34 | try:
35 | raise Exception('Exception')
36 | except Exception:
37 | pass
38 | if userError:
39 | pass
40 |
41 | def test_5_method(self):
42 | exception = False
43 | try:
44 | raise Exception('Exception')
45 | except (Exception, IndexError):
46 | pass
47 | if exception:
48 | pass
49 |
50 | def test_6_method(self):
51 | try:
52 | raise Exception('Exception')
53 | except (Exception, IndexError): # except-pass
54 | pass
55 |
56 | def test_7_method(self):
57 | exception = False
58 | try:
59 | raise Exception('Exception')
60 | except (Exception, IndexError, NameError):
61 | pass
62 | except Exception: # except-pass
63 | pass
64 | if exception:
65 | pass
66 |
--------------------------------------------------------------------------------
/test_repo/test_module/website_templates_disable.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 | This shows proper usage. Default content is provided but can be modified/replaced by the user!
17 |
18 |
19 |
20 |
21 |
22 |
23 |
24 |
25 |
26 |
27 |
28 |
29 |
30 |
31 |
32 |
33 |
34 |
35 |
36 |
37 |
38 |
39 |
40 |
41 |
42 |
--------------------------------------------------------------------------------
/docs/messages/xml/oe-structure-missing-id.rst:
--------------------------------------------------------------------------------
1 | xml-oe-structure-missing-id
2 | ###########################
3 | This message is generated whenever a tag having :code:`oe_structure` as one of its classes is missing a valid
4 | :code:`id`. A valid :code:`id` must contain :code:`oe_structure` inside it. So :code:`id="my_unique_id"` is not valid,
5 | while :code:`id="oe_structure_my_unique_id"` is valid.
6 |
7 | Rationale
8 | *********
9 | The check was suggested in `this issue `_. Tags with
10 | :code:`oe_structure` as their class are meant for users to edit them through the website builder. If the tag has no
11 | :code:`id`, the website will replace the entire original view with a copy that contains the user changes.
12 | This means updates to other parts of the view (through code, AKA updating a module) may not be reflected.
13 |
14 | By providing a valid :code:`id`, only the tag with it will be replaced. Internally Odoo will inherit the original view
15 | and use an XPath to replace the tag with the user's content. This means the rest of the view can still be updated
16 | and changes should be reflected.
17 |
18 | Fixing your code
19 | ****************
20 | To fix your code you should first of all determine whether the content inside the offending tag should be
21 | editable by users. If it shouldn't, removing :code:`oe_structure` is probably the best solution.
22 |
23 | If the content is meant to be edited by users then provide a valid, **unique** ID. In order to avoid collisions
24 | it is often recommended for the ID to contain the template's ID in it. For example:
25 |
26 | .. code-block:: xml
27 |
28 | <template id="my_template" name="My Template">
29 | <div>
30 | Some stuff that should not be edited
31 | </div>
32 | <div class="oe_structure" id="oe_structure_my_template">
33 | Customize this greeting through the website editor!
34 | </div>
35 | </template>
36 |
37 | Additionally **ensure your code does not depend on any elements inside tags with oe_structure**. User editable
38 | content is volatile and subject to changes or complete removal (if the user so wishes), so something like an
39 | XPath expression may break if the user removes elements referenced in it.
40 |
--------------------------------------------------------------------------------
/tox.ini:
--------------------------------------------------------------------------------
1 | [tox]
2 | envlist =
3 | lint,
4 | update-readme,
5 | build,
6 | py39,
7 | py310,
8 | py311,
9 | py312,
10 | py313,
11 | py314,
12 |
13 | [testenv]
14 | parallel_show_output=true
15 | setenv =
16 | PYTHONPATH={toxinidir}/tests
17 | PYTHONUNBUFFERED=yes
18 | # Compatible with "tox --parallel" to avoid concurrency
19 | COVERAGE_FILE={toxinidir}/.coverage.{envname}
20 | COVERAGE_CONTEXT={envname}
21 | passenv =
22 | *
23 | deps = -r{toxinidir}/test-requirements.txt
24 | usedevelop = true
25 | commands =
26 | pytest --cov-append -s --cov --cov-report=term-missing --cov-report=html --cov-context=test -vv {posargs:}
27 |
28 | [testenv:cprofile]
29 | setenv =
30 | PROFILING=yes
31 | commands =
32 | pytest -vv -rA {posargs:-k test_profile_checks}
33 |
34 | [testenv:update-readme]
35 | basepython = {env:TOXPYTHON:python3.13}
36 | setenv =
37 | {[testenv]setenv}
38 | BUILD_README=true
39 | usedevelop = true
40 | commands =
41 | {posargs:pytest -svvk test_build_docstring}
42 | deps =
43 | {[testenv]deps}
44 |
45 | [testenv:lint]
46 | skip_install = true
47 | commands =
48 | pre-commit run --all-files --show-diff-on-failure --color=always
49 |
50 | [testenv:build]
51 | skip_install = true
52 | deps =
53 | {[testenv]deps}
54 | commands =
55 | python -m build --sdist --wheel --outdir dist_wo_pbr/
56 | python -c "import shutil;shutil.rmtree('dist/', ignore_errors=True)"
57 | python -m build --no-isolation --sdist --wheel --outdir dist/ # Generate ChangeLog with pbr
58 | python -m twine check --strict dist/*
59 | bump2version patch --allow-dirty --no-commit --no-tag --dry-run --verbose
60 | # Install the package from the built sdist to verify all files were included in the archive
61 | python -c '''import sys,pip,os,glob;os.chdir("dist");sys.argv = ["", "install", "-U", "--force-reinstall", glob.glob("*.tar.gz")[-1], "--use-feature=no-binary-enable-wheel-cache"];pip.main()'''
62 | # Test that the installed package imports its dependencies correctly
63 | python -c '''import sys, os;from oca_pre_commit_hooks import cli,cli_po;os.chdir("dist");sys.argv = ["", "--list-msgs", "--no-exit"];cli.main();cli_po.main()'''
64 |
65 | [testenv:clean]
66 | commands = coverage erase
67 | skip_install = true
68 | deps = coverage
69 |
--------------------------------------------------------------------------------
/test_repo/test_module/website_templates.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
147 |
148 |
149 |
150 |
151 |
152 |
--------------------------------------------------------------------------------
/test_repo/broken_module/i18n/ar_unicode.po:
--------------------------------------------------------------------------------
1 | # Translation of OpenERP Server.
2 | # This file contains the translation of the following modules:
3 | # * base_report_designer
4 | #
5 | msgid ""
6 | msgstr ""
7 | "Project-Id-Version: OpenERP Server 6.0dev\n"
8 | "Report-Msgid-Bugs-To: support@openerp.com\n"
9 | "POT-Creation-Date: 2012-12-21 17:05+0000\n"
10 | "PO-Revision-Date: 2012-12-01 17:32+0000\n"
11 | "Last-Translator: gehad shaat \n"
12 | "Language-Team: \n"
13 | "MIME-Version: 1.0\n"
14 | "Content-Type: text/plain; charset=UTF-8\n"
15 | "Content-Transfer-Encoding: 8bit\n"
16 | "X-Launchpad-Export-Date: 2014-04-22 07:02+0000\n"
17 | "X-Generator: Launchpad (build 16985)\n"
18 |
19 | #. module: base_report_designer
20 | #: model:ir.model,name:base_report_designer.model_base_report_sxw
21 | msgid "base.report.sxw"
22 | msgstr "base.report.sxw"
23 |
24 | #. module: base_report_designer
25 | #: view:base_report_designer.installer:0
26 | msgid "OpenERP Report Designer Configuration"
27 | msgstr "إعدادت OpenERP لتصميم التقارير"
28 |
29 | #. module: base_report_designer
30 | #: view:base_report_designer.installer:0
31 | msgid ""
32 | "This plug-in allows you to create/modify OpenERP Reports into OpenOffice "
33 | "Writer."
34 | msgstr "يسمح لك سد العجز بإنشاء/تعديل تقارير OpenERP إلى محرر .OpenOffice"
35 |
36 | #. module: base_report_designer
37 | #: view:base.report.sxw:0
38 | msgid "Upload the modified report"
39 | msgstr "تحميل تقرير معدل"
40 |
41 | #. module: base_report_designer
42 | #: view:base.report.file.sxw:0
43 | msgid "The .SXW report"
44 | msgstr "تقرير SXW"
45 |
46 | #. module: base_report_designer
47 | #: model:ir.model,name:base_report_designer.model_base_report_designer_installer
48 | msgid "base_report_designer.installer"
49 | msgstr "base_report_designer.installer"
50 |
51 | #. module: base_report_designer
52 | #: model:ir.model,name:base_report_designer.model_base_report_rml_save
53 | msgid "base.report.rml.save"
54 | msgstr "base.report.rml.save"
55 |
56 | #. module: base_report_designer
57 | #: view:base_report_designer.installer:0
58 | msgid "Configure"
59 | msgstr "تهيئة"
60 |
61 | #. module: base_report_designer
62 | #: view:base_report_designer.installer:0
63 | msgid "title"
64 | msgstr "الاسم"
65 |
66 | #. module: base_report_designer
67 | #: field:base.report.file.sxw,report_id:0
68 | #: field:base.report.sxw,report_id:0
69 | msgid "Report"
70 | msgstr "تقرير"
71 |
72 | #. module: base_report_designer
73 | #: view:base.report.rml.save:0
74 | msgid "The RML Report"
75 | msgstr "تقرير RML"
76 |
77 | #. module: base_report_designer
78 | #: model:ir.ui.menu,name:base_report_designer.menu_action_report_designer_wizard
79 | msgid "Report Designer"
80 | msgstr "مصمم تقارير"
81 |
82 | #. module: base_report_designer
83 | #: field:base_report_designer.installer,name:0
84 | msgid "File name"
85 | msgstr "اسم الملف"
86 |
87 | #. module: base_report_designer
88 | #: view:base.report.file.sxw:0
89 | #: view:base.report.sxw:0
90 | msgid "Get a report"
91 | msgstr "احصل علي التقرير"
92 |
93 | #. module: base_report_designer
94 | #: view:base_report_designer.installer:0
95 | #: model:ir.actions.act_window,name:base_report_designer.action_report_designer_wizard
96 | msgid "OpenERP Report Designer"
97 | msgstr "مصمم التقارير لـ Openerp"
98 |
99 | #. module: base_report_designer
100 | #: view:base.report.sxw:0
101 | msgid "Continue"
102 | msgstr "تابع"
103 |
104 | #. module: base_report_designer
105 | #: field:base.report.rml.save,file_rml:0
106 | msgid "Save As"
107 | msgstr "حفظ كـ"
108 |
109 | #. module: base_report_designer
110 | #: help:base_report_designer.installer,plugin_file:0
111 | msgid ""
112 | "OpenObject Report Designer plug-in file. Save as this file and install this "
113 | "plug-in in OpenOffice."
114 | msgstr ""
115 | "يسد تصميم التقرير OpenObject العجز في الملف. احفظ كهذا الملف وثبت سد العجز "
116 | "فيOpenOffice."
117 |
118 | #. module: base_report_designer
119 | #: view:base.report.rml.save:0
120 | msgid "Save RML FIle"
121 | msgstr "حفظ كملف RML"
122 |
123 | #. module: base_report_designer
124 | #: field:base.report.file.sxw,file_sxw:0
125 | #: field:base.report.file.sxw,file_sxw_upload:0
126 | msgid "Your .SXW file"
127 | msgstr "ملفك SXW"
128 |
129 | #. module: base_report_designer
130 | #: view:base_report_designer.installer:0
131 | msgid "Installation and Configuration Steps"
132 | msgstr "خطوات التثبيت و التهيئة"
133 |
134 | #. module: base_report_designer
135 | #: field:base_report_designer.installer,description:0
136 | msgid "Description"
137 | msgstr "وصف"
138 |
139 | #. module: base_report_designer
140 | #: view:base.report.file.sxw:0
141 | msgid ""
142 | "This is the template of your requested report.\n"
143 | "Save it as a .SXW file and open it with OpenOffice.\n"
144 | "Don't forget to install the OpenERP SA OpenOffice package to modify it.\n"
145 | "Once it is modified, re-upload it in OpenERP using this wizard."
146 | msgstr ""
147 | "هذا هو قالب التقرير المطلوب.\n"
148 | "قم بحفظه كملف SXW و افتحه ببرنامج LibreOffice.\n"
149 | "لا تنسي أن تقوم بتثبيت باقة Openerp لتستطيع التحرير و التعديل.\n"
150 | "حين الإنتهاء يمكنك إعادة تحميله مرة أخرة بإستخدام نفس المعالج."
151 |
152 | #. module: base_report_designer
153 | #: model:ir.actions.act_window,name:base_report_designer.action_view_base_report_sxw
154 | msgid "Base Report sxw"
155 | msgstr "التقرير الأساسي كـ SXW"
156 |
157 | #. module: base_report_designer
158 | #: model:ir.model,name:base_report_designer.model_base_report_file_sxw
159 | msgid "base.report.file.sxw"
160 | msgstr "base.report.file.sxw"
161 |
162 | #. module: base_report_designer
163 | #: field:base_report_designer.installer,plugin_file:0
164 | msgid "OpenObject Report Designer Plug-in"
165 | msgstr "أداة OpenObject لتصميم التقارير"
166 |
167 | #. module: base_report_designer
168 | #: model:ir.actions.act_window,name:base_report_designer.action_report_designer_installer
169 | msgid "OpenERP Report Designer Installation"
170 | msgstr "تثبيت مصمم التقارير لـ Openerp"
171 |
172 | #. module: base_report_designer
173 | #: view:base.report.sxw:0
174 | msgid "Cancel"
175 | msgstr "إلغاء"
176 |
177 | #. module: base_report_designer
178 | #: view:base.report.sxw:0
179 | msgid "or"
180 | msgstr "أو"
181 |
182 | #. module: base_report_designer
183 | #: model:ir.model,name:base_report_designer.model_ir_actions_report_xml
184 | msgid "ir.actions.report.xml"
185 | msgstr "ir.actions.report.xml"
186 |
187 | #. module: base_report_designer
188 | #: view:base.report.sxw:0
189 | msgid "Select your report"
190 | msgstr "اختر التقرير"
191 |
192 | #~ msgid "Introduction"
193 | #~ msgstr "المقدمة"
194 |
195 | #~ msgid "_Close"
196 | #~ msgstr "إ_غلاق"
197 |
198 | #~ msgid "Configuration Progress"
199 | #~ msgstr "سير الإعدادات"
200 |
201 | #~ msgid "Skip"
202 | #~ msgstr "تخطي"
203 |
204 | #~ msgid "Image"
205 | #~ msgstr "صورة"
206 |
207 | #~ msgid "Report designer interface module"
208 | #~ msgstr "برنامج واجهة تصميم التقارير"
209 |
210 | #~ msgid "The RML report"
211 | #~ msgstr "تقرير RML"
212 |
213 | #~ msgid ""
214 | #~ "\n"
215 | #~ "This module is used along with OpenERP OpenOffice plugin.\n"
216 | #~ "You have to first install the plugin which is available at\n"
217 | #~ "http://www.openerp.com\n"
218 | #~ "\n"
219 | #~ "This module adds wizards to Import/Export .sxw report that\n"
220 | #~ "you can modify in OpenOffice.Once you have modified it you can\n"
221 | #~ "upload the report using the same wizard.\n"
222 | #~ msgstr ""
223 | #~ "\n"
224 | #~ "وتستخدم هذه الوحدة جنبا إلى جنب مع البرنامج المساعد OpenERP أوبن أوفيس.\n"
225 | #~ "عليك أولا تثبيت البرنامج المساعد والذي يتوفر في\n"
226 | #~ "http://www.openerp.com\n"
227 | #~ "\n"
228 | #~ "تضيف هذه الوحدة المعالجات إلى تقرير الاستيراد / التصدير. sxw إلى أن\n"
229 | #~ "يمكنك التعديل في OpenOffice.Once عندما تقوم بتعديل ذلك يمكنك\n"
230 | #~ "رفع التقرير باستخدام المعالج نفسه.\n"
231 |
--------------------------------------------------------------------------------
/src/oca_pre_commit_hooks/utils.py:
--------------------------------------------------------------------------------
1 | import os
2 | import re
3 | import shutil
4 | import subprocess
5 | import sys
6 | import tempfile
7 | from ast import literal_eval
8 | from contextlib import contextmanager
9 | from functools import lru_cache
10 | from inspect import getmembers, isfunction
11 | from itertools import chain
12 | from pathlib import Path
13 |
14 | from fixit.config import collect_rules, parse_rule
15 | from fixit.ftypes import Config
16 | from packaging.version import InvalidVersion, Version
17 |
18 | from oca_pre_commit_hooks.base_checker import BaseChecker
19 |
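# Inline comments may disable checks, e.g. "# oca-hooks:disable=check-a,check-b".
# The older "# pylint:disable=..." form is still recognized but considered deprecated.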
20 | CHECKS_DISABLED_REGEX = re.compile(re.escape("oca-hooks:disable=") + r"([a-z\-,]+)")
21 | DEPRECATED_CHECKS_DISABLED_REGEX = re.compile(re.escape("pylint:disable=") + r"([a-z\-,]+)")
22 | RE_CHECK_DOCSTRING = r"\* Check (?P<check>[\w|\-]+)"
23 |
24 |
25 | def checks_disabled(comment):
26 | comment_strip = comment.replace("\n", "").replace(" ", "").replace("#", "")
27 | check_disable_match = CHECKS_DISABLED_REGEX.search(comment_strip)
28 | check_deprecated_disable_match = DEPRECATED_CHECKS_DISABLED_REGEX.search(comment_strip)
29 |
30 | match = check_disable_match or check_deprecated_disable_match
31 | use_deprecate = bool(check_deprecated_disable_match)
32 | if not match:
33 | return [], False
34 |
35 | return match.groups()[0].split(","), use_deprecate
36 |
37 |
38 | def only_required_for_checks(*checks):
39 | """Decorator to store checks that are handled by a checker method as an
40 | attribute of the function object.
41 |
42 | This information is used to decide whether to call the decorated
43 | method or not. If none of the checks is enabled, the method will be skipped.
44 | """
45 |
46 | def store_checks(func):
47 | setattr(func, "checks", set(checks)) # noqa: B010
48 | return func
49 |
50 | return store_checks
51 |
52 |
53 | def only_required_for_installable():
54 | """Decorator to store checks that are handled by a checker method as an
55 | attribute of the function object.
56 |
57 | This information is used to decide whether to call the decorated
58 | method or not. If the module is not installabe, the method will be skipped.
59 | """
60 |
61 | def store_installable(func):
62 | setattr(func, "installable", True) # noqa: B010
63 | return func
64 |
65 | return store_installable
66 |
67 |
68 | def getattr_checks(obj_or_class: BaseChecker, prefix="check_", disable_node=None):
69 | """Get all the attributes callables (methods)
70 | that start with word 'def check_*'
71 | Skip the methods with attribute "checks" defined if
72 | the check is not enable or if it is disabled"""
73 | for attr in dir(obj_or_class):
74 | if not callable(getattr(obj_or_class, attr)) or not attr.startswith(prefix):
75 | continue
76 | meth = getattr(obj_or_class, attr)
77 | meth_checks = getattr(meth, "checks", set())
78 | if meth_checks and not any(
79 | obj_or_class.is_message_enabled(meth_check, disable_node) for meth_check in meth_checks
80 | ):
81 | continue
82 | meth_installable = getattr(meth, "installable", None)
83 | is_module_installable = getattr(obj_or_class, "is_module_installable", None)
84 | if (
85 | meth_installable is not None
86 | and is_module_installable is not None
87 | and meth_installable
88 | and not is_module_installable
89 | ):
90 | continue
91 | yield getattr(obj_or_class, attr)
92 |
93 |
94 | @contextmanager
95 | def chdir(directory):
96 | """Change the current directory similar to command 'cd directory'
97 | but remembering the previous value to be revert at final
98 | Similar to run 'original_dir=$(pwd) && cd odoo && cd ${original_dir}'
99 | """
100 | original_dir = os.getcwd()
101 | os.chdir(directory)
102 | try:
103 | yield
104 | finally:
105 | os.chdir(original_dir)
106 |
107 |
108 | @lru_cache(maxsize=256)
109 | def top_path(path):
110 | """Get the top level path based on git
111 | If no git repository is found (and therefore no top level path), the user's HOME is returned.
112 |
113 | It uses lru_cache in order to re-use top level path values
114 | when multiple files share the same path
115 |
116 | Notice it is not compatible with TemporaryDirectory since it needs a .git folder,
117 | but you can fix that by running "git init"
118 | """
119 | try:
120 | with chdir(path):
121 | return (
122 | subprocess.check_output(["git", "rev-parse", "--show-toplevel"], stderr=subprocess.STDOUT)
123 | .decode(sys.stdout.encoding)
124 | .strip()
125 | )
126 | except (FileNotFoundError, subprocess.CalledProcessError):
127 | path = Path(path)
128 | return path.root or Path.home()
129 |
130 |
131 | def full_norm_path(path):
132 | """Expand paths in all possible ways"""
133 | return os.path.normpath(os.path.realpath(os.path.abspath(os.path.expanduser(os.path.expandvars(path.strip())))))
134 |
135 |
136 | @lru_cache(maxsize=256)
137 | def walk_up(path, filenames, top):
138 | """Look for "filenames" walking up in parent paths of "path"
139 | but limited only to "top" path
140 | """
141 | if full_norm_path(path) == full_norm_path(top):
142 | return None
143 | for filename in filenames:
144 | path_filename = os.path.join(path, filename)
145 | if os.path.isfile(full_norm_path(path_filename)):
146 | return path_filename
147 | return walk_up(os.path.dirname(path), filenames, top)
148 |
149 |
150 | def get_checks_docstring(check_classes):
151 | checks_docstring = ""
152 | checks_found = set()
153 | for check_class in check_classes:
154 | check_meths = chain(
155 | [member[1] for member in getmembers(check_class, predicate=isfunction) if member[0].startswith("check")],
156 | [member[1] for member in getmembers(check_class, predicate=isfunction) if member[0].startswith("visit")],
157 | )
158 | # Sorted to keep the checks order in the README stable
159 | check_meths = sorted(
160 | list(check_meths), key=lambda m: m.__name__.replace("visit", "", 1).replace("check", "", 1).strip("_")
161 | )
162 | for check_meth in check_meths:
163 | if not check_meth or not check_meth.__doc__ or "* Check" not in check_meth.__doc__:
164 | continue
165 | checks_docstring += "\n" + check_meth.__doc__.strip(" \n") + "\n"
166 | checks_found |= set(re.findall(RE_CHECK_DOCSTRING, checks_docstring))
167 | checks_docstring = re.sub(r"( )+\*", "*", checks_docstring)
168 | rule = parse_rule(
169 | ".checks_odoo_module_fixit",
170 | Path(__file__).resolve().parent,
171 | )
172 | if "ChecksOdooModule" in [check_class.__name__ for check_class in check_classes]:
173 | checks_docstring += "\n** Special fixit checks\n"
174 | lint_rules = collect_rules(Config(enable=[rule], disable=[], python_version=None))
175 | for lint_rule in sorted(lint_rules, key=lambda r: r.name):
176 | checks_found |= {lint_rule.name}
177 | rule_doc = lint_rule.__doc__.strip("\n ")
178 | checks_docstring += f"\n* Check {lint_rule.name}\n{rule_doc}\n"
179 | return checks_found, checks_docstring
180 |
181 |
182 | def str2version(version_str):
183 | try:
184 | return Version(version_str)
185 | except (InvalidVersion, TypeError):
186 | return None
187 |
188 |
189 | def manifest_version(manifest_path):
190 | with open(manifest_path, encoding="utf-8") as manifest_fd:
191 | try:
192 | manifest = literal_eval(manifest_fd.read())
193 | except (ValueError, SyntaxError):
194 | return None
195 | return str2version(manifest.get("version"))
196 |
197 |
198 | def perform_fix(file_path, new_content):
199 | """Perform the fix by overwriting the file with the new content
200 | using a temp file to copy after."""
201 | # Use `delete=False` to be able to copy the file on Windows
202 | with tempfile.NamedTemporaryFile("wb", delete=False) as f_tmp:
203 | f_tmp.write(new_content)
204 | shutil.copy(f_tmp.name, file_path)
205 | os.unlink(f_tmp.name)
206 |
--------------------------------------------------------------------------------
/tests/test_checks.py:
--------------------------------------------------------------------------------
1 | # pylint: disable=duplicate-code,useless-suppression
2 | import glob
3 | import os
4 | import re
5 | import subprocess
6 | import sys
7 | import unittest
8 |
9 | import oca_pre_commit_hooks
10 | from . import common
11 |
12 | ALL_CHECK_CLASS = [
13 | oca_pre_commit_hooks.checks_odoo_module.ChecksOdooModule,
14 | oca_pre_commit_hooks.checks_odoo_module_csv.ChecksOdooModuleCSV,
15 | oca_pre_commit_hooks.checks_odoo_module_xml.ChecksOdooModuleXML,
16 | ]
17 |
18 |
19 | EXPECTED_ERRORS = {
20 | "csv-duplicate-record-id": 1,
21 | "csv-syntax-error": 1,
22 | "file-not-used": 1,
23 | "manifest-superfluous-key": 3,
24 | "manifest-syntax-error": 2,
25 | "prefer-env-translation": 41,
26 | "xml-create-user-wo-reset-password": 1,
27 | "xml-dangerous-qweb-replace-low-priority": 9,
28 | "xml-deprecated-data-node": 8,
29 | "xml-deprecated-openerp-node": 4,
30 | "xml-deprecated-qweb-directive": 2,
31 | "xml-deprecated-tree-attribute": 3,
32 | "xml-duplicate-fields": 3,
33 | "xml-duplicate-record-id": 2,
34 | "xml-not-valid-char-link": 2,
35 | "xml-redundant-module-name": 2,
36 | "xml-syntax-error": 2,
37 | "xml-view-dangerous-replace-low-priority": 7,
38 | "xml-xpath-translatable-item": 4,
39 | "xml-oe-structure-missing-id": 6,
40 | "xml-record-missing-id": 2,
41 | "xml-duplicate-template-id": 9,
42 | "xml-header-missing": 1,
43 | "xml-header-wrong": 19,
44 | "xml-id-position-first": 3,
45 | "xml-deprecated-oe-chatter": 1,
46 | }
47 |
48 |
49 | class TestChecks(common.ChecksCommon):
50 |
51 | def setUp(self):
52 | super().setUp()
53 | self.file_paths = glob.glob(os.path.join(self.test_repo_path, "*", "__openerp__.py")) + glob.glob(
54 | os.path.join(self.test_repo_path, "*", "__manifest__.py")
55 | )
56 | self.checks_run = oca_pre_commit_hooks.checks_odoo_module.run
57 | self.checks_cli_main = oca_pre_commit_hooks.cli.main
58 | self.expected_errors = EXPECTED_ERRORS.copy()
59 |
60 | @unittest.skipIf(not os.environ.get("BUILD_README"), "BUILD_README environment variable not enabled")
61 | def test_build_docstring(self):
62 | # Run "tox -e update-readme"
63 | # Why is this here?
64 | # The unit tests are isolated in a "tox" virtualenv with all test-requirements installed
65 | # and the latest dev version of the package, instead of using the one
66 | # already installed in the OS (without the latest dev changes),
67 | # and we have no other way to verify that all checks are exercised and documented
68 | # Feel free to migrate this non-standard section of the code to a better place
69 |
70 | checks_found, checks_docstring = oca_pre_commit_hooks.utils.get_checks_docstring(ALL_CHECK_CLASS)
71 | readme_path = os.path.join(os.path.dirname(os.path.dirname(os.path.realpath(__file__))), "README.md")
72 | with open(readme_path, encoding="UTF-8") as f_readme:
73 | readme_content = f_readme.read()
74 |
75 | checks_docstring = f"# Checks\n{checks_docstring}"
76 | new_readme = self.re_replace(
77 | "[//]: # (start-checks)", "[//]: # (end-checks)", checks_docstring, readme_content
78 | )
79 |
80 | # Find a better way to get the --help string
81 | help_content = subprocess.check_output(["oca-checks-odoo-module", "--help"], stderr=subprocess.STDOUT).decode(
82 | sys.stdout.encoding
83 | )
84 | help_content = f"# Help\n```bash\n{help_content}\n```"
85 | # remove extra spaces
86 | help_content = re.sub(r"\n( )+", " ", help_content)
87 | help_content = re.sub(r"( )+", " ", help_content)
88 | new_readme = self.re_replace("[//]: # (start-help)", "[//]: # (end-help)", help_content, new_readme)
89 |
90 | all_check_errors = self.checks_run(sorted(self.file_paths), no_exit=True, no_verbose=False)
91 | all_check_errors_by_code = self.get_grouped_errors(all_check_errors)
92 |
93 | version = oca_pre_commit_hooks.__version__
94 | check_example_content = ""
95 | for code in sorted(all_check_errors_by_code):
96 | check_example_content += f"\n\n * {code}\n"
97 | for check_error in sorted(all_check_errors_by_code[code])[:3]:
98 | msg = f"{check_error.position.filepath}"
99 | if check_error.position.line:
100 | msg += f"#L{check_error.position.line}"
101 | if check_error.message:
102 | msg += f" {check_error.message}"
103 | check_example_content += (
104 | f"\n - https://github.com/OCA/odoo-pre-commit-hooks/blob/v{version}/test_repo/{msg}"
105 | )
106 | check_example_content = f"# Examples\n{check_example_content}"
107 | new_readme = self.re_replace(
108 | "[//]: # (start-example)", "[//]: # (end-example)", check_example_content, new_readme
109 | )
110 | with open(readme_path, "w", encoding="UTF-8") as f_readme:
111 | f_readme.write(new_readme)
112 | self.assertEqual(
113 | readme_content,
114 | new_readme,
115 | "The README was updated! Don't panic only failing for CI purposes. Run the same test again.",
116 | )
117 | self.assertFalse(set(self.expected_errors) - checks_found, "Missing docstring of checks tested")
118 |
119 | def test_non_exists_path(self):
120 | all_check_errors = self.checks_run(["/tmp/no_exists"], no_exit=True, no_verbose=False)
121 | self.assertFalse(all_check_errors)
122 |
123 | def test_autofix(self):
124 | # Before autofix
125 | fname_wo_header = os.path.join(self.test_repo_path, "broken_module", "xml_wo_header.xml")
126 | with open(fname_wo_header, "rb") as f_wo_header:
127 | content = f_wo_header.read()
128 | self.assertFalse(content.strip().startswith(b"',
143 | content,
144 | "The XML wrong xmlid order was previously fixed",
145 | )
146 | self.assertIn(
147 | b'',
148 | content,
149 | "The XML wrong xmlid order and redundant module name was previously fixed",
150 | )
151 |
152 | fname_redundant_module_name = os.path.join(self.test_repo_path, "broken_module", "model_view2.xml")
153 | with open(fname_redundant_module_name, "rb") as f_redundant_module_name:
154 | content = f_redundant_module_name.read()
155 | self.assertIn(
156 | b'',
157 | content,
158 | "The XML wrong redundant module name was previously fixed",
159 | )
160 |
161 | self.checks_run(self.file_paths, autofix=True, no_exit=True, no_verbose=False)
162 |
163 | # After autofix
164 | with open(fname_wo_header, "rb") as f_wo_header:
165 | content = f_wo_header.read()
166 | self.assertTrue(content.strip().startswith(b"', content, "The XML wrong xmlid was not fixed"
178 | )
179 | self.assertIn(
180 | b'',
181 | content,
182 | "The XML wrong xmlid order and redundant module name was not fixed",
183 | )
184 |
185 | with open(fname_redundant_module_name, "rb") as f_redundant_module_name:
186 | content = f_redundant_module_name.read()
187 | self.assertIn(
188 | b'',
189 | content,
190 | "The XML wrong redundant module name was not fixed",
191 | )
192 |
--------------------------------------------------------------------------------
/docs/dev.md:
--------------------------------------------------------------------------------
1 | # Development
2 | This file documents the purpose and architecture of programs contained in this repository.
3 | It is meant to serve as an introduction for new developers and a guideline for development. It is expected that
4 | new code will abide by these guidelines.
5 |
6 | ## Purpose
7 | Provide linters that complement [`pylint-odoo`](https://github.com/OCA/pylint-odoo). `pylint-odoo` is focused on
8 | checking Python source code, which means linters in this project focus solely on the following types of files:
9 |
10 | * csv
11 | * po
12 | * xml
13 |
14 | As the name of the repository implies, all linters in this project must be compatible with
15 | [`pre-commit`](https://pre-commit.com/). This means **all linters must comply with the following points**:
16 |
17 | * provide an entrypoint in the form of an executable which takes (at least) filenames as arguments
18 | * nonzero exit on failure
19 |
20 | A linter is considered to have failed once it emits a message.
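The sketch below is a minimal, hypothetical example of such an entrypoint (the `my-linter` name and the `check_file` helper are placeholders, not part of this repository); it only illustrates the two requirements above.

```python
# Hypothetical minimal linter entrypoint: filenames come in as arguments and
# the process exits nonzero as soon as at least one message was emitted.
import argparse
import sys


def check_file(filename):
    """Placeholder check: return a list of message strings for the file."""
    return []  # a real linter would run its checks here


def main(argv=None):
    parser = argparse.ArgumentParser(prog="my-linter")
    parser.add_argument("filenames", nargs="*", help="Files to lint")
    args = parser.parse_args(argv)

    messages = []
    for filename in args.filenames:
        messages.extend(check_file(filename))

    for message in messages:
        print(message)

    # Emitting any message means the linter failed.
    return 1 if messages else 0


if __name__ == "__main__":
    sys.exit(main())
```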
21 |
22 | ## Nomenclature
23 | * Linter: An executable (entry point) which runs checks and emits messages.
24 | * Check: A routine inside a linter, verifies the source code conforms to a certain rule (or rules).
25 | * Message: **Generated by checks** when a rule is not met. **Emitted by linters**.
26 |
27 | Why is this last point important? Checks must be self-contained units which perform a single task: verify the source
28 | conforms to the rule it checks for. This improves testability.
29 |
30 | ## Checks
31 | Checks are run by linters. The nature of checks can be divided in two categories:
32 |
33 | * Dependent
34 | * Independent
35 |
36 | ### Dependent Checks
37 | These are checks that depend on other files. **The check must process all its dependencies everytime it runs**. Most
38 | times these dependencies consist of all files of the same type inside an
39 | [Odoo module](https://www.odoo.com/documentation/16.0/developer/howtos/backend.html). An example of a dependent
40 | check is `xml-duplicate-record-id`.
41 |
42 | This check must read all `<record>` tags inside a module and verify none of them share the
43 | same id (the `id` attribute). `<record>` tags are found in **xml** files, so this check **must read all xml files in
44 | a module**.
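As an illustration only (the function below is a simplified sketch, not the actual implementation of `xml-duplicate-record-id`), a dependent check could look roughly like this:

```python
# Sketch of a dependent check: it must look at every xml file in the module,
# not just the file that changed, to notice ids that are used more than once.
from collections import defaultdict
from pathlib import Path
from xml.etree import ElementTree


def find_duplicate_record_ids(module_path):
    """Return {record_id: [files]} for ids defined more than once in the module."""
    ids_seen = defaultdict(list)
    for xml_file in Path(module_path).rglob("*.xml"):
        try:
            tree = ElementTree.parse(xml_file)
        except ElementTree.ParseError:
            continue  # malformed files are reported by a separate syntax check
        for record in tree.iter("record"):
            record_id = record.get("id")
            if record_id:
                ids_seen[record_id].append(str(xml_file))
    return {rid: files for rid, files in ids_seen.items() if len(files) > 1}
```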
45 |
46 | ### Independent Checks
47 | These checks have no dependencies. They only need to process the very file they are checking. An example would be
48 | `xml-deprecated-node`. This check ensures there are no deprecated tags (such as `<data>`) in xml files.
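A rough sketch of an independent check is shown below; the deprecated `<data>` tag is used here only as an assumed example and the function is not the project's real implementation:

```python
# Sketch of an independent check: it only needs the single file it is given.
from xml.etree import ElementTree


def check_deprecated_data_node(xml_filename):
    """Return a list of messages if a deprecated <data> tag is found in the file."""
    messages = []
    try:
        tree = ElementTree.parse(xml_filename)
    except ElementTree.ParseError:
        return messages  # syntax errors belong to another check
    for _node in tree.iter("data"):
        messages.append(f"{xml_filename}: deprecated <data> tag found")
    return messages
```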
49 |
50 | ## Messages
51 | Messages are generated by checks, but emitted by linters. They alert the user about failure to meet one of the
52 | rules verified by checks. They are standardized in a dataclass. **All compliant checks must only generate
53 | messages in their standardized format (see Architecture Specs)**.
54 |
55 | ## Linters
56 | Linters are programs executed by the user to run various checks. They are separated based on the types of checks they
57 | run. **Linters must contain only one type of check (dependent or independent).**
58 |
59 | > **Warning**
60 | > Running independent and dependent checks inside the same linter will nullify one feature of `pre-commit`: performing
61 | > checks only on files which have been modified.
62 |
63 | Linters are composed of five elements:
64 |
65 | * **Frontend**: Command line executable. **In charge of parsing all user input**. User input not only consists of
66 | command line arguments, but also environment variables and configuration files. Said configuration files may not be
67 | explicitly stated by the user and may reside in default locations. It needs to normalize all user input into a common
68 | format for the next component, the *scheduler*.
69 | * **Scheduler**: Gathers all checks for the linter and runs the *necessary* checks in an orderly manner. Note the word
70 | *necessary*. Certain messages may be disabled
71 | (either through command line arguments, environment variables or a configuration file). The scheduler must detect these
72 | cases and stop the checks that generate said messages from running.
73 | * **Checks**: A self-contained function which takes a list of arguments and, if necessary, generates messages.
74 | * **Message Stack**: Stores all messages generated during the execution of checks.
75 | * **Printer**: Reads messages from the message stack and generates the corresponding output. Generally in the form
76 | of text to `stdout`.
77 |
78 | ```mermaid
79 | flowchart TD;
80 | start([Executable is run]) --> frontend[/Parse user's arguments. Read environment variables and config files/]
81 | subgraph _______________Frontend
82 | frontend --> frontendProc(Normalize user input into standard configuration)
83 | frontendProc --> scheduler(Start Scheduler with normalized config)
84 | end
85 | subgraph ______________Scheduler
86 | scheduler --> check(Run Check)
87 | check --> checkIf{Any more checks?}
88 | checkIf -->|Yes| check
89 | end
90 | subgraph ____________Printer
91 | checkIf -->|No| msgIf{Are there any messages in the Message Stack?}
92 | msgIf -->|Yes| printer[/Show messages to user/]
93 | end
94 | msgIf -->|No| zeroExit([Exit with return code = 0])
95 | printer --> nonZeroExit([Exit with return code != 0])
96 | ```
97 |
98 | ## Architecture Specs
99 | Based on the components and their definitions mentioned above, the following architecture is proposed. It tries to be
100 | as standard and strict as possible in order to streamline development and reduce the creation of
101 | technical debt, but provides means to customize behavior if necessary (for those edge cases).
102 |
103 | *Note: The following code examples have been reduced as they are meant as grand scheme examples and not a detailed
104 | implementation.*
105 |
106 | ### Frontend
107 | The main task of the frontend is parsing the user's configuration and normalizing it into a common format for the Scheduler.
108 | To enforce the format, it has been declared as a dataclass.
109 |
110 | In the case of dependent linters, `filenames` shall consist solely of `__manifest__.py` files. The manifest file
111 | states all source code files (outside of Python) and lets the scheduler calculate any dependencies which need to be
112 | checked.
113 |
114 | For independent linters, `filenames` consists of all files to be checked. Ideally these will only be those changed
115 | by the current commit.
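A hedged sketch of this normalization step is shown below. The `NormalizedConfig` dataclass is a reduced stand-in for the `SchedulerConfiguration` defined later in this document, and the `LINTER_ENABLE`/`LINTER_DISABLE` environment variable names are placeholders:

```python
# Sketch only: merge command line arguments and environment variables into one
# normalized configuration object for the scheduler.
import argparse
import os
from dataclasses import dataclass


@dataclass(frozen=True)
class NormalizedConfig:  # reduced stand-in for SchedulerConfiguration (see below)
    filenames: tuple
    enable: tuple = ()
    disable: tuple = ()


def _split(value):
    return [item for item in value.split(",") if item]


def generate_config(argv=None):
    parser = argparse.ArgumentParser()
    parser.add_argument("filenames", nargs="*")
    parser.add_argument("--enable", default="")
    parser.add_argument("--disable", default="")
    args = parser.parse_args(argv)

    # Environment variables are one more input source the frontend must merge.
    enable = set(_split(args.enable)) | set(_split(os.environ.get("LINTER_ENABLE", "")))
    disable = set(_split(args.disable)) | set(_split(os.environ.get("LINTER_DISABLE", "")))
    return NormalizedConfig(
        filenames=tuple(args.filenames),
        enable=tuple(sorted(enable)),
        disable=tuple(sorted(disable)),
    )
```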
116 |
117 | ### Scheduler and Lifecycle
118 | As stated before, the scheduler must run all necessary checks. Besides running checks it also needs to run
119 | the following "Lifecycle methods":
120 |
121 | * `check_loop` - Runs all checks in the linter against a file.
122 | * `on_global_open` - Runs just before checks start being run.
123 | * `on_global_close` - Runs after all checks have run. The scheduler will yield control of the program after calling this method.
124 | * `on_open` - Runs before a check loop starts.
125 | * `on_close` - Runs after a check loop. The next check loop (if any) will be run after this
126 |
127 | ### Message Stack
128 | Stores all messages generated by checks. Must implement (at least) the following method:
129 |
130 | * `store_message` - Stores a new message into the stack.
131 |
132 | ### Printer
133 | Produces output for the user to see. This output should contain all messages generated by checks. Alternatively,
134 | the user can choose to display messages that can be generated by the linter instead of linting files. The printer
135 | must produce said output too (message descriptions).
136 |
137 | ### Abstract Super Class
138 | In order to enforce the structure and flow described in this document an abstract class is used. **All
139 | compliant linters must be subclasses of it**. In order to keep things simple the
140 | linter is encompassed in just one class; the five components are just a mental construct used to organize concerns,
141 | and in practice they are all implemented in the linter.
142 |
143 | ```python
144 | from abc import ABCMeta, abstractmethod
145 | from argparse import ArgumentParser
146 | from dataclasses import dataclass
147 | from typing import Mapping
148 |
149 |
150 | @dataclass
151 | class Message:
152 | key: str # Contains the message-id, for example xml-duplicate-record-id
153 | file: str # Absolute path to the file which committed the violation
154 | args: tuple = tuple() # Extra arguments used by the Printer to format the message
155 | # The following values won't be shown to the user if they are negative.
156 | line: int = -1 # Optional. Line in the file where the violation was found.
157 | column: int = -1 # Optional. Column where the violation happened.
158 |
159 |
160 | @dataclass(frozen=True)
161 | class SchedulerConfiguration:
162 | filenames: list[str] # Files to run the checker on.
163 | enable: list[str] # All enabled messages.
164 | disable: list[str] # All disabled messages.
165 | list_msgs: bool = False # Do not run any checks. Just print out the messages this linter emits.
166 | zero_exit: bool = False # If true the linter will always produce 0 as a return code.
167 | kwargs: dict = None # Extra arguments which can be used for customization. Implementation dependent.
168 |
169 |
170 | class AbstractLinter(ArgumentParser, metaclass=ABCMeta):
171 | _messages: Mapping[str, str] = None
172 |
173 | def __init__(self):
174 | self.add_argument("filenames")
175 | self.add_argument("--enable")
176 | self.add_argument("--disable")
177 | self.add_argument("--zero-exit")
178 |
179 | # Lifecycle
180 | def on_global_open(self): pass
181 | def on_global_close(self): pass
182 | def on_open(self): pass
183 | def on_close(self): pass
184 |
185 | @abstractmethod
186 | def get_exit_status(self) -> int:
187 | pass
188 |
189 | # Entrypoint (this runs when the user calls the executable)
190 | def run(self, args) -> int:
191 | # Frontend is called
192 | config = self.generate_config(args)
193 | if config.list_msgs:
194 | self.print_message_descriptions()
195 | return 0
196 |
197 | # Scheduler starts with a call to _on_global_open
198 | self.on_global_open()
199 | # Files get linted. A check loop is run per file.
200 | for file in config.filenames:
201 | self.perform_check_run(file)
202 | # Scheduler ends with a call to _on_global_close
203 | self.on_global_close()
204 |
205 | self.print_generated_messages()
206 |
207 | return self.get_exit_status()
208 |
209 | # Frontend
210 | @abstractmethod
211 | def generate_config(self, args) -> SchedulerConfiguration:
212 | pass
213 |
214 | # Scheduler
215 | @abstractmethod
216 | def _check_loop(self, file: str):
217 | pass
218 |
219 | def perform_check_run(self, file: str):
220 | self.on_open()
221 | self._check_loop(file)
222 | self.on_close()
223 |
224 | # Message Stack
225 | def add_message(self, message: Message):
226 | if message.key not in self._messages:
227 | raise ValueError(f"Message type ${message.key} is not generated by this linter.")
228 |
229 | self._store_message(message)
230 |
231 | @abstractmethod
232 | def _store_message(self, message: Message):
233 | pass
234 |
235 | # Printer
236 | @abstractmethod
237 | def print_generated_messages(self):
238 | pass
239 |
240 | @abstractmethod
241 | def print_message_descriptions(self):
242 | pass
243 | ```
244 |
245 | ## pre-commit Integration
246 | [pre-commit](https://pre-commit.com/) is a tool that integrates with Git to automatically run checks before
247 | committing changes. It uses Git Hooks to accomplish this. Two important features are `require_serial` and the ability
248 | to run lints on only files that have been changed.
249 |
250 | Leaving `require_serial` unset (the default) lets pre-commit run linters in parallel processes. This speeds up the linting process and is generally a nice
251 | thing to have. Linting the minimum amount of files needed (only those that have changed) is also a great benefit as it
252 | reduces CPU usage/time (and saves money).
253 |
254 | ### Independent Linters
255 | All compliant Independent Linters can and should run (if possible) as parallel processes. As of
256 | writing this, it is this way by default, so no extra configuration needs to be performed. They should also lint
257 | only files of their type that have been modified. This is also the default.
258 |
259 | A sample configuration for this type of linter could be:
260 | ```yaml
261 | # .pre-commit-hooks.yaml
262 | - id: independent-csv-checker
263 | name: check csv files
264 | entry: csv-checker
265 | types: [csv]
266 | ```
267 |
268 | ### Dependent Linters
269 | Dependent Linters can't run in parallel processes, since they need the same context in order to detect stuff like
270 | duplicate IDs. This means their entry in `.pre-commit-hooks.yaml` must contain `require_serial: true`.
271 |
272 | They can run only when files of their type (csv, po, xml) are changed. For example, there is no sense
273 | in running an XML Linter (and looking for duplicated ids) if only .po or .csv files were changed.
274 | Using the same example, they do have to run if just a single XML file has been changed, and they need to run on
275 | all XML files *in the module*.
276 |
277 | Emphasis is on *module*, if `views.xml` from the module `sale_management` is changed, there is no need to check files
278 | in other modules. Context is limited to the module in this case. A sample configuration for this type of
279 | linter would be:
280 |
281 | ```yaml
282 | # .pre-commit-hooks.yaml
283 | - id: dependent-xml-checker
284 | name: check xml files
285 | require_serial: true
286 | entry: xml-checker
287 | types: [xml]
288 | ```
289 |
290 | Following the example before, if `views.xml` is changed, `pre-commit` will pass it to the executable as part of
291 | the `filenames` argument. Initially the dependent linter will only have one xml file in its filenames,
292 | even though it needs all xml files in said module. This is where the flexibility provided by separating concerns comes into
293 | play.
294 |
295 | The Frontend receives `views.xml` as the only file. This Frontend has been written for Dependent Linters, so it knows
296 | it needs all XML files. Therefore, it finds the module `views.xml` belongs to, reads its `__manifest__.py` and
297 | generates a `SchedulerConfiguration` whose `filenames` contain all the module's XML files it found. This configuration
298 | is passed to the scheduler and all checks run as expected.
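A sketch of how that expansion might be done follows; the `expand_to_module_files` name is illustrative, not the project's actual API:

```python
# Sketch: given the one xml file passed by pre-commit, find the module it
# belongs to (the closest parent directory with a manifest), read the manifest
# and return every declared xml file so dependent checks get their full context.
from ast import literal_eval
from pathlib import Path

MANIFEST_NAMES = ("__manifest__.py", "__openerp__.py")


def expand_to_module_files(xml_path):
    path = Path(xml_path).resolve()
    for parent in path.parents:
        for manifest_name in MANIFEST_NAMES:
            manifest_path = parent / manifest_name
            if not manifest_path.is_file():
                continue
            manifest = literal_eval(manifest_path.read_text(encoding="utf-8"))
            declared = manifest.get("data", []) + manifest.get("demo", [])
            return sorted(str(parent / name) for name in declared if name.endswith(".xml"))
    return [str(path)]  # no manifest found: fall back to the file itself
```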
299 |
--------------------------------------------------------------------------------
/tests/common.py:
--------------------------------------------------------------------------------
1 | import os
2 | import random
3 | import re
4 | import shutil
5 | import subprocess
6 | import sys
7 | import tempfile
8 | import unittest
9 | from collections import defaultdict
10 | from contextlib import contextmanager
11 | from distutils.dir_util import copy_tree # pylint:disable=deprecated-module
12 |
13 | from oca_pre_commit_hooks import utils
14 | from oca_pre_commit_hooks.global_parser import CONFIG_NAME, DISABLE_ENV_VAR, ENABLE_ENV_VAR
15 |
16 | RND = random.Random(987654321)
17 |
18 |
19 | def assertDictEqual(self, d1, d2, msg=None):
20 | # pylint:disable=invalid-name
21 | """Original method does not show the correct item diff
22 | Using ordered list it is showing the diff better"""
23 | real_dict2list = [(i, d1[i]) for i in sorted(d1)]
24 | expected_dict2list = [(i, d2[i]) for i in sorted(d2)]
25 | self.assertEqual(real_dict2list, expected_dict2list, msg)
26 |
27 |
28 | @contextmanager
29 | def chdir(directory):
30 | original_dir = os.getcwd()
31 | try:
32 | os.chdir(directory)
33 | yield
34 | finally:
35 | os.chdir(original_dir)
36 |
37 |
38 | def create_dummy_repo(src_path, dest_path):
39 | copy_tree(src_path, dest_path)
40 | subprocess.check_call(["git", "init", dest_path, "--initial-branch=main"])
41 |
42 |
43 | class ChecksCommon(unittest.TestCase):
44 | # pylint: disable=no-member
45 | @classmethod
46 | def setUpClass(cls):
47 | super().setUpClass()
48 | cls.maxDiff = None
49 | cls.original_test_repo_path = os.path.join(
50 | os.path.dirname(os.path.dirname(os.path.realpath(__file__))), "test_repo"
51 | )
52 | # For the inherited classes set "compatible_with_directories = True"
53 | # if the checks can run with directory paths
54 | # set False otherwise
55 | # e.g. "*.po" files are not compatible with directories
56 | cls.compatible_with_directories = True
57 |
58 | def setUp(self):
59 | super().setUp()
60 | os.environ.pop(DISABLE_ENV_VAR, None)
61 | os.environ.pop(ENABLE_ENV_VAR, None)
62 | self.tmp_dir = os.path.realpath(tempfile.mkdtemp(suffix="_oca_pre_commit_hooks"))
63 | self.test_repo_path = self.tmp_dir
64 | create_dummy_repo(self.original_test_repo_path, self.tmp_dir)
65 |
66 | def tearDown(self):
67 | super().tearDown()
68 | if os.path.isdir(self.tmp_dir) and self.tmp_dir != "/":
69 | shutil.rmtree(self.tmp_dir, ignore_errors=True)
70 |
71 | @staticmethod
72 | def get_grouped_errors(all_check_errors):
73 | grouped_errors = defaultdict(list)
74 | for check_error in all_check_errors:
75 | grouped_errors[check_error.code].append(check_error)
76 | return grouped_errors
77 |
78 | @staticmethod
79 | def get_count_code_errors(all_check_errors):
80 | grouped_errors = ChecksCommon.get_grouped_errors(all_check_errors)
81 | return {code: len(errors) for code, errors in grouped_errors.items()}
82 |
83 | @staticmethod
84 | def re_replace(sub_start, sub_end, substitution, content):
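# Replace everything between the sub_start and sub_end marker lines with
# "substitution", keeping both markers (used to refresh sections of the README).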
85 | re_sub = re.compile(rf"^{re.escape(sub_start)}$.*^{re.escape(sub_end)}$", re.M | re.S)
86 | if not re_sub.findall(content):
87 | raise UserWarning("No matched content")
88 | new_content = re_sub.sub(f"{sub_start}\n\n{substitution}\n\n{sub_end}", content)
89 | return new_content
90 |
91 | @unittest.skip("Repeated")
92 | def test_checks_basic(self):
93 | all_check_errors = self.checks_run(self.file_paths, no_exit=True, no_verbose=False)
94 | real_errors = self.get_count_code_errors(all_check_errors)
95 | # Uncomment to get sorted values to update the EXPECTED_ERRORS dict
96 | # print("\n".join(f"'{key}':{real_errors[key]}," for key in sorted(real_errors)))
97 | assertDictEqual(self, real_errors, self.expected_errors)
98 |
99 | def test_checks_with_cli(self):
100 | sys.argv = ["", "--no-exit", "--no-verbose"] + self.file_paths
101 | all_check_errors = self.checks_cli_main()
102 | real_errors = self.get_count_code_errors(all_check_errors)
103 | assertDictEqual(self, real_errors, self.expected_errors)
104 |
105 | def test_checks_disable_one_by_one_with_random_cli_env_conf(self):
106 | """Faster way to test disable one by one using random method selection"""
107 | methods = [
108 | getattr(self, name)
109 | for name in dir(self)
110 | if name.startswith("_test_checks_disable_one_by_one") and callable(getattr(self, name))
111 | ]
112 | file_paths = self.file_paths
113 | dir_paths = [os.path.dirname(i) for i in self.file_paths]
114 | for check2disable in self.expected_errors:
115 | os.environ.pop(ENABLE_ENV_VAR, None)
116 | os.environ.pop(DISABLE_ENV_VAR, None)
117 | method = RND.choice(methods)
118 | if self.compatible_with_directories:
119 | self.file_paths = RND.choice([file_paths, dir_paths]) # pylint: disable=attribute-defined-outside-init
120 | method(check2disable)
121 |
122 | def _test_checks_disable_one_by_one(self, check2disable):
123 | expected_errors = self.expected_errors.copy()
124 | all_check_errors = self.checks_run(self.file_paths, no_exit=True, no_verbose=True, disable={check2disable})
125 | expected_errors.pop(check2disable)
126 | real_errors = self.get_count_code_errors(all_check_errors)
127 | assertDictEqual(self, real_errors, expected_errors, f"Disabled only {check2disable}")
128 |
129 | def _test_checks_disable_one_by_one_with_cli(self, check2disable):
130 | expected_errors = self.expected_errors.copy()
131 | sys.argv = ["", "--no-exit", "--no-verbose", f"--disable={check2disable}"] + self.file_paths
132 | all_check_errors = self.checks_cli_main()
133 | expected_errors.pop(check2disable)
134 | real_errors = self.get_count_code_errors(all_check_errors)
135 | assertDictEqual(self, real_errors, expected_errors, f"Disabled only {check2disable}")
136 |
137 | def _test_checks_disable_one_by_one_with_env(self, check2disable):
138 | expected_errors = self.expected_errors.copy()
139 | sys.argv = ["", "--no-exit", "--no-verbose"] + self.file_paths
140 | os.environ[DISABLE_ENV_VAR] = check2disable
141 | all_check_errors = self.checks_cli_main()
142 | expected_errors.pop(check2disable)
143 | real_errors = self.get_count_code_errors(all_check_errors)
144 | assertDictEqual(self, real_errors, expected_errors, f"Disabled only {check2disable}")
145 |
146 | def _test_checks_disable_one_by_one_with_cli_conf_file(self, check2disable):
147 | file_tmpl = "[MESSAGES_CONTROL]\ndisable=%s"
148 | with tempfile.TemporaryDirectory() as tmp_dir:
149 | tmp_fname = os.path.join(tmp_dir, CONFIG_NAME)
150 | with open(tmp_fname, "w", encoding="UTF-8") as temp_fl:
151 | content = file_tmpl % check2disable
152 | temp_fl.write(content)
153 | temp_fl.flush()
154 |
155 | expected_errors = self.expected_errors.copy()
156 | sys.argv = ["", "--no-exit", "--no-verbose", f"--config={temp_fl.name}"] + self.file_paths
157 | all_check_errors = self.checks_cli_main()
158 | expected_errors.pop(check2disable)
159 | real_errors = self.get_count_code_errors(all_check_errors)
160 | self.assertTrue(real_errors == expected_errors, f"Disabled only {check2disable}")
161 |
162 | def test_checks_enable_one_by_one_with_random_cli_env_conf(self):
163 | """Faster way to test enable one by one using random method selection"""
164 | methods = [
165 | getattr(self, name)
166 | for name in dir(self)
167 | if name.startswith("_test_checks_enable_one_by_one") and callable(getattr(self, name))
168 | ]
169 | file_paths = self.file_paths
170 | dir_paths = [os.path.dirname(i) for i in self.file_paths]
171 | for check2enable in self.expected_errors:
172 | os.environ.pop(ENABLE_ENV_VAR, None)
173 | os.environ.pop(DISABLE_ENV_VAR, None)
174 | method = RND.choice(methods)
175 | if self.compatible_with_directories:
176 | self.file_paths = RND.choice([file_paths, dir_paths]) # pylint: disable=attribute-defined-outside-init
177 | method(check2enable)
178 |
179 | def _test_checks_enable_one_by_one(self, check2enable):
180 | all_check_errors = self.checks_run(self.file_paths, no_exit=True, no_verbose=True, enable={check2enable})
181 | real_errors = self.get_count_code_errors(all_check_errors)
182 | assertDictEqual(
183 | self, real_errors, {check2enable: self.expected_errors[check2enable]}, f"Enabled only {check2enable}"
184 | )
185 |
186 | def _test_checks_enable_one_by_one_with_cli(self, check2enable):
187 | sys.argv = ["", "--no-exit", "--no-verbose", f"--enable={check2enable}"] + self.file_paths
188 | all_check_errors = self.checks_cli_main()
189 | real_errors = self.get_count_code_errors(all_check_errors)
190 | assertDictEqual(
191 | self, real_errors, {check2enable: self.expected_errors[check2enable]}, f"Enabled only {check2enable}"
192 | )
193 |
194 | def _test_checks_enable_one_by_one_with_env(self, check2enable):
195 | sys.argv = ["", "--no-exit", "--no-verbose"] + self.file_paths
196 | os.environ[ENABLE_ENV_VAR] = check2enable
197 | all_check_errors = self.checks_cli_main()
198 | real_errors = self.get_count_code_errors(all_check_errors)
199 | assertDictEqual(
200 | self, real_errors, {check2enable: self.expected_errors[check2enable]}, f"Enabled only {check2enable}"
201 | )
202 |
203 | def _test_checks_enable_one_by_one_with_cli_conf_file(self, check2enable):
204 | file_tmpl = "[MESSAGES_CONTROL]\nenable=%s"
205 | with tempfile.TemporaryDirectory() as tmp_dir:
206 |             with chdir(tmp_dir):  # The checker should pick up the config file from the current working directory
207 | tmp_fname = os.path.join(tmp_dir, CONFIG_NAME)
208 | with open(tmp_fname, "w", encoding="UTF-8") as temp_fl:
209 | content = file_tmpl % check2enable
210 | temp_fl.write(content)
211 | temp_fl.flush()
212 |
213 | sys.argv = ["", "--no-exit", "--no-verbose"] + self.file_paths
214 | all_check_errors = self.checks_cli_main()
215 | real_errors = self.get_count_code_errors(all_check_errors)
216 | assertDictEqual(
217 | self,
218 | real_errors,
219 | {check2enable: self.expected_errors[check2enable]},
220 | f"Enabled only {check2enable}",
221 | )
222 |
223 | def test_checks_enable_priority(self):
224 | """Verify enable configuration options have the correct priority. It should be:
225 | 1. --enable/--disable arguments
226 | 2. Environment variables
227 |         3. Configuration files (either through arguments or by being in a default location, e.g. the repo root)
228 | """
229 | expected_errors = list(self.expected_errors.keys())
230 |
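    |         # Use three distinct checks, one per configuration channel, so the winning
    |         # channel can be identified by which check actually runs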
231 | cli_check = expected_errors[0]
232 | env_check = expected_errors[1]
233 | conf_check = expected_errors[2]
234 |
235 | os.environ[ENABLE_ENV_VAR] = env_check
236 | with tempfile.TemporaryDirectory() as tmp_dir:
237 | with open(os.path.join(tmp_dir, CONFIG_NAME), "w", encoding="UTF-8") as conf_file:
238 | conf_file.write(f"[MESSAGES_CONTROL]\nenable={conf_check}")
239 | conf_file.flush()
240 |
241 |             # First case: only expect cli_check, since CLI arguments take precedence over everything else
242 | sys.argv = [
243 | "",
244 | "--no-exit",
245 | "--no-verbose",
246 | f"--enable={cli_check}",
247 | f"--config={conf_file.name}",
248 | ] + self.file_paths
249 | real_errors = self.get_count_code_errors(self.checks_cli_main())
250 | assertDictEqual(self, real_errors, {cli_check: self.expected_errors[cli_check]})
251 |
252 |             # Second case: only expect env_check, since the environment variable overrides the config file
253 | sys.argv = ["", "--no-exit", "--no-verbose", f"--config={conf_file.name}"] + self.file_paths
254 | real_errors = self.get_count_code_errors(self.checks_cli_main())
255 | assertDictEqual(self, real_errors, {env_check: self.expected_errors[env_check]})
256 |
257 |             # Third case: expect only conf_check, since there is no CLI argument or env var
258 | os.environ.pop(ENABLE_ENV_VAR, None)
259 | sys.argv = ["", "--no-exit", "--no-verbose", f"--config={conf_file.name}"] + self.file_paths
260 | real_errors = self.get_count_code_errors(self.checks_cli_main())
261 | assertDictEqual(self, real_errors, {conf_check: self.expected_errors[conf_check]})
262 |
263 | def test_checks_disable_priority(self):
264 | """Verify disable configuration options have the correct priority. It should be:
265 | 1. --enable/--disable arguments
266 | 2. Environment variables
267 |         3. Configuration files (either through arguments or by being in a default location, e.g. the repo root)
268 | """
269 | expected_errors = list(self.expected_errors.keys())
270 |
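    |         # One distinct check per configuration channel; the channel that wins is
    |         # the one whose check ends up disabled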
271 | cli_check = expected_errors[0]
272 | env_check = expected_errors[1]
273 | conf_check = expected_errors[2]
274 |
275 | os.environ[DISABLE_ENV_VAR] = env_check
276 | with tempfile.TemporaryDirectory() as tmp_dir:
277 | with open(os.path.join(tmp_dir, CONFIG_NAME), "w", encoding="UTF-8") as conf_file:
278 | conf_file.write(f"[MESSAGES_CONTROL]\ndisable={conf_check}")
279 | conf_file.flush()
280 |
281 |             # First case: do not expect cli_check, since CLI arguments take precedence over everything else
282 | sys.argv = [
283 | "",
284 | "--no-exit",
285 | "--no-verbose",
286 | f"--disable={cli_check}",
287 | f"--config={conf_file.name}",
288 | ] + self.file_paths
289 | real_errors = self.get_count_code_errors(self.checks_cli_main())
290 | expected_errors = self.expected_errors.copy()
291 | expected_errors.pop(cli_check)
292 | assertDictEqual(self, real_errors, expected_errors)
293 |
294 |             # Second case: do not expect env_check, since the environment variable overrides the config file
295 | sys.argv = ["", "--no-exit", "--no-verbose", f"--config={conf_file.name}"] + self.file_paths
296 | expected_errors = self.expected_errors.copy()
297 | expected_errors.pop(env_check)
298 | real_errors = self.get_count_code_errors(self.checks_cli_main())
299 | assertDictEqual(self, real_errors, expected_errors)
300 |
301 |             # Third case: conf_check should be the one disabled, since there is no CLI argument or env var
302 | os.environ.pop(DISABLE_ENV_VAR, None)
303 | sys.argv = ["", "--no-exit", "--no-verbose", f"--config={conf_file.name}"] + self.file_paths
304 | expected_errors = self.expected_errors.copy()
305 | expected_errors.pop(conf_check)
306 | real_errors = self.get_count_code_errors(self.checks_cli_main())
307 | assertDictEqual(self, real_errors, expected_errors)
308 |
309 | def test_list_messages(self):
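    |         # Every check expected by this test class must appear in the list-messages output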
310 | all_messages = self.checks_run([], list_msgs=True, no_exit=True, no_verbose=False)
311 | checks_found = re.findall(utils.RE_CHECK_DOCSTRING, all_messages)
312 | self.assertFalse(set(self.expected_errors) - set(checks_found), "Missing list-message of checks")
313 |
314 | @unittest.skipUnless(os.getenv("DEBUG_TEST_CHECK"), "No message to debug was set")
315 | def test_debug_check(self):
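    |         # Debug helper: set DEBUG_TEST_CHECK=<check-name> to run only that check;
    |         # classes that do not expect it return without asserting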
316 | check_ut = os.getenv("DEBUG_TEST_CHECK")
317 | if not self.expected_errors.get(check_ut):
318 | return
319 |
320 | real_errors = self.get_count_code_errors(self.checks_run(self.file_paths, enable={check_ut}, no_exit=True))
321 | assertDictEqual(self, real_errors, {check_ut: self.expected_errors[check_ut]})
322 |
323 | def test_checks_as_string(self):
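    |         # The string representation of each reported error must include its check code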
324 | all_check_errors = self.checks_run(self.file_paths, no_exit=True, no_verbose=False)
325 | for check_error in all_check_errors:
326 |             self.assertIn(check_error.code, str(check_error))
327 |
--------------------------------------------------------------------------------