49 | (?:
50 | (?:\\\\)
51 | |(?:\\\$)
52 | |(?:(?!\$\{{).)
53 | )*
54 | ))(?P=string)
55 | (?:{JINJA_EXPRESSION})?
56 | """,
57 | re.X | re.DOTALL,
58 | )
59 |
60 | templates_to_check = {}
61 |
62 |
63 | def _remove_safe(expression):
64 | return re.sub(REMOVE_SAFE_REGEX, " ", expression).strip()
65 |
66 |
67 | def _transform_to_t_out(expression):
68 | return str(Markup('<t t-out="{}"/>').format(_remove_safe(expression)))
69 |
70 |
71 | def convert_jinja_to_inline(string):
72 | result = []
73 | for element in re.finditer(JINJA_REGEX, string):
74 | static_string = element.group("string")
75 | expression = element.group("insidebracket")
76 | if not static_string and not expression:
77 | continue
78 |
79 | if static_string:
80 | result.append(static_string)
81 | if expression:
82 | result.append("{{ %s }}" % (_remove_safe(expression),))
83 | return "".join(result)
84 |
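# Usage sketch (illustrative): jinja `${...}` placeholders become inline-template
# expressions, with the `|safe` filter stripped by `_remove_safe`, e.g.
#
#   convert_jinja_to_inline("Dear ${object.partner_id.name|safe},")
#   # -> "Dear {{ object.partner_id.name }},"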
85 |
86 | def _convert_jinja_to_t_out_text(node):
87 | comment = isinstance(node, lxml.etree._Comment)
88 | last_node = None
89 |
90 | index = 0
91 | for element in re.finditer(JINJA_REGEX, node.text):
92 | static_string = element.group("string")
93 | expression = element.group("insidebracket")
94 | if not static_string and not expression:
95 | continue
96 |
97 | if last_node is None:
98 | node.text = static_string
99 | last_node = node
100 | elif comment and static_string:
101 | node.text += static_string
102 | elif static_string:
103 | last_node.tail = static_string
104 |
105 | if expression:
106 | if comment:
107 | node.text += _transform_to_t_out(expression)
108 | else:
109 | new_node = lxml.html.fragment_fromstring(_transform_to_t_out(expression))
110 | node.insert(index, new_node)
111 | index += 1
112 | last_node = new_node
113 |
114 |
115 | def _convert_jinja_to_t_out_tail(node):
116 | last_node = node
117 |
118 | for element in re.finditer(JINJA_REGEX, node.tail):
119 | static_string = element.group("string")
120 | expression = element.group("insidebracket")
121 | if not static_string and not expression:
122 | continue
123 |
124 | last_node.tail = ""
125 | if expression:
126 | node = lxml.html.fragment_fromstring(_transform_to_t_out(expression))
127 | last_node.addnext(node)
128 | if static_string:
129 | last_node.tail = static_string
130 | if expression:
131 | last_node = node
132 |
133 |
134 | def convert_jinja_to_qweb(string):
135 | string = re.sub(r"""^<\?xml version=("|')1\.0\1\?>\s*""", "", string, flags=re.M)
136 | # Create a parent in case there is multiples root nodes
137 | element = lxml.html.fragment_fromstring(string, create_parent="div")
138 | for el in element.getiterator():
139 | if el.text:
140 | _convert_jinja_to_t_out_text(el)
141 | if el.tail:
142 | _convert_jinja_to_t_out_tail(el)
143 | for key, value in el.attrib.items():
144 | if re.search(JINJA_EXPRESSION_REGEX, value):
145 | del el.attrib[key]
146 | el.set("t-attf-" + key, convert_jinja_to_inline(value))
147 | result = lxml.html.tostring(element, encoding="unicode")
148 | # Remove the technically created parent div, otherwise the first jinja
149 | # instruction will not match a jinja regex.
150 | result = result[5:-6]
151 |
152 | for func in [
153 | _replace_set,
154 | _replace_for,
155 | _replace_endfor,
156 | _replace_if,
157 | _replace_elif,
158 | _replace_else,
159 | _replace_endif,
160 | ]:
161 | result = func(result)
162 |
163 | # Make sure the html is correct
164 | result = lxml.etree.tostring(lxml.html.fragment_fromstring(result, create_parent="div"), encoding="unicode")
165 |
166 | # Remove the parent div
167 | return result[5:-6]
168 |
169 |
170 | def _get_set(matchobj):
171 | return Markup("""{}<t t-set="{}" t-value="{}"/>""").format(
172 | matchobj.group(1),
173 | html.unescape(matchobj.group(2).strip()),
174 | html.unescape(matchobj.group(3).strip()),
175 | )
176 |
177 |
178 | def _replace_set(string):
179 | reg = re.compile(r"^(\s*)%\s*set([^=]*)=(.*)", re.IGNORECASE | re.MULTILINE)
180 | return reg.sub(_get_set, string)
181 |
182 |
183 | def _get_for(matchobj):
184 | return Markup("""{}<t t-foreach="{}" t-as="{}">""").format(
185 | matchobj.group(1), html.unescape(matchobj.group(3).strip()), html.unescape(matchobj.group(2).strip())
186 | )
187 |
188 |
189 | def _replace_for(string):
190 | reg = re.compile(r"^(\s*)%\s*for((?:(?! in ).)*?) in (.*?):?\s*$", re.IGNORECASE | re.MULTILINE)
191 | return reg.sub(_get_for, string)
192 |
193 |
194 | def _replace_endfor(string):
195 | reg = re.compile(r"^(\s*)%\s*endfor.*", re.IGNORECASE | re.MULTILINE)
196 | return reg.sub(r"\1</t>", string)
197 |
198 |
199 | def _get_if(matchobj):
200 | return Markup("""{}<t t-if="{}">""").format(matchobj.group(1), html.unescape(matchobj.group(2).strip()))
201 |
202 |
203 | def _replace_if(string):
204 | reg = re.compile(r"^(\s*)%\s*if(.*?):?\s*$", re.IGNORECASE | re.MULTILINE)
205 | return reg.sub(_get_if, string)
206 |
207 |
208 | def _get_elif(matchobj):
209 | return Markup("""{}</t>\n{}<t t-elif="{}">""").format(
210 | matchobj.group(1), matchobj.group(1), html.unescape(matchobj.group(2).strip())
211 | )
212 |
213 |
214 | def _replace_elif(string):
215 | reg = re.compile(r"^(\s*)%\s*elif(.*?):?\s*$", re.IGNORECASE | re.MULTILINE)
216 | return reg.sub(_get_elif, string)
217 |
218 |
219 | def _replace_else(string):
220 | reg = re.compile(r"^(\s*)%\s*else.*", re.IGNORECASE | re.MULTILINE)
221 | return reg.sub(r'\1</t>\n\1<t t-else="">', string)
222 |
223 |
224 | def _replace_endif(string):
225 | reg = re.compile(r"^(\s*)%\s*endif.*", re.IGNORECASE | re.MULTILINE)
226 | return reg.sub(r"\1</t>", string)
227 |
228 |
229 | def upgrade_jinja_fields(
230 | cr,
231 | table_name,
232 | inline_template_fields,
233 | qweb_fields,
234 | name_field="name",
235 | model_name=None,
236 | table_model_name="model",
237 | fetch_model_name=False,
238 | ):
239 | _validate_table(table_name)
240 | all_field = inline_template_fields + qweb_fields
241 | if not model_name:
242 | all_field = [table_model_name] + all_field
243 | sql_fields = ", ".join(all_field)
244 |
245 | sql_where_inline_fields = [field + " like '%${%'" for field in inline_template_fields]
246 | sql_where_qweb_fields = [field + r"~ '(\$\{|%\s*(if|for))'" for field in qweb_fields]
247 | sql_where_fields = " OR ".join(sql_where_inline_fields + sql_where_qweb_fields)
248 |
249 | templates_to_check[table_name] = []
250 | model = model_of_table(cr, table_name)
251 |
252 | cr.commit() # ease the processing for PG
253 | cr.execute(
254 | f"""
255 | SELECT id, {name_field}, {sql_fields}
256 | FROM {table_name}
257 | WHERE {sql_where_fields}
258 | """
259 | )
260 | for data in cr.dictfetchall():
261 | _logger.info("process %s(%s) %s", table_name, data["id"], data[name_field])
262 |
263 | # convert the fields
264 | templates_converted = {}
265 |
266 | for field in inline_template_fields:
267 | _logger.info(" `- convert inline field %s", field)
268 | template = data[field]
269 | templates_converted[field] = convert_jinja_to_inline(template) if template else ""
270 |
271 | for field in qweb_fields:
272 | _logger.info(" `- convert qweb field %s", field)
273 | template = data[field]
274 | templates_converted[field] = convert_jinja_to_qweb(template) if template else ""
275 |
276 | fields = [f for f in (inline_template_fields + qweb_fields) if data[f] != templates_converted[f]]
277 | if fields:
278 | sql_fields = ",".join([field + "=%s" for field in fields])
279 | field_values = [templates_converted[field] for field in fields]
280 |
281 | cr.execute(
282 | f"""
283 | UPDATE {table_name}
284 | SET {sql_fields}
285 | WHERE id = %s
286 | """,
287 | field_values + [data["id"]],
288 | )
289 | # prepare data to check later
290 |
291 | # only for mailing.mailing
292 | if fetch_model_name:
293 | cr.execute(
294 | """
295 | SELECT model FROM ir_model WHERE id=%s
296 | """,
297 | [data[table_model_name]],
298 | )
299 | model_name = cr.fetchone()[0]
300 | else:
301 | model_name = model_name or data[table_model_name]
302 |
303 | templates_to_check[table_name].append(
304 | (
305 | data,
306 | name_field,
307 | model_name,
308 | inline_template_fields,
309 | qweb_fields,
310 | templates_converted,
311 | )
312 | )
313 |
314 | if not table_exists(cr, "ir_translation"):
315 | return
316 |
317 | _logger.info("process translations for model %s", model)
318 | # NOTE: Not all translations may be updated.
319 | # Different jinja values can give the same qweb value.
320 | # `${object.company_id.name|safe}` and `${object.company_id.name}` both give `{{ object.company_id.name }}`,
321 | # which violates the unique constraint.
322 | # In this case, we just ignore the update and remove the duplicated row.
323 |
324 | inline_entries = [f"{model},{name}" for name in inline_template_fields]
325 | if inline_entries:
326 | cr.execute(
327 | """
328 | SELECT id, src, value
329 | FROM ir_translation
330 | WHERE name IN %s
331 | AND (src LIKE '%%${%%' OR value LIKE '%%${%%')
332 | """,
333 | [tuple(inline_entries)],
334 | )
335 | for tid, src, value in cr.fetchall():
336 | converted_src = convert_jinja_to_inline(src) if src else ""
337 | converted_value = convert_jinja_to_inline(value) if value else ""
338 | cr.execute(
339 | """
340 | DELETE FROM ir_translation orig
341 | USING ir_translation dup
342 | WHERE orig.id = %s
343 | AND dup.id != orig.id
344 | -- "ir_translation_unique" UNIQUE, btree (type, name, lang, res_id, md5(src))
345 | AND dup.type = orig.type
346 | AND dup.name = orig.name
347 | AND dup.lang = orig.lang
348 | AND dup.res_id = orig.res_id
349 | AND dup.src = %s
350 | RETURNING orig.id
351 | """,
352 | [tid, converted_src],
353 | )
354 | if not cr.rowcount:
355 | # no duplicate found, update the translation
356 | cr.execute(
357 | "UPDATE ir_translation SET src=%s, value=%s WHERE id=%s",
358 | [converted_src, converted_value, tid],
359 | )
360 |
361 | cr.commit() # commit changes for the named cursor below
362 | qweb_entries = [f"{model},{name}" for name in qweb_fields]
363 | if qweb_entries:
364 | ncr = named_cursor(cr, 1000)
365 | ncr.execute(
366 | r"""
367 | SELECT id, src, value
368 | FROM ir_translation
369 | WHERE name IN %s
370 | AND (src ~ '(\$\{|%%\s*(if|for))' OR value ~ '(\$\{|%%\s*(if|for))')
371 | """,
372 | [tuple(qweb_entries)],
373 | )
374 |
375 | for tid, src, value in ncr:
376 | converted_src = convert_jinja_to_qweb(src) if src else ""
377 | converted_value = convert_jinja_to_qweb(value) if value else ""
378 | cr.execute(
379 | """
380 | DELETE FROM ir_translation orig
381 | USING ir_translation dup
382 | WHERE orig.id = %s
383 | AND dup.id != orig.id
384 | -- "ir_translation_unique" UNIQUE, btree (type, name, lang, res_id, md5(src))
385 | AND dup.type = orig.type
386 | AND dup.name = orig.name
387 | AND dup.lang = orig.lang
388 | AND dup.res_id = orig.res_id
389 | AND dup.src = %s
390 | RETURNING orig.id
391 | """,
392 | [tid, converted_src],
393 | )
394 | if not cr.rowcount:
395 | # no duplicate found, update the translation
396 | cr.execute(
397 | "UPDATE ir_translation SET src=%s, value=%s WHERE id=%s",
398 | [converted_src, converted_value, tid],
399 | )
400 | ncr.close()
401 |
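# Usage sketch (illustrative) for an end-of-upgrade script; the field names below are
# examples and may differ between versions:
#
#   upgrade_jinja_fields(
#       cr,
#       "mail_template",
#       inline_template_fields=["subject", "email_from"],
#       qweb_fields=["body_html"],
#   )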
402 |
403 | def verify_upgraded_jinja_fields(cr):
404 | env = get_env(cr)
405 | for table_name, template_data in templates_to_check.items():
406 | field_errors = {}
407 | missing_records = []
408 | for (
409 | data,
410 | name_field,
411 | model_name,
412 | inline_template_fields,
413 | qweb_fields,
414 | templates_converted,
415 | ) in template_data:
416 | if model_name not in env:
417 | # custom model not loaded yet. Ignore
418 | continue
419 | model = env[model_name]
420 | record = model.with_context({"active_test": False}).search([], limit=1, order="id")
421 |
422 | key = (data["id"], data[name_field])
423 | field_errors[key] = []
424 |
425 | if not record:
426 | missing_records.append(key)
427 |
428 | for field in inline_template_fields:
429 | if not data[field]:
430 | continue
431 | is_valid = is_converted_template_valid(
432 | env, data[field], templates_converted[field], model_name, record.id, engine="inline_template"
433 | )
434 | if not is_valid:
435 | field_errors[key].append(field)
436 |
437 | for field in qweb_fields:
438 | is_valid = is_converted_template_valid(
439 | env, data[field], templates_converted[field], model_name, record.id, engine="qweb"
440 | )
441 | if not is_valid:
442 | field_errors[key].append(field)
443 |
444 | if missing_records:
445 | list_items = "\n".join(
446 | f'<li>id: "{id}", {html_escape(name_field)}: "{html_escape(name)}"</li>'
447 | for id, name in missing_records
448 | )
449 | add_to_migration_reports(
450 | f"""
451 | <details>
452 | <summary>
453 | Some of the records for the table {html_escape(table_name)} could not be tested because there is no
454 | record in the database.
455 | The {html_escape(table_name)} records are:
456 | </summary>
457 | <ul>{list_items}</ul>
458 | </details>
459 | """,
460 | "Jinja upgrade",
461 | format="html",
462 | )
463 | field_errors = dict(filter(lambda x: bool(x[1]), field_errors.items()))
464 |
465 | if field_errors:
466 | string = []
467 | for (id, name), fields in field_errors.items():
468 | fields_string = "\n".join(f"<li>{html_escape(field)}</li>" for field in fields)
469 | string.append(
470 | f"""id: {id}, {html_escape(name_field)}: {html_escape(name)},
471 | fields: """
472 | )
473 |
474 | string = "\n".join(string)
475 | add_to_migration_reports(
476 | f"""
477 | <details>
478 | <summary>
479 | Some of the fields of the table {html_escape(table_name)} do not render the same value
480 | before and after being converted.
481 | The mail.template records are:
482 | </summary>
483 | <ul>{string}</ul>
484 | </details>
485 | """,
486 | "Jinja upgrade",
487 | format="html",
488 | )
489 |
490 |
491 | def is_converted_template_valid(env, template_before, template_after, model_name, record_id, engine="inline_template"):
492 | render_before = None
493 | with contextlib.suppress(Exception):
494 | render_before = _render_template_jinja(env, template_before, model_name, record_id)
495 |
496 | render_after = None
497 | if render_before is not None:
498 | try:
499 | with mute_logger("odoo.addons.mail.models.mail_render_mixin"):
500 | render_after = env["mail.render.mixin"]._render_template(
501 | template_after, model_name, [record_id], engine=engine
502 | )[record_id]
503 | except Exception:
504 | pass
505 |
506 | # post-process the qweb render to remove comments from the rendered jinja in
507 | # order to avoid false negatives, because qweb never renders comments.
508 | if render_before and render_after and engine == "qweb":
509 | element_before = lxml.html.fragment_fromstring(render_before, create_parent="div")
510 | for comment_element in element_before.xpath("//comment()"):
511 | comment_element.getparent().remove(comment_element)
512 | render_before = lxml.html.tostring(element_before, encoding="unicode")
513 | render_after = lxml.html.tostring(
514 | lxml.html.fragment_fromstring(render_after, create_parent="div"), encoding="unicode"
515 | )
516 |
517 | return render_before is not None and render_before == render_after
518 |
519 |
520 | # jinja render
521 |
522 |
523 | def format_date(env, date, pattern=False, lang_code=False):
524 | try:
525 | return tools.format_date(env, date, date_format=pattern, lang_code=lang_code)
526 | except babel.core.UnknownLocaleError:
527 | return date
528 |
529 |
530 | def format_datetime(env, dt, tz=False, dt_format="medium", lang_code=False):
531 | try:
532 | return tools.format_datetime(env, dt, tz=tz, dt_format=dt_format, lang_code=lang_code)
533 | except babel.core.UnknownLocaleError:
534 | return dt
535 |
536 |
537 | def format_time(env, time, tz=False, time_format="medium", lang_code=False):
538 | try:
539 | return tools.format_time(env, time, tz=tz, time_format=time_format, lang_code=lang_code)
540 | except babel.core.UnknownLocaleError:
541 | return time
542 |
543 |
544 | def relativedelta_proxy(*args, **kwargs):
545 | # dateutil.relativedelta is an old-style class and cannot be directly
546 | # instantiated within a jinja2 expression, so a lambda "proxy"
547 | # is needed, apparently
548 | return relativedelta.relativedelta(*args, **kwargs)
549 |
550 |
551 | template_env_globals = {
552 | "str": str,
553 | "quote": urls.url_quote,
554 | "urlencode": urls.url_encode,
555 | "datetime": safe_eval.datetime,
556 | "len": len,
557 | "abs": abs,
558 | "min": min,
559 | "max": max,
560 | "sum": sum,
561 | "filter": filter,
562 | "reduce": functools.reduce,
563 | "map": map,
564 | "relativedelta": relativedelta_proxy,
565 | "round": round,
566 | }
567 |
568 | jinja_template_env = SandboxedEnvironment(
569 | block_start_string="<%",
570 | block_end_string="%>",
571 | variable_start_string="${",
572 | variable_end_string="}",
573 | comment_start_string="<%doc>",
574 | comment_end_string="%doc>",
575 | line_statement_prefix="%",
576 | line_comment_prefix="##",
577 | trim_blocks=True, # do not output newline after blocks
578 | autoescape=True, # XML/HTML automatic escaping
579 | )
580 |
581 | jinja_template_env.globals.update(template_env_globals)
582 |
583 |
584 | def _render_template_jinja(env, template_txt, model, res_id):
585 | if not template_txt:
586 | return ""
587 |
588 | template = jinja_template_env.from_string(tools.ustr(template_txt))
589 |
590 | record = env[model].browse([res_id])
591 | variables = {
592 | "format_date": functools.partial(format_date, env),
593 | "format_datetime": functools.partial(format_datetime, env),
594 | "format_time": functools.partial(format_time, env),
595 | "format_amount": functools.partial(tools.format_amount, env),
596 | "format_duration": tools.format_duration,
597 | "user": env.user,
598 | "ctx": {},
599 | "is_html_empty": is_html_empty,
600 | "object": record,
601 | }
602 |
603 | safe_eval.check_values(variables)
604 | render_result = template.render(variables)
605 | if render_result == "False":
606 | render_result = ""
607 | return render_result
608 |
--------------------------------------------------------------------------------
/src/util/json.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 |
3 | __all__ = ["dumps", "load", "loads"]
4 |
5 | try:
6 | import orjson
7 | except ImportError:
8 | import json
9 |
10 | def dumps(value, sort_keys=False):
11 | return json.dumps(value, sort_keys=sort_keys, separators=(",", ":"))
12 |
13 | def loads(value):
14 | return json.loads(value)
15 |
16 | def load(fp):
17 | return json.load(fp)
18 | else:
19 |
20 | def dumps(value, sort_keys=False):
21 | if isinstance(value, tuple):
22 | # downcast namedtuples
23 | value = tuple(value)
24 |
25 | option = orjson.OPT_NON_STR_KEYS
26 | if sort_keys:
27 | option |= orjson.OPT_SORT_KEYS
28 | return orjson.dumps(value, option=option).decode()
29 |
30 | def loads(value):
31 | return orjson.loads(value)
32 |
33 | def load(fp):
34 | return orjson.loads(fp.read())
35 |
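# Usage sketch (illustrative): both branches expose the same compact API
# (this module is imported as `json`, e.g. `from .. import json` in spreadsheet/misc.py).
#
#   dumps({"b": 1, "a": [1, 2]}, sort_keys=True)   # '{"a":[1,2],"b":1}'
#   loads('{"a":[1,2],"b":1}')                     # {'a': [1, 2], 'b': 1}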
--------------------------------------------------------------------------------
/src/util/logger.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | import atexit
3 | import logging
4 | import os
5 |
6 | from .misc import on_CI
7 |
8 | _REGISTERED = False
9 |
10 | _logger = logging.getLogger(__name__.rpartition(".")[0])
11 |
12 |
13 | class CriticalHandler(logging.Handler):
14 | def __init__(self):
15 | super(CriticalHandler, self).__init__(logging.CRITICAL)
16 |
17 | def emit(self, record):
18 | global _REGISTERED # noqa: PLW0603
19 | if _REGISTERED:
20 | return
21 |
22 | # force exit with status_code=1 if any critical log is emitted during upgrade
23 | atexit.register(os._exit, 1)
24 | _REGISTERED = True
25 |
26 |
27 | if on_CI(): # hopefully temporary restriction
28 | product = _logger.name.partition(".")[0]
29 | logging.getLogger(product).addHandler(CriticalHandler())
30 |
--------------------------------------------------------------------------------
/src/util/release-note.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 | Hello,
4 | Your database has successfully been upgraded to the latest version 🥳.
5 |
6 |
7 |
8 |
9 | Meet Odoo . now !
10 |
11 |
12 |
13 |
14 |
15 |
16 | Want to know more? Check out the full release note.
17 | Want to know more? Check out the full release note.
18 |
19 |
20 |
--------------------------------------------------------------------------------
/src/util/report-migration.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 | Congratulations, you have just upgraded to Odoo
4 | Here are changes that may impact day-to-day flows in this new version.
5 | Want to know more? Check out the full functional release note.
6 | Want to know more? Check out the full functional release note.
7 |
8 |
9 |
10 |
11 |
12 |
13 |
17 |
18 |
19 |
20 |
21 |
22 |
23 |
24 |
25 | -
26 |
27 |
28 |
29 |
30 |
31 |
32 |
33 |
34 |
35 |
36 |
37 |
38 |
39 |
40 |
41 |
42 |
43 | During the upgrade some fields have been removed. The records below have been automatically corrected.
44 |
49 |
50 |
51 |
52 |
53 |
54 |
55 |
56 |
57 |
58 |
59 |
--------------------------------------------------------------------------------
/src/util/report.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | import logging
3 | import os
4 | import re
5 | import sys
6 | from textwrap import dedent
7 |
8 | import lxml
9 | from docutils.core import publish_string
10 |
11 | from .helpers import _validate_model
12 | from .misc import parse_version
13 |
14 | # python3 shims
15 | try:
16 | basestring # noqa: B018
17 | except NameError:
18 | basestring = unicode = str
19 |
20 | try:
21 | from markupsafe import Markup, escape
22 |
23 | from odoo.tools.misc import html_escape
24 |
25 | if html_escape is not escape:
26 | Markup = None
27 | except ImportError:
28 | Markup = None
29 |
30 | try:
31 | try:
32 | from odoo.api import SUPERUSER_ID
33 | except ImportError:
34 | from odoo import SUPERUSER_ID
35 | from odoo import release
36 | from odoo.tools.mail import html_sanitize
37 | except ImportError:
38 | from openerp import SUPERUSER_ID, release
39 | from openerp.tools.mail import html_sanitize
40 |
41 |
42 | if sys.version_info > (3,):
43 | from odoo.tools import html_escape
44 | else:
45 | # In python2, `html_escape` always returns a byte-string with non-ascii characters replaced
46 | # by their html entities.
47 |
48 | import werkzeug.utils
49 |
50 | # Avoid DeprecationWarning while still remaining compatible with werkzeug pre-0.9
51 | if parse_version(getattr(werkzeug, "__version__", "0.0")) < parse_version("0.9.0"):
52 |
53 | def html_escape(text):
54 | return werkzeug.utils.escape(text, quote=True).encode("ascii", "xmlcharrefreplace")
55 |
56 | else:
57 |
58 | def html_escape(text):
59 | return werkzeug.utils.escape(text).encode("ascii", "xmlcharrefreplace")
60 |
61 |
62 | try:
63 | from odoo.addons.base.models.ir_module import MyWriter # > 11.0
64 | except ImportError:
65 | try:
66 | from odoo.addons.base.module.module import MyWriter
67 | except ImportError:
68 | from openerp.addons.base.module.module import MyWriter
69 |
70 | from .exceptions import MigrationError
71 | from .misc import has_enterprise, split_osenv, version_between, version_gte
72 | from .orm import env, get_admin_channel, guess_admin_id
73 |
74 | migration_reports = {}
75 | _logger = logging.getLogger(__name__)
76 |
77 |
78 | _ENV_AM = set(split_osenv("UPG_ANNOUNCE_MEDIA", default="discuss"))
79 | ANNOUNCE_MEDIA = _ENV_AM & {"", "discuss", "logger"}
80 | if _ENV_AM - ANNOUNCE_MEDIA:
81 | raise ValueError(
82 | "Invalid value for the environment variable `UPG_ANNOUNCE_MEDIA`: {!r}. "
83 | "Authorized values are a combination of 'discuss', 'logger', or an empty string.".format(
84 | os.getenv("UPG_ANNOUNCE_MEDIA")
85 | )
86 | )
87 | ANNOUNCE_MEDIA -= {""}
88 |
89 |
90 | ODOO_SHOWCASE_VIDEOS = {
91 | "saas~18.3": "oyev2DxC5yY",
92 | "saas~18.2": "bwn_HWuLuTA",
93 | "saas~18.1": "is9oLyIkQGk",
94 | "18.0": "gbE3azm_Io0",
95 | "saas~17.4": "8F4-uDwom8A",
96 | "saas~17.2": "ivjgo_2-wkE",
97 | "17.0": "qxb74CMR748",
98 | "16.0": "RVFZL3D9plg",
99 | }
100 |
101 |
102 | def add_to_migration_reports(message, category="Other", format="text"):
103 | assert format in {"text", "html", "md", "rst"}
104 | if format == "md":
105 | message = md2html(dedent(message))
106 | elif format == "rst":
107 | message = rst2html(message)
108 | raw = False
109 | if format != "text":
110 | if Markup:
111 | message = Markup(message)
112 | else:
113 | raw = True
114 | migration_reports.setdefault(category, []).append((message, raw))
115 | migration_reports_length = sum(len(msg) for reps in migration_reports.values() for msg, _ in reps) + sum(
116 | map(len, migration_reports)
117 | )
118 | if migration_reports_length > 1000000:
119 | _logger.warning("Upgrade report is growing suspiciously long: %s characters so far.", migration_reports_length)
120 |
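# Usage sketch (illustrative): queue entries for the final migration report.
#
#   add_to_migration_reports("3 orphaned records were archived.", category="Contacts")
#   add_to_migration_reports("See the <b>release notes</b>.", format="html")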
121 |
122 | def announce_release_note(cr):
123 | filepath = os.path.join(os.path.dirname(__file__), "release-note.xml")
124 | with open(filepath, "rb") as fp:
125 | contents = fp.read()
126 | report = lxml.etree.fromstring(contents)
127 | e = env(cr)
128 | major_version, minor_version = re.findall(r"\d+", release.major_version)
129 | values = {
130 | "version": release.major_version,
131 | "major_version": major_version,
132 | "minor_version": minor_version,
133 | "odoo_showcase_video_id": ODOO_SHOWCASE_VIDEOS.get(release.major_version, ""),
134 | }
135 | _logger.info("Rendering release note for version %s", release.version)
136 | render = e["ir.qweb"].render if hasattr(e["ir.qweb"], "render") else e["ir.qweb"]._render
137 | message = render(report, values=values)
138 | _announce_to_db(cr, message, to_admin_only=False)
139 |
140 |
141 | def announce_migration_report(cr):
142 | filepath = os.path.join(os.path.dirname(__file__), "report-migration.xml")
143 | with open(filepath, "rb") as fp:
144 | contents = fp.read()
145 | if Markup:
146 | contents = contents.replace(b"t-raw", b"t-out")
147 | report = lxml.etree.fromstring(contents)
148 | e = env(cr)
149 | major_version, minor_version = re.findall(r"\d+", release.major_version)
150 | values = {
151 | "action_view_id": e.ref("base.action_ui_view").id,
152 | "version": release.major_version,
153 | "major_version": major_version,
154 | "minor_version": minor_version,
155 | "messages": migration_reports,
156 | "get_anchor_link_to_record": get_anchor_link_to_record,
157 | }
158 | _logger.info(migration_reports)
159 | render = e["ir.qweb"].render if hasattr(e["ir.qweb"], "render") else e["ir.qweb"]._render
160 | message = render(report, values=values)
161 | _announce_to_db(cr, message)
162 | # To avoid posting the same messages multiple times in case this method is called more than once.
163 | migration_reports.clear()
164 |
165 |
166 | def _announce_to_db(cr, message, to_admin_only=True):
167 | """Send a rendered message to the database via mail channel."""
168 | if not isinstance(message, basestring):
169 | message = message.decode("utf-8")
170 | if message.strip():
171 | message = message.replace("{", "{{").replace("}", "}}")
172 | kw = {}
173 | # If possible, post the migration report message to administrators only.
174 | recipient = get_admin_channel(cr) if to_admin_only else None
175 | if recipient:
176 | kw["recipient"] = recipient
177 | announce(cr, release.major_version, message, format="html", header=None, footer=None, **kw)
178 |
179 |
180 | def rst2html(rst):
181 | overrides = {
182 | "embed_stylesheet": False,
183 | "doctitle_xform": False,
184 | "output_encoding": "unicode",
185 | "xml_declaration": False,
186 | }
187 | html = publish_string(source=dedent(rst), settings_overrides=overrides, writer=MyWriter())
188 | return html_sanitize(html, silent=False)
189 |
190 |
191 | def md2html(md):
192 | import markdown
193 |
194 | mdversion = markdown.__version_info__ if hasattr(markdown, "__version_info__") else markdown.version_info
195 | extensions = [
196 | "markdown.extensions.nl2br",
197 | "markdown.extensions.sane_lists",
198 | ]
199 | if mdversion[0] < 3:
200 | extensions.append("markdown.extensions.smart_strong")
201 |
202 | return markdown.markdown(md, extensions=extensions)
203 |
204 |
205 | _DEFAULT_HEADER = """
206 | <p>Odoo has been upgraded to version {version}.</p>
207 | <h2>What's new in this upgrade?</h2>
208 | """
209 |
210 | _DEFAULT_FOOTER = "<p>Enjoy the new Odoo Online!</p>"
211 |
212 | _DEFAULT_RECIPIENT = "mail.%s_all_employees" % ["group", "channel"][version_gte("9.0")]
213 |
214 |
215 | def announce(
216 | cr,
217 | version,
218 | msg,
219 | format="rst",
220 | recipient=_DEFAULT_RECIPIENT,
221 | header=_DEFAULT_HEADER,
222 | footer=_DEFAULT_FOOTER,
223 | pluses_for_enterprise=None,
224 | ):
225 | if not ANNOUNCE_MEDIA:
226 | return
227 | if pluses_for_enterprise is None:
228 | # default value depends on format and version
229 | major = version[0]
230 | pluses_for_enterprise = (major == "s" or int(major) >= 9) and format == "md"
231 |
232 | if pluses_for_enterprise:
233 | plus_re = r"^(\s*)\+ (.+)\n"
234 | replacement = r"\1- \2\n" if has_enterprise() else ""
235 | msg = re.sub(plus_re, replacement, msg, flags=re.M)
236 |
237 | if format == "rst":
238 | msg = rst2html(msg)
239 | elif format == "md":
240 | msg = md2html(msg)
241 |
242 | message = ((header or "") + msg + (footer or "")).format(version=version)
243 | if "logger" in ANNOUNCE_MEDIA:
244 | _logger.info(message)
245 |
246 | if "discuss" not in ANNOUNCE_MEDIA:
247 | return
248 |
249 | # do not notify early, in case the migration fails halfway through
250 | ctx = {"mail_notify_force_send": False, "mail_notify_author": True}
251 |
252 | uid = guess_admin_id(cr)
253 | try:
254 | registry = env(cr)
255 | user = registry["res.users"].browse([uid])[0].with_context(ctx)
256 |
257 | def ref(xid):
258 | return registry.ref(xid).with_context(ctx)
259 |
260 | except MigrationError:
261 | try:
262 | from openerp.modules.registry import RegistryManager
263 | except ImportError:
264 | from openerp.modules.registry import Registry as RegistryManager
265 | registry = RegistryManager.get(cr.dbname)
266 | user = registry["res.users"].browse(cr, SUPERUSER_ID, uid, context=ctx)
267 |
268 | def ref(xid):
269 | rmod, _, rxid = recipient.partition(".")
270 | return registry["ir.model.data"].get_object(cr, SUPERUSER_ID, rmod, rxid, context=ctx)
271 |
272 | # default recipient
273 | poster = user.message_post if hasattr(user, "message_post") else user.partner_id.message_post
274 |
275 | if recipient:
276 | try:
277 | if isinstance(recipient, str): # noqa: SIM108
278 | recipient = ref(recipient)
279 | else:
280 | recipient = recipient.with_context(**ctx)
281 | poster = recipient.message_post
282 | except (ValueError, AttributeError):
283 | # Cannot find record, post the message on the wall of the admin
284 | pass
285 |
286 | type_field = ["type", "message_type"][version_gte("9.0")]
287 | # From 12.0, system notifications are sent by email
288 | # and do not increment the upper right notification counter,
289 | # while comments in a mail.channel do.
290 | # We want the notification counter to appear for announcements, so we force the comment type from 12.0.
291 | type_value = ["notification", "comment"][version_gte("12.0")]
292 | subtype_key = ["subtype", "subtype_xmlid"][version_gte("saas~13.1")]
293 |
294 | kw = {type_field: type_value, subtype_key: "mail.mt_comment"}
295 |
296 | try:
297 | poster(body=message, partner_ids=[user.partner_id.id], **kw)
298 | except Exception:
299 | _logger.warning("Cannot announce message", exc_info=True)
300 | else:
301 | # Chat window with the report will be open post-upgrade for the admin user
302 | if version_between("9.0", "saas~18.1") and user.partner_id and recipient:
303 | channel_member_model = (
304 | "discuss.channel.member"
305 | if version_gte("saas~16.3")
306 | else "mail.channel.member"
307 | if version_gte("16.0")
308 | else "mail.channel.partner"
309 | )
310 | domain = [("partner_id", "=", user.partner_id.id), ("channel_id", "=", recipient.id)]
311 | try:
312 | registry[channel_member_model].search(domain)[:1].with_context(ctx).fold_state = "open"
313 | except Exception:
314 | _logger.warning("Cannot unfold chat window", exc_info=True)
315 |
316 |
317 | def get_anchor_link_to_record(model, id, name, action_id=None):
318 | _validate_model(model)
319 | if not name:
320 | name = "{}(id={})".format(model, id)
321 | if version_gte("saas~17.2"):
322 | part1 = "action-{}".format(action_id) if action_id else model
323 | url = "/odoo/{}/{}?debug=1".format(part1, id)
324 | else:
325 | url = "/web?debug=1#view_type=form&model={}&action={}&id={}".format(model, action_id or "", id)
326 |
327 | anchor_tag = '<a href="{}">{}</a>'.format(url, html_escape(name))
328 | if Markup:
329 | anchor_tag = Markup(anchor_tag)
330 | return anchor_tag
331 |
--------------------------------------------------------------------------------
/src/util/snippets.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | import inspect
3 | import logging
4 | import re
5 | import sys
6 | import uuid
7 | from concurrent.futures import ProcessPoolExecutor
8 |
9 | from lxml import etree, html
10 | from psycopg2 import sql
11 | from psycopg2.extensions import quote_ident
12 | from psycopg2.extras import Json
13 |
14 | from .const import NEARLYWARN
15 | from .exceptions import MigrationError
16 | from .helpers import table_of_model
17 | from .misc import import_script, log_progress
18 | from .pg import column_exists, column_type, get_max_workers, table_exists
19 |
20 | _logger = logging.getLogger(__name__)
21 | utf8_parser = html.HTMLParser(encoding="utf-8")
22 |
23 |
24 | class Snippet:
25 | def __init__(self, name, tag="*", klass="", selector=""):
26 | self.name = name
27 | self.tag = tag
28 | self.klass = klass or name
29 | self.selector = selector or f'//{tag}[hasclass("{self.klass}")]'
30 |
31 |
32 | def add_snippet_names(cr, table, column, snippets, select_query):
33 | """
34 | Execute the select_query, then, for each snippet contained in the arch, add the right data-snippet attribute on the right element.
35 |
36 | :param str table: The table we are working on
37 | :param str column: The column we are working on
38 | :param list snippets: list of all snippets to migrate
39 | :param str select_query: a query that when executed will return (id, list of snippets contained in the arch, arch)
40 | """
41 | _logger.info("Add snippet names on %s.%s", table, column)
42 | cr.execute(select_query)
43 |
44 | it = log_progress(cr.fetchall(), _logger, qualifier="rows", size=cr.rowcount, log_hundred_percent=True)
45 |
46 | def quote(ident):
47 | return quote_ident(ident, cr._cnx)
48 |
49 | for res_id, regex_matches, arch in it:
50 | regex_matches = [match[0] for match in regex_matches] # noqa: PLW2901
51 | arch = arch.replace("\r", "")  # otherwise html parser below will transform \r -> &#13;  # noqa: PLW2901
52 | body = html.fromstring(arch, parser=utf8_parser)
53 | changed = False
54 | for snippet in snippets:
55 | if snippet.klass in regex_matches:
56 | body_snippets = body.xpath(snippet.selector)
57 | for body_snippet in body_snippets:
58 | body_snippet.attrib["data-snippet"] = snippet.name
59 | changed = True
60 | if changed:
61 | body = etree.tostring(body, encoding="unicode")
62 | cr.execute(f"UPDATE {quote(table)} SET {quote(column)} = %s WHERE id = %s", [body, res_id])
63 |
64 |
65 | def add_snippet_names_on_html_field(cr, table, column, snippets, regex):
66 | """Search for all the snippets in the fields mentioned (should be html fields) and add the corresponding data-snippet on them."""
67 | query = cr.mogrify(
68 | sql.SQL(
69 | """
70 | SELECT id, array((SELECT regexp_matches({column}, %(regex)s, 'g'))), {column}
71 | FROM {table}
72 | WHERE {column} ~ %(regex)s
73 | """
74 | ).format(column=sql.Identifier(column), table=sql.Identifier(table)),
75 | {"regex": regex},
76 | ).decode()
77 | where = cr.mogrify(sql.SQL("{column} ~ %s").format(column=sql.Identifier(column)), [regex]).decode()
78 | ids_ranges = determine_chunk_limit_ids(cr, table, [column], where)
79 | for id0, id1 in ids_ranges:
80 | add_snippet_names(cr, table, column, snippets, query + f" AND id BETWEEN {id0} AND {id1}")
81 |
82 |
83 | def get_regex_from_snippets_list(snippets):
84 | return "(%s)" % "|".join(snippet.klass for snippet in snippets)
85 |
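# Usage sketch (illustrative): tag a snippet class in all stored html fields; the
# snippet name below is a hypothetical example.
#
#   snippets = [Snippet("s_three_columns", tag="section")]
#   regex = get_regex_from_snippets_list(snippets)   # "(s_three_columns)"
#   for table, column in get_html_fields(cr):
#       add_snippet_names_on_html_field(cr, table, column, snippets, regex)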
86 |
87 | def get_html_fields(cr):
88 | # yield (table, column) of stored html fields (that needs snippets updates)
89 | for table, columns in html_fields(cr):
90 | for column in columns:
91 | yield table, quote_ident(column, cr._cnx)
92 |
93 |
94 | def html_fields(cr):
95 | cr.execute(
96 | """
97 | SELECT f.model, array_agg(f.name)
98 | FROM ir_model_fields f
99 | JOIN ir_model m ON m.id = f.model_id
100 | WHERE f.ttype = 'html'
101 | AND f.store = true
102 | AND m.transient = false
103 | AND f.model NOT LIKE 'ir.actions%'
104 | AND f.model != 'mail.message'
105 | GROUP BY f.model
106 | """
107 | )
108 | for model, columns in cr.fetchall():
109 | table = table_of_model(cr, model)
110 | if not table_exists(cr, table):
111 | # an SQL VIEW
112 | continue
113 | existing_columns = [column for column in columns if column_exists(cr, table, column)]
114 | if existing_columns:
115 | yield table, existing_columns
116 |
117 |
118 | def parse_style(attr):
119 | """
120 | Convert an HTML style attribute's text into a dict mapping property names to property values.
121 |
122 | :param str attr: value of an HTML style attribute
123 | :return: dict of CSS property values per property name
124 | """
125 | # Captures two groups:
126 | # - identifier: sequence of word character or hyphen that is followed by a colon
127 | # - value: sequence of:
128 | # - any non semicolon character or
129 | # - sequence of any non single quote character or escaped single quote
130 | # surrounded by single quotes or
131 | # - sequence of any non double quote character or escaped double quote
132 | # surrounded by double quotes
133 | regex = r"""
134 | ([\w\-]+)\s*:\s*((?:[^;\"']|'(?:[^']|(?:\\'))*'|\"(?:[^\"]|(?:\\\"))*\")+)
135 | """.strip()
136 | return dict(re.findall(regex, attr))
137 |
138 |
139 | def format_style(styles):
140 | """
141 | Convert a dict of CSS property names to property values into an HTML style attribute string.
142 |
143 | :param dict styles: CSS property value per property name
144 | :return: str HTML style attribute
145 | """
146 | style = "; ".join(["%s: %s" % entry for entry in styles.items()])
147 | if len(style) > 0 and style[-1] != ";":
148 | style += ";"
149 | return style
150 |
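# Usage sketch (illustrative): round-trip a style attribute through a dict.
#
#   styles = parse_style("color: red; font-size: 12px")   # {'color': 'red', 'font-size': '12px'}
#   styles["font-weight"] = "bold"
#   format_style(styles)                                   # 'color: red; font-size: 12px; font-weight: bold;'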
151 |
152 | def html_converter(transform_callback, selector=None):
153 | """
154 | Create an upgrade converter for a single HTML text content or for HTML elements that match a selector.
155 |
156 | :param func transform_callback: transforms an HTML tree and returns True if
157 | a change happened
158 | :param str selector: targets the elements to loop on
159 | :return: object HTMLConverter with callback
160 | """
161 | return HTMLConverter(make_pickleable_callback(transform_callback), selector)
162 |
163 |
164 | def make_pickleable_callback(callback):
165 | """
166 | Make a callable importable.
167 |
168 | `ProcessPoolExecutor.map` arguments need to be pickleable.
169 | Functions can only be pickled if they are importable.
170 | However, the callback's file is not importable due to the dash in the filename.
171 | We should then put the executed function in its own importable file.
172 | """
173 | callback_filepath = inspect.getfile(callback)
174 | name = f"_upgrade_{uuid.uuid4().hex}"
175 | mod = sys.modules[name] = import_script(callback_filepath, name=name)
176 | try:
177 | return getattr(mod, callback.__name__)
178 | except AttributeError:
179 | error_msg = (
180 | f"The converter callback `{callback.__name__}` is a nested function in `{callback.__module__}`.\n"
181 | "Move it outside the `migrate()` function to make it top-level."
182 | )
183 | raise MigrationError(error_msg) from None
184 |
185 |
186 | class BaseConverter:
187 | def __init__(self, callback, selector=None):
188 | self.callback = callback
189 | self.selector = selector
190 |
191 | def for_html(self):
192 | return HTMLConverter(self.callback, self.selector)
193 |
194 | def for_qweb(self):
195 | return QWebConverter(self.callback, self.selector)
196 |
197 | def has_changed(self, els):
198 | if self.selector:
199 | converted = [self.callback(el) for el in els.xpath(self.selector)]
200 | return any(converted)
201 | return self.callback(els)
202 |
203 | def __call__(self, content):
204 | # Remove the `<?xml ... ?>` header
205 | if not content:
206 | return (False, content)
207 | content = re.sub(r"^<\?xml .+\?>\s*", "", content.strip())
208 | # Wrap in a <wrap> node before parsing to preserve external comments and multi-root nodes,
209 | # except for when this looks like a full html doc, because in this case the wrap tag breaks the logic in
210 | # https://github.com/lxml/lxml/blob/2ac88908ffd6df380615c0af35f2134325e4bf30/src/lxml/html/html5parser.py#L184
211 | els = self._loads(content if content.strip()[:5].lower() == "<html" else f"<wrap>{content}</wrap>")
212 | has_changed = self.has_changed(els)
213 | new_content = re.sub(r"(^<wrap>|</wrap>$|^<wrap/>$)", "", self._dumps(els).strip()) if has_changed else content
214 | return (has_changed, new_content)
215 |
216 | def _loads(self, string):
217 | raise NotImplementedError
218 |
219 | def _dumps(self, node):
220 | raise NotImplementedError
221 |
222 |
223 | class HTMLConverter(BaseConverter):
224 | def for_html(self):
225 | return self
226 |
227 | def _loads(self, string):
228 | return html.fromstring(string, parser=utf8_parser)
229 |
230 | def _dumps(self, node):
231 | return html.tostring(node, encoding="unicode")
232 |
233 |
234 | class QWebConverter(BaseConverter):
235 | def for_qweb(self):
236 | return self
237 |
238 | def _loads(self, string):
239 | return html.fromstring(string, parser=html.XHTMLParser(encoding="utf-8"))
240 |
241 | def _dumps(self, node):
242 | return etree.tostring(node, encoding="unicode")
243 |
244 |
245 | class Convertor:
246 | def __init__(self, converters, callback):
247 | self.converters = converters
248 | self.callback = callback
249 |
250 | def __call__(self, row):
251 | converters = self.converters
252 | columns = self.converters.keys()
253 | converter_callback = self.callback
254 | res_id, *contents = row
255 | changes = {}
256 | for column, content in zip(columns, contents):
257 | if content and converters[column]:
258 | # jsonb column; convert all keys
259 | new_content = {}
260 | has_changed, new_content["en_US"] = converter_callback(content.pop("en_US"))
261 | if has_changed:
262 | for lang, value in content.items():
263 | _, new_content[lang] = converter_callback(value)
264 | new_content = Json(new_content)
265 | else:
266 | has_changed, new_content = converter_callback(content)
267 | changes[column] = new_content
268 | if has_changed:
269 | changes["id"] = res_id
270 | return changes
271 |
272 |
273 | def convert_html_columns(cr, table, columns, converter_callback, where_column="IS NOT NULL", extra_where="true"):
274 | r"""
275 | Convert HTML content for the given table column.
276 |
277 | :param cursor cr: database cursor
278 | :param str table: table name
279 | :param list columns: column names
280 | :param func converter_callback: conversion function that converts the HTML
281 | text content and returns a tuple with a boolean that indicates whether a
282 | change happened and the new content that must be saved
283 | :param str where_column: filtering such as
284 | - "like '%abc%xyz%'"
285 | - "~* '\yabc.*xyz\y'"
286 | :param str extra_where: extra filtering on the where clause
287 | """
288 | assert "id" not in columns
289 |
290 | converters = {column: "->>'en_US'" if column_type(cr, table, column) == "jsonb" else "" for column in columns}
291 | select = ", ".join(f'"{column}"' for column in columns)
292 | where = " OR ".join(f'"{column}"{converters[column]} {where_column}' for column in columns)
293 |
294 | base_select_query = f"""
295 | SELECT id, {select}
296 | FROM {table}
297 | WHERE ({where})
298 | AND ({extra_where})
299 | """
300 | split_queries = [
301 | (base_select_query + "\n AND id BETWEEN {} AND {}".format(*x))
302 | for x in determine_chunk_limit_ids(cr, table, columns, "({}) AND ({})".format(where, extra_where))
303 | ]
304 |
305 | update_sql = ", ".join(f'"{column}" = %({column})s' for column in columns)
306 | update_query = f"UPDATE {table} SET {update_sql} WHERE id = %(id)s"
307 |
308 | with ProcessPoolExecutor(max_workers=get_max_workers()) as executor:
309 | convert = Convertor(converters, converter_callback)
310 | for query in log_progress(split_queries, logger=_logger, qualifier=f"{table} updates"):
311 | cr.execute(query)
312 | for data in executor.map(convert, cr.fetchall(), chunksize=1000):
313 | if "id" in data:
314 | cr.execute(update_query, data)
315 |
316 |
317 | def determine_chunk_limit_ids(cr, table, column_arr, where):
318 | bytes_per_chunk = 100 * 1024 * 1024
319 | columns = ", ".join(quote_ident(column, cr._cnx) for column in column_arr if column != "id")
320 | cr.execute(
321 | f"""
322 | WITH info AS (
323 | SELECT id,
324 | sum(pg_column_size(({columns}, id))) OVER (ORDER BY id) / {bytes_per_chunk} AS chunk
325 | FROM {table}
326 | WHERE {where}
327 | ) SELECT min(id), max(id) FROM info GROUP BY chunk
328 | """
329 | )
330 | return cr.fetchall()
331 |
332 |
333 | def convert_html_content(
334 | cr,
335 | converter_callback,
336 | where_column="IS NOT NULL",
337 | **kwargs,
338 | ):
339 | r"""
340 | Convert HTML content.
341 |
342 | :param cursor cr: database cursor
343 | :param func converter_callback: conversion function that converts the HTML
344 | text content and returns a tuple with a boolean that indicates whether a
345 | change happened and the new content that must be saved
346 | :param str where_column: filtering such as
347 | - "like '%abc%xyz%'"
348 | - "~* '\yabc.*xyz\y'"
349 | :param dict kwargs: extra keyword arguments to pass to :func:`convert_html_columns`
350 | """
351 | if hasattr(converter_callback, "for_html"): # noqa: SIM108
352 | html_converter = converter_callback.for_html()
353 | else:
354 | # trust the given converter to handle HTML
355 | html_converter = converter_callback
356 |
357 | for table, columns in html_fields(cr):
358 | convert_html_columns(cr, table, columns, html_converter, where_column=where_column, **kwargs)
359 |
360 | if hasattr(converter_callback, "for_qweb"):
361 | qweb_converter = converter_callback.for_qweb()
362 | else:
363 | _logger.log(NEARLYWARN, "Cannot adapt converter callback %r for qweb; using it directly", converter_callback)
364 | qweb_converter = converter_callback
365 |
366 | convert_html_columns(
367 | cr,
368 | "ir_ui_view",
369 | ["arch_db"],
370 | qweb_converter,
371 | where_column=where_column,
372 | **dict(kwargs, extra_where="type = 'qweb'"),
373 | )
374 |
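# Usage sketch (illustrative): rewrite all stored html fields (plus qweb views) with a
# module-level callback, as make_pickleable_callback requires; the callback name and
# data attribute below are hypothetical examples.
#
#   def drop_deprecated_attr(el):
#       changed = False
#       for node in el.xpath("//*[@data-old-attr]"):
#           node.attrib.pop("data-old-attr")
#           changed = True
#       return changed
#
#   def migrate(cr, version):
#       convert_html_content(cr, html_converter(drop_deprecated_attr))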
--------------------------------------------------------------------------------
/src/util/specific.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | import logging
3 |
4 | from .helpers import _validate_table
5 | from .misc import _cached
6 | from .models import rename_model
7 | from .modules import rename_module
8 | from .pg import column_exists, rename_table, table_exists
9 | from .report import add_to_migration_reports
10 |
11 | _logger = logging.getLogger(__name__)
12 |
13 |
14 | def dbuuid(cr):
15 | return _dbuuids(cr)[-1]
16 |
17 |
18 | @_cached
19 | def _dbuuids(cr):
20 | cr.execute(
21 | """
22 | SELECT REPLACE(value, 'upg-neuter-', '')
23 | FROM ir_config_parameter
24 | WHERE key IN ('database.uuid', 'origin.database.uuid')
25 | ORDER BY key
26 | """
27 | )
28 | return [uuid for (uuid,) in cr.fetchall()]
29 |
30 |
31 | def dispatch_by_dbuuid(cr, version, callbacks):
32 | """
33 | Allow to execute a migration script for a specific database only, based on its dbuuid.
34 |
35 | .. example::
36 | .. code-block:: python
37 | def db_yellowbird(cr, version):
38 | cr.execute("DELETE FROM ir_ui_view WHERE id=837")
39 |
40 | util.dispatch_by_dbuuid(cr, version, {
41 | "ef81c07aa90936a89f4e7878e2ebc634a24fcd66": db_yellowbird,
42 | })
43 |
44 | :param str version: Odoo version
45 | :param dict[str, function] callbacks: mapping dbuuids to the functions to run against matching dbs
46 |
47 | .. warning::
48 | - Only the first match of (icp["database.uuid"], icp["origin.database.uuid"]) in `callbacks` is executed.
49 |
50 | .. tip::
51 | - If looking to prevent a callback from running against a descendant db, one can use a noop `callback`:
52 | .. example::
53 | .. code-block:: python
54 | noop = lambda *args: None
55 | util.dispatch_by_dbuuid(cr, version, {
56 | "dbuuid": noop,
57 | "ancestor's dbuuid": db_yellowbird,
58 | })
59 | """
60 | for uuid in _dbuuids(cr):
61 | if uuid in callbacks:
62 | func = callbacks[uuid]
63 | _logger.info("calling dbuuid-specific function `%s`", func.__name__)
64 | func(cr, version)
65 | break
66 |
67 |
68 | def rename_custom_model(cr, model_name, new_model_name, custom_module=None, report_details=""):
69 | cr.execute("SELECT 1 FROM ir_model WHERE model = %s", [model_name])
70 | if not cr.rowcount:
71 | _logger.warning("Model %r not found: skip renaming", model_name)
72 | return
73 |
74 | rename_model(cr, model_name, new_model_name, rename_table=True)
75 | module_details = " from module '{}'".format(custom_module) if custom_module else ""
76 | add_to_migration_reports(
77 | category="Custom models",
78 | message="The custom model '{model_name}'{module_details} was renamed to '{new_model_name}'. {report_details}".format(
79 | **locals()
80 | ),
81 | )
82 |
83 |
84 | def rename_custom_module(cr, old_module_name, new_module_name, report_details="", author="%"):
85 | cr.execute("SELECT 1 FROM ir_module_module WHERE name = %s AND author ILIKE %s", [old_module_name, author])
86 | if not cr.rowcount:
87 | return
88 |
89 | rename_module(cr, old_module_name, new_module_name)
90 | _logger.warning("Custom module %r renamed to %r", old_module_name, new_module_name)
91 | add_to_migration_reports(
92 | category="Custom modules",
93 | message="The custom module '{old_module_name}' was renamed to '{new_module_name}'. {report_details}".format(
94 | **locals()
95 | ),
96 | )
97 |
98 |
99 | def rename_custom_table(
100 | cr,
101 | table_name,
102 | new_table_name,
103 | custom_module=None,
104 | report_details="",
105 | ):
106 | if not table_exists(cr, table_name):
107 | _logger.warning("Table %r not found: skip renaming", table_name)
108 | return
109 |
110 | rename_table(cr, table_name, new_table_name, remove_constraints=False)
111 |
112 | module_details = " from module '{}'".format(custom_module) if custom_module else ""
113 | add_to_migration_reports(
114 | category="Custom tables/columns",
115 | message="The custom table '{table_name}'{module_details} was renamed to '{new_table_name}'. {report_details}".format(
116 | **locals()
117 | ),
118 | )
119 |
120 |
121 | def rename_custom_column(cr, table_name, col_name, new_col_name, custom_module=None, report_details=""):
122 | _validate_table(table_name)
123 | if not column_exists(cr, table_name, col_name):
124 | _logger.warning("Column %r not found on table %r: skip renaming", col_name, table_name)
125 | return
126 | cr.execute('ALTER TABLE "{}" RENAME COLUMN "{}" TO "{}"'.format(table_name, col_name, new_col_name))
127 | module_details = " from module '{}'".format(custom_module) if custom_module else ""
128 | add_to_migration_reports(
129 | category="Custom tables/columns",
130 | message="The custom column '{col_name}' of the table '{table_name}'{module_details} was renamed to '{new_col_name}'."
131 | " {report_details}".format(**locals()),
132 | )
133 |
134 |
135 | def reset_cowed_views(cr, xmlid, key=None):
136 | if "." not in xmlid:
137 | raise ValueError("Please use a fully qualified name <module>.<name>")
138 |
139 | module, _, name = xmlid.partition(".")
140 | if not key:
141 | key = xmlid
142 | cr.execute(
143 | """
144 | UPDATE ir_ui_view u
145 | SET arch_prev = u.arch_db,
146 | arch_db = v.arch_db
147 | FROM ir_ui_view v
148 | JOIN ir_model_data m
149 | ON m.res_id = v.id AND m.model = 'ir.ui.view'
150 | WHERE u.key = %s
151 | AND m.module = %s
152 | AND m.name = %s
153 | AND u.website_id IS NOT NULL
154 | RETURNING u.id
155 | """,
156 | [key, module, name],
157 | )
158 | return set(sum(cr.fetchall(), ()))
159 |
--------------------------------------------------------------------------------
/src/util/spreadsheet/__init__.py:
--------------------------------------------------------------------------------
1 | from .misc import *
2 | from .tokenizer import *
3 |
--------------------------------------------------------------------------------
/src/util/spreadsheet/misc.py:
--------------------------------------------------------------------------------
1 | from .. import json
2 |
3 |
4 | def iter_commands(cr, like_all=(), like_any=()):
5 | if not (bool(like_all) ^ bool(like_any)):
6 | raise ValueError("Please specify `like_all` or `like_any`, not both")
7 | cr.execute(
8 | """
9 | SELECT id,
10 | commands
11 | FROM spreadsheet_revision
12 | WHERE commands LIKE {}(%s::text[])
13 | """.format("ALL" if like_all else "ANY"),
14 | [list(like_all or like_any)],
15 | )
16 | for revision_id, data in cr.fetchall():
17 | data_loaded = json.loads(data)
18 | if "commands" not in data_loaded:
19 | continue
20 | data_old = json.dumps(data_loaded, sort_keys=True)
21 |
22 | changed = yield data_loaded["commands"]
23 | if changed is None:
24 | changed = data_old != json.dumps(data_loaded, sort_keys=True)
25 |
26 | if changed:
27 | cr.execute(
28 | "UPDATE spreadsheet_revision SET commands=%s WHERE id=%s", [json.dumps(data_loaded), revision_id]
29 | )
30 |
31 |
32 | def process_commands(cr, callback, *args, **kwargs):
33 | gen = iter_commands(cr, *args, **kwargs)
34 | try:
35 | cmd = next(gen)
36 | while True:
37 | changed = callback(cmd)
38 | cmd = gen.send(changed)
39 |
40 | except StopIteration:
41 | pass
42 |
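# Usage sketch (illustrative): rewrite matching spreadsheet revision commands; the
# command type and keys below are hypothetical examples.
#
#   def rename_measure(commands):
#       changed = False
#       for command in commands:
#           if command.get("type") == "ADD_PIVOT" and command.get("measure") == "old_name":
#               command["measure"] = "new_name"
#               changed = True
#       return changed
#
#   process_commands(cr, rename_measure, like_any=["%ADD_PIVOT%"])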
--------------------------------------------------------------------------------
/src/util/spreadsheet/tokenizer.py:
--------------------------------------------------------------------------------
1 | import re
2 |
3 | """
4 | This entire file is a direct translation of the original JavaScript code found in https://github.com/odoo/o-spreadsheet/blob/master/src/formulas/tokenizer.ts.
5 | """
6 |
7 |
8 | class CellErrorType:
9 | NotAvailable = "#N/A"
10 | InvalidReference = "#REF"
11 | BadExpression = "#BAD_EXPR"
12 | CircularDependency = "#CYCLE"
13 | UnknownFunction = "#NAME?"
14 | DivisionByZero = "#DIV/0!"
15 | GenericError = "#ERROR"
16 |
17 |
18 | DEFAULT_LOCALES = [
19 | {
20 | "name": "English (US)",
21 | "code": "en_US",
22 | "thousandsSeparator": ",",
23 | "decimalSeparator": ".",
24 | "dateFormat": "m/d/yyyy",
25 | "timeFormat": "hh:mm:ss a",
26 | "formulaArgSeparator": ",",
27 | }
28 | ]
29 | DEFAULT_LOCALE = DEFAULT_LOCALES[0]
30 |
31 | NEWLINE = "\n"
32 |
33 |
34 | def get_formula_number_regex(decimal_separator):
35 | decimal_separator = re.escape(decimal_separator)
36 | return re.compile(r"^-?\d+(%s?\d*(e\d+)?)?|^-?%s\d+(?!\w|!)" % (decimal_separator, decimal_separator))
37 |
38 |
39 | def escape_regexp(string):
40 | return re.escape(string)
41 |
42 |
43 | full_row_xc = r"(\$?[A-Z]{1,3})?\$?[0-9]{1,7}\s*:\s*(\$?[A-Z]{1,3})?\$?[0-9]{1,7}\s*"
44 | full_col_xc = r"\$?[A-Z]{1,3}(\$?[0-9]{1,7})?\s*:\s*\$?[A-Z]{1,3}(\$?[0-9]{1,7})?\s*"
45 |
46 | cell_reference = re.compile(r"\$?([A-Z]{1,3})\$?([0-9]{1,7})", re.IGNORECASE)
47 | range_reference = re.compile(
48 | r"^\s*('.+'!|[^']+!)?(%s|%s|%s)$" % (cell_reference.pattern, full_row_xc, full_col_xc), re.IGNORECASE
49 | )
50 |
51 | white_space_special_characters = [
52 | "\t",
53 | "\f",
54 | "\v",
55 | chr(int("00a0", 16)),
56 | chr(int("1680", 16)),
57 | chr(int("2000", 16)),
58 | chr(int("200a", 16)),
59 | chr(int("2028", 16)),
60 | chr(int("2029", 16)),
61 | chr(int("202f", 16)),
62 | chr(int("205f", 16)),
63 | chr(int("3000", 16)),
64 | chr(int("feff", 16)),
65 | ]
66 | white_space_regexp = re.compile("|".join(map(re.escape, white_space_special_characters)) + r"|(\r\n|\r|\n)")
67 |
68 |
69 | def replace_special_spaces(text):
70 | if not text:
71 | return ""
72 | if not white_space_regexp.search(text):
73 | return text
74 | return white_space_regexp.sub(lambda match: NEWLINE if match.group(1) else " ", text)
75 |
76 |
77 | POSTFIX_UNARY_OPERATORS = ["%"]
78 | OPERATORS = ["+", "-", "*", "/", ":", "=", "<>", ">=", ">", "<=", "<", "^", "&"] + POSTFIX_UNARY_OPERATORS
79 |
80 |
81 | def tokenize(string, locale=DEFAULT_LOCALE):
82 | string = replace_special_spaces(string)
83 | result = []
84 | if string:
85 | chars = TokenizingChars(string)
86 |
87 | while not chars.is_over():
88 | token = (
89 | tokenize_space(chars)
90 | or tokenize_args_separator(chars, locale)
91 | or tokenize_parenthesis(chars)
92 | or tokenize_operator(chars)
93 | or tokenize_string(chars)
94 | or tokenize_debugger(chars)
95 | or tokenize_invalid_range(chars)
96 | or tokenize_number(chars, locale)
97 | or tokenize_symbol(chars)
98 | )
99 |
100 | if not token:
101 | token = ("UNKNOWN", chars.shift())
102 |
103 | result.append(token)
104 |
105 | return result
106 |
107 |
108 | def tokenize_debugger(chars):
109 | if chars.current == "?":
110 | chars.shift()
111 | return "DEBUGGER", "?"
112 | return None
113 |
114 |
115 | parenthesis = {"(": ("LEFT_PAREN", "("), ")": ("RIGHT_PAREN", ")")}
116 |
117 |
118 | def tokenize_parenthesis(chars):
119 | value = chars.current
120 | if value in parenthesis:
121 | chars.shift()
122 | return parenthesis[value]
123 | return None
124 |
125 |
126 | def tokenize_args_separator(chars, locale):
127 | if chars.current == locale["formulaArgSeparator"]:
128 | value = chars.shift()
129 | return "ARG_SEPARATOR", value
130 | return None
131 |
132 |
133 | def tokenize_operator(chars):
134 | for op in OPERATORS:
135 | if chars.current_starts_with(op):
136 | chars.advance_by(len(op))
137 | return "OPERATOR", op
138 | return None
139 |
140 |
141 | FIRST_POSSIBLE_NUMBER_CHARS = set("0123456789")
142 |
143 |
144 | def tokenize_number(chars, locale):
145 | if chars.current not in FIRST_POSSIBLE_NUMBER_CHARS and chars.current != locale["decimalSeparator"]:
146 | return None
147 | match = re.match(get_formula_number_regex(locale["decimalSeparator"]), chars.remaining())
148 | if match:
149 | chars.advance_by(len(match.group(0)))
150 | return "NUMBER", match.group(0)
151 | return None
152 |
153 |
154 | def tokenize_string(chars):
155 | if chars.current == '"':
156 | start_char = chars.shift()
157 | letters = start_char
158 | while chars.current and (chars.current != start_char or letters[-1] == "\\"):
159 | letters += chars.shift()
160 | if chars.current == '"':
161 | letters += chars.shift()
162 | return "STRING", letters
163 | return None
164 |
165 |
166 | separator_regexp = re.compile(r"^[\w\.!\$]+")
167 |
168 |
169 | def tokenize_symbol(chars):
170 | result = ""
171 | if chars.current == "'":
172 | last_char = chars.shift()
173 | result += last_char
174 | while chars.current:
175 | last_char = chars.shift()
176 | result += last_char
177 | if last_char == "'":
178 | if chars.current and chars.current == "'":
179 | last_char = chars.shift()
180 | result += last_char
181 | else:
182 | break
183 | if last_char != "'":
184 | return "UNKNOWN", result
185 | match = separator_regexp.match(chars.remaining())
186 | if match:
187 | value = match.group(0)
188 | result += value
189 | chars.advance_by(len(value))
190 | if result:
191 | value = result
192 | is_reference = range_reference.match(value)
193 | if is_reference:
194 | return "REFERENCE", value
195 | return "SYMBOL", value
196 | return None
197 |
198 |
199 | def tokenize_space(chars):
200 | length = 0
201 | while chars.current == NEWLINE:
202 | length += 1
203 | chars.shift()
204 | if length:
205 | return "SPACE", NEWLINE * length
206 |
207 | while chars.current == " ":
208 | length += 1
209 | chars.shift()
210 |
211 | if length:
212 | return "SPACE", " " * length
213 | return None
214 |
215 |
216 | def tokenize_invalid_range(chars):
217 |     if chars.current_starts_with(CellErrorType.InvalidReference):
218 | chars.advance_by(len(CellErrorType.InvalidReference))
219 | return "INVALID_REFERENCE", CellErrorType.InvalidReference
220 | return None
221 |
222 |
223 | class TokenizingChars:
224 | def __init__(self, text):
225 | self.text = text
226 | self.current_index = 0
227 | self.current = text[0]
228 |
229 | def shift(self):
230 | current = self.current
231 | self.current_index += 1
232 | self.current = self.text[self.current_index] if self.current_index < len(self.text) else None
233 | return current
234 |
235 | def advance_by(self, length):
236 | self.current_index += length
237 | self.current = self.text[self.current_index] if self.current_index < len(self.text) else None
238 |
239 | def is_over(self):
240 | return self.current_index >= len(self.text)
241 |
242 | def remaining(self):
243 | return self.text[self.current_index :]
244 |
245 |     def current_starts_with(self, string):
246 |         # Delegate to `str.startswith` with an offset so that a formula ending in the middle
247 |         # of a multi-character operator (e.g. "=A1<") cannot raise an IndexError.
248 |         return self.text.startswith(string, self.current_index)
249 |
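250 | # Illustrative usage (a sketch, not part of the original module), with the default en_US locale:
251 | #
252 | #   tokenize("=A1+2")
253 | #   # -> [('OPERATOR', '='), ('REFERENCE', 'A1'), ('OPERATOR', '+'), ('NUMBER', '2')]
254 | #
255 | # Note that a range such as "A1:B2" yields two REFERENCE tokens separated by a ":" OPERATOR
256 | # token; this low-level tokenizer does not merge them into a single range token.
257 |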
--------------------------------------------------------------------------------
/tools/compile23.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # ruff: noqa: T201
3 | import subprocess
4 | import sys
5 | from pathlib import PurePath
6 | from shutil import which
7 |
8 | py2_only_patterns = []
9 | py2_files = []
10 |
11 | py3_only_patterns = [
12 | "tools/*.py",
13 |     # Tests are only run from version 12; python2 compatibility is not needed.
14 | "src/testing.py",
15 | "src/util/jinja_to_qweb.py",
16 | "src/util/snippets.py",
17 | "src/util/convert_bootstrap.py",
18 | "src/*/tests/*.py",
19 | "src/*/17.0.*/*.py",
20 | ]
21 | py3_files = []
22 |
23 | rc = 0
24 |
25 | for filename in sys.argv[1:]:
26 | p = PurePath(filename)
27 | if p.suffix != ".py":
28 | continue
29 |
30 | if not filename.islower():
31 | print(f"filename {filename!r} is not lowercase")
32 | rc = 1
33 |
34 | if any(p.match(pattern) for pattern in py2_only_patterns):
35 | py2_files.append(filename)
36 | elif any(p.match(pattern) for pattern in py3_only_patterns):
37 | py3_files.append(filename)
38 | else:
39 | # not an explicit match to a python version. Test against both versions.
40 | py2_files.append(filename)
41 | py3_files.append(filename)
42 |
43 |
44 | if py2_files:
45 | if which("python2"):
46 | s = subprocess.run(["python2", "-m", "compileall", "-f", "-q", *py2_files], check=False)
47 | if s.returncode:
48 | rc = 1
49 | else:
50 | lines = [
51 | "WARNING: `python2` hasn't been found in $PATH",
52 | "You must ensure the following files are compatible with python2:",
53 | *[f" - {f}" for f in py2_files],
54 | ]
55 | width = max(map(len, lines))
56 | message = "\n".join(f"@ {line: <{width}s} @" for line in lines)
57 | extra = "@" * (width + 4)
58 | print(f"{extra}\n{message}\n{extra}", file=sys.stderr)
59 |
60 | if py3_files:
61 | s = subprocess.run(["python3", "-m", "compileall", "-f", "-q", *py3_files], check=False)
62 | if s.returncode:
63 | rc = 1
64 |
65 | sys.exit(rc)
66 |
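67 | # Illustrative run (a sketch; in practice the file list is assumed to come from a pre-commit hook):
68 | #
69 | #   ./tools/compile23.py src/util/misc.py src/util/jinja_to_qweb.py
70 | #
71 | # Here src/util/jinja_to_qweb.py matches a py3-only pattern and is byte-compiled with python3
72 | # only, while src/util/misc.py matches neither list and is checked with both interpreters
73 | # (or a warning is printed when python2 is not available in $PATH).
74 |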
--------------------------------------------------------------------------------
/tools/fetch-release-notes-video-id.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env -S uv run --script --quiet
2 |
3 | # /// script
4 | # requires-python = ">=3.12"
5 | # dependencies = [
6 | # "httpx",
7 | # "libcst",
8 | # "lxml",
9 | # ]
10 | # ///
11 |
12 | import re
13 | import sys
14 | from pathlib import Path
15 |
16 | import httpx
17 | import libcst as cst
18 | from lxml import etree
19 |
20 | if len(sys.argv) != 2:
21 | sys.exit(f"Usage: {sys.argv[0]} VERSION")
22 |
23 | version = sys.argv[1]
24 |
25 | VERSION_RE = re.compile(r"^(?:saas[~-])?([0-9]+)(?:\.([0-9]+))?$")
26 |
27 | if (match := VERSION_RE.match(version)) is None:
28 | sys.exit(f"Invalid version: {version!r}")
29 |
30 | major, minor = match.groups(default="0")
31 |
32 | version_url = major if minor == "0" else f"{major}-{minor}"
33 | full_version = f"{major}.0" if minor == "0" else f"saas~{major}.{minor}"
34 |
35 | html = httpx.get(f"https://www.odoo.com/odoo-{version_url}-release-notes")
36 | if html.status_code != 200:
37 | sys.exit(f"Cannot fetch release notes page for version {version}")
38 |
39 | root = etree.fromstring(html.text, parser=etree.HTMLParser())
40 | iframe = root.xpath("//main//iframe[contains(@src, 'youtube.com') or contains(@src, 'youtube-nocookie.com')]")
41 | if not iframe:
42 | sys.exit(f"Cannot find youtube video in {html.url}")
43 |
44 | yt_link = httpx.URL(iframe[0].attrib["src"])
45 | video_id = yt_link.path.removeprefix("/embed/")
46 |
47 |
48 | report_py = Path(__file__).parent.parent / "src" / "util" / "report.py"
49 |
50 | source_tree = cst.parse_module(report_py.read_bytes())
51 |
52 |
53 | class Transformer(cst.CSTTransformer):
54 | def __init__(self):
55 | self.video_dict = None
56 | self.key_found = False
57 | super().__init__()
58 |
59 | def visit_Assign(self, node):
60 | match node:
61 | case cst.Assign(
62 | targets=[cst.AssignTarget(target=cst.Name(value="ODOO_SHOWCASE_VIDEOS"))],
63 | value=video_dict,
64 | ):
65 | self.video_dict = video_dict
66 | return True
67 | return False
68 |
69 | def visit_Dict(self, node):
70 | return node is self.video_dict
71 |
72 | def leave_DictElement(self, original_node, updated_node):
73 | if original_node.key.raw_value == full_version:
74 | self.key_found = True
75 | if original_node.value.raw_value != video_id:
76 | updated_node = updated_node.with_changes(value=cst.SimpleString(f'"{video_id}"'))
77 | return updated_node
78 |
79 | def leave_Dict(self, original_node, updated_node):
80 | if original_node is self.video_dict:
81 | if self.key_found:
82 | elements = updated_node.elements
83 | else:
84 | new_elem = updated_node.elements[0].with_changes(
85 | key=cst.SimpleString(f'"{full_version}"'), value=cst.SimpleString(f'"{video_id}"')
86 | )
87 | elements = [new_elem, *updated_node.elements]
88 |
89 |             elements = sorted(elements, reverse=True, key=lambda e: tuple(map(int, VERSION_RE.match(e.key.raw_value).groups("0"))))
90 | updated_node = updated_node.with_changes(elements=elements)
91 | return updated_node
92 |
93 |
94 | modified_tree = source_tree.visit(Transformer())
95 |
96 | report_py.write_text(modified_tree.code)
97 |
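98 | # Illustrative invocation (a sketch, not part of the original script): passing "saas-17.2"
99 | # builds the URL https://www.odoo.com/odoo-17-2-release-notes, extracts the YouTube video id
100 | # from the embedded iframe, and updates (or inserts) the "saas~17.2" entry of
101 | # ODOO_SHOWCASE_VIDEOS in src/util/report.py:
102 | #
103 | #   ./tools/fetch-release-notes-video-id.py saas-17.2
104 |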
--------------------------------------------------------------------------------
/tools/generate-inherit.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # ruff: noqa: ERA001
3 |
4 | import io
5 | import itertools
6 | import logging
7 | import subprocess
8 | import sys
9 | import tokenize
10 | from argparse import ArgumentParser, Namespace
11 | from ast import literal_eval
12 | from collections import defaultdict
13 | from dataclasses import dataclass, field
14 | from functools import total_ordering
15 | from pathlib import Path
16 | from typing import Dict, List, NamedTuple, Optional, Set, Tuple
17 |
18 | import black
19 | import tomli
20 |
21 | try:
22 | from black.nodes import Visitor
23 | except ImportError:
24 | # old black version
25 | from black import Visitor
26 |
27 |
28 | logging.basicConfig(
29 | level=logging.INFO, stream=sys.stderr, format="%(asctime)s %(levelname)s %(message)s", datefmt="%Y-%m-%d %H:%M:%S"
30 | )
31 | if sys.stderr.isatty():
32 | logging.addLevelName(logging.INFO, "\033[1;32m\033[1;49mINFO\033[0m")
33 | logging.addLevelName(logging.CRITICAL, "\033[1;37m\033[1;41mCRITICAL\033[0m")
34 |
35 | logger = logging.getLogger(__name__)
36 |
37 | if int(black.__version__.split(".")[0]) >= 22:
38 | logger.critical("Too recent version of `black`. Please install version 21.12b0 in order to parse python2 code.")
39 | sys.exit(1)
40 |
41 | MODELS = ["osv", "osv_memory", "Model", "TransientModel", "AbstractModel"]
42 | MODELS += [".".join(x).lstrip(".") for x in itertools.product(["openerp", "odoo", ""], ["osv", "models"], MODELS)]
43 |
44 |
45 | class Repo(NamedTuple):
46 | name: str
47 |
48 | @property
49 | def remote(self):
50 | return f"git@github.com:odoo/{self.name}.git"
51 |
52 |
53 | REPOSITORIES = [
54 | Repo("odoo"),
55 | Repo("enterprise"),
56 | Repo("design-themes"),
57 | ]
58 |
59 |
60 | @total_ordering
61 | @dataclass(eq=False)
62 | class Version:
63 | name: str
64 |
65 | @property
66 | def fqn(self):
67 | if "." in self.name:
68 | return self.name.replace("-", "~")
69 | major, minor = self.ints
70 | return f"{major}.saas~{minor}"
71 |
72 | def __repr__(self):
73 | # This is hacky.
74 | # It will only be used when outputting the generated file.
75 | return f"parse_version({self.fqn!r})"
76 |
77 | @property
78 | def ints(self):
79 | s = list(map(int, self.name.replace("saas-", "").split(".")))
80 | if len(s) == 1:
81 | # < 11.0
82 | major = {range(1, 6): 7, range(6, 7): 8, range(7, 14): 9, range(14, 19): 10}
83 | for m, n in major.items():
84 | if s[0] in m:
85 | return (n, s[0])
86 | raise ValueError(self.name)
87 | return tuple(s)
88 |
89 | def __eq__(self, other):
90 | return self.name == other.name
91 |
92 | def __lt__(self, other):
93 | return self.ints < other.ints
94 |
95 | def __hash__(self):
96 | return hash(self.name) # Only name is relevant
97 |
98 | @classmethod
99 | def parse(cls, name):
100 | name = name.replace("~", "-")
101 | if not (name.startswith("saas-") or name.endswith(".0")):
102 | name = name.split(".", 1)[-1]
103 | return cls(name)
104 |
105 | @property
106 | def python_target(self):
107 | bounds = [
108 | [(7, 0), black.mode.TargetVersion.PY27],
109 | [(10, 17), black.mode.TargetVersion.PY36],
110 | [(15, 0), black.mode.TargetVersion.PY37],
111 | [(16, 3), black.mode.TargetVersion.PY310],
112 | ]
113 | for bound, target in reversed(bounds):
114 | if bound <= self.ints:
115 | return target
116 | raise RuntimeError(f"Cannot determine python target for {self.name}")
117 |
118 |
119 | @dataclass(order=True)
120 | class Inherit:
121 | model: str
122 | born: Version # inclusive
123 | dead: Optional[Version] = None # non-inclusive
124 | via: Optional[str] = None # Many2one field to parent in case of `_inherits`
125 |
126 | def apply_on(self, version: Version) -> bool:
127 | if self.dead is None:
128 | return self.born <= version
129 | return self.born <= version < self.dead
130 |
131 |
132 | _LAST_MAJOR = 17
133 | _VERSIONS = {Version(f"{major}.0") for major in range(7, _LAST_MAJOR + 1)}
134 | _VERSIONS |= {Version(f"saas-{saas}") for saas in range(1, 19)}
135 | _VERSIONS |= {Version(f"saas-{major}.{minor}") for major in range(11, _LAST_MAJOR) for minor in range(1, 6)}
136 |
137 | VERSIONS = sorted(_VERSIONS)
138 |
139 | IGNORED_FILES = [
140 | # defines `_name = LITERAL % CONSTANT`
141 | # does not have _inherit(s)
142 | "odoo/addons/google_calendar/google_calendar.py",
143 | "odoo/addons/google_calendar/models/google_calendar.py",
144 | "odoo/addons/website_version/models/google_management.py",
145 | "enterprise/website_version/models/google_management.py",
146 | ]
147 |
148 | # Sometimes, new modules are added during a version's lifetime and are not forward-ported to dead saas~* versions.
149 | # Since these versions are dead and no upgrade to them will ever be made, we can consider that they contain these models.
150 | # Without this, we would end up with holes in the inherit tree.
151 | VIRTUAL_INHERITS = {
152 | "account.avatax": [
153 | Inherit(model="account.move", born=Version("14.0"), dead=Version("15.0"), via=None),
154 | Inherit(model="sale.order", born=Version("14.0"), dead=Version("15.0"), via=None),
155 | Inherit(model="sale.subscription", born=Version("14.0"), dead=Version("15.0"), via=None),
156 | ],
157 | "account.avatax.unique.code": [
158 | Inherit(model="account.avatax", born=Version("14.0"), dead=Version("15.0"), via=None),
159 | Inherit(model="res.partner", born=Version("14.0"), dead=Version("15.0"), via=None),
160 | ],
161 | "account.edi.common": [
162 | Inherit("account.edi.xml.cii", born=Version("14.0"), dead=Version("15.0")),
163 | Inherit("account.edi.xml.ubl_20", born=Version("14.0"), dead=Version("15.0")),
164 | ],
165 | "account.edi.xml.ubl_20": [
166 | Inherit("account.edi.xml.ubl_21", born=Version("14.0"), dead=Version("15.0")),
167 | Inherit("account.edi.xml.ubl_efff", born=Version("14.0"), dead=Version("15.0")),
168 | ],
169 | "account.edi.xml.ubl_21": [
170 | Inherit("account.edi.xml.ubl_bis3", born=Version("14.0"), dead=Version("15.0")),
171 | ],
172 | "account.edi.xml.ubl_bis3": [
173 | Inherit("account.edi.xml.ubl_de", born=Version("14.0"), dead=Version("15.0")),
174 | ],
175 | "account.report": [
176 | Inherit("account.cash.flow.report", born=Version("saas-11.1"), dead=Version("saas-12.5")),
177 | Inherit("l10n.lu.report.partner.vat.intra", born=Version("saas-13.1"), dead=Version("saas-13.2")),
178 | ],
179 | "l10n_cl.edi.util": [
180 | Inherit("stock.picking", born=Version("14.0"), dead=Version("saas-14.2")),
181 | Inherit(model="l10n_cl.daily.sales.book", born=Version("14.0"), dead=Version("saas-14.3"), via=None),
182 | ],
183 | "l10n_es.sii.account.tax.mixin": [
184 | Inherit(model="account.tax", born=Version("14.0"), dead=Version("saas-14.4"), via=None),
185 | Inherit(model="account.tax.template", born=Version("14.0"), dead=Version("saas-14.4"), via=None),
186 | ],
187 | "l10n_mx.trial.report": [
188 | Inherit("l10n_mx.trial.closing.report", born=Version("saas-11.1"), dead=Version("saas-12.2")),
189 | ],
190 | "l10n_mx_edi.pac.sw.mixin": [
191 | Inherit("account.invoice", born=Version("saas-11.1"), dead=Version("saas-12.5")),
192 | Inherit("account.payment", born=Version("saas-11.1"), dead=Version("saas-12.2")),
193 | ],
194 | "mail.activity.mixin": [
195 | Inherit("l10n_lu.yearly.tax.report.manual", born=Version("13.0"), dead=Version("15.0")),
196 | Inherit("l10n_uk.vat.obligation", born=Version("saas-15"), dead=Version("12.0")),
197 | ],
198 | "mail.thread": [
199 | Inherit("account.online.link", born=Version("12.0"), dead=Version("14.0")),
200 | Inherit(model="l10n_cl.daily.sales.book", born=Version("14.0"), dead=Version("saas-14.3"), via=None),
201 | Inherit("l10n_lu.yearly.tax.report.manual", born=Version("13.0"), dead=Version("15.0")),
202 | Inherit("l10n_uk.vat.obligation", born=Version("saas-15"), dead=Version("12.0")),
203 | ],
204 | "microsoft.outlook.mixin": [
205 | Inherit(model="fetchmail.server", born=Version("12.0"), dead=Version("saas-15.3"), via=None),
206 | Inherit(model="ir.mail_server", born=Version("12.0"), dead=Version("saas-15.3"), via=None),
207 | ],
208 | "pos.order.line": [
209 | Inherit(model="pos.order_line_pro_forma_be", born=Version("14.0"), dead=Version("saas-16.1")),
210 | ],
211 | "studio.mixin": [
212 | Inherit(model="ir.default", born=Version("14.0"), dead=Version("saas-14.2")),
213 | ],
214 | "google.gmail.mixin": [
215 | Inherit(model="fetchmail.server", born=Version("12.0"), dead=Version("15.0"), via=None),
216 | Inherit(model="ir.mail_server", born=Version("12.0"), dead=Version("15.0"), via=None),
217 | ],
218 | }
219 |
220 |
221 | # from lib2to3.refactor.RefactoringTool class
222 | def _read_python_source(filename):
223 | """Do our best to decode a Python source file correctly."""
224 | try:
225 | f = open(filename, "rb") # noqa: SIM115
226 | except OSError:
227 | return None, None
228 | try:
229 | encoding = tokenize.detect_encoding(f.readline)[0]
230 | finally:
231 | f.close()
232 | with io.open(filename, "r", encoding=encoding, newline="") as f:
233 | return f.read(), encoding
234 |
235 |
236 | class BootstrapVisitor(Visitor):
237 |     result: Dict[str, List[Inherit]] = {}
238 |
239 | def to_str(self, node):
240 | if isinstance(node, black.Node):
241 | return "".join(self.to_str(c) for c in node.children)
242 | return node.value
243 |
244 | def visit_dictsetmaker(self, node):
245 | eval_context = {
246 | "Inherit": Inherit,
247 | "parse_version": Version.parse,
248 | }
249 |
250 | self.result = eval(f"{{ {self.to_str(node)} }}", eval_context)
251 | return []
252 |
253 |
254 | @dataclass
255 | class OdooVisitor(Visitor):
256 | inh: Dict[str, Set[Tuple[str, str]]] = field(default_factory=lambda: defaultdict(set))
257 |
258 | def to_str(self, node):
259 | if isinstance(node, black.Node):
260 | return "".join(self.to_str(c) for c in node.children)
261 | return node.value
262 |
263 | def visit_classdef(self, node):
264 | classparent = None
265 |
266 | children = iter(node.children)
267 | child = next(children)
268 | while child.type != black.token.COLON:
269 | if child.type == black.token.LPAR:
270 | classparent = self.to_str(next(children))
271 | child = next(children)
272 |
273 | if classparent in MODELS:
274 | suite = next(children)
275 | name = None
276 | inh = []
277 | for child in suite.children:
278 | if child.type == black.syms.simple_stmt:
279 | expr_stmt = child.children[0]
280 | if expr_stmt.type != black.syms.expr_stmt:
281 | continue
282 | attr = self.to_str(expr_stmt.children[0])
283 | if attr == "_name":
284 | node = expr_stmt.children[2]
285 | if node.type == black.token.NAME and node.value == "_description":
286 |                             # `_description` being required, some devs use the following syntax: https://git.io/JUfhO
287 | node = expr_stmt.children[4]
288 | name = literal_eval(self.to_str(node))
289 | elif attr == "_inherit":
290 | node = expr_stmt.children[2]
291 | if node.type == black.token.NAME and node.value == "_name":
292 | inh.append((name, None))
293 | else:
294 | val = literal_eval(self.to_str(node))
295 | if isinstance(val, str):
296 | val = [val]
297 | inh.extend((v, None) for v in val)
298 | elif attr == "_inherits":
299 | val = literal_eval(self.to_str(expr_stmt.children[2]))
300 | inh.extend(val.items())
301 | else: # noqa: PLR5501
302 | # handle Many2one with delegate=True attribute
303 | if (
304 | len(expr_stmt.children) == 3
305 | and expr_stmt.children[1].type == black.token.EQUAL
306 | and expr_stmt.children[2].type == black.syms.power
307 | ):
308 | pw = expr_stmt.children[2]
309 | if (self.to_str(pw.children[0]) + self.to_str(pw.children[1])) == "fields.Many2one":
310 | via = self.to_str(expr_stmt.children[0])
311 | arglist = pw.children[2].children[1]
312 | comodel = None
313 | delegate = False
314 | for arg in arglist.children:
315 | if arg.type == black.token.STRING and comodel is None:
316 | comodel = literal_eval(self.to_str(arg))
317 | elif arg.type == black.syms.argument:
318 | if (
319 | self.to_str(arg.children[0]) == "delegate"
320 | and self.to_str(arg.children[2]) == "True"
321 | ):
322 | delegate = True
323 | if (
324 | self.to_str(arg.children[0]) == "comodel_name"
325 | and arg.children[2].type == black.token.STRING
326 | ):
327 | comodel = literal_eval(self.to_str(arg.children[2]))
328 | if delegate and comodel:
329 | inh.append((comodel, via))
330 |
331 | if name:
332 | for i, via in inh:
333 | if i != name:
334 | self.inh[i].add((name, via))
335 |
336 | return []
337 |
338 |
339 | def init_repos(path: Path) -> None:
340 | path.mkdir(parents=True, exist_ok=True)
341 |
342 | for repo in REPOSITORIES:
343 | p = path / repo.name
344 | if not p.exists():
345 | subprocess.run(
346 | ["git", "clone", repo.remote, repo.name],
347 | cwd=str(path),
348 | check=True,
349 | )
350 | else:
351 | subprocess.run(["git", "fetch", "-q"], cwd=str(p), check=True)
352 |
353 |
354 | def checkout(wd: Path, repo: Repo, version: Version) -> bool:
355 | gitdir = str(wd / repo.name)
356 |
357 | hasref = subprocess.run(
358 | ["git", "show-ref", "-q", "--verify", f"refs/remotes/origin/{version.name}"], cwd=gitdir, check=False
359 | )
360 | if hasref.returncode != 0:
361 | return False # unknown branch
362 | subprocess.run(
363 | ["git", "checkout", "-q", "--force", "-B", version.name, f"origin/{version.name}"], cwd=gitdir, check=True
364 | )
365 | return True
366 |
367 |
368 | def bootstrap(from_file: Path):
369 | logger.info("📂 Bootstrapping from %s", from_file)
370 | visitor = BootstrapVisitor()
371 |
372 | code, _ = _read_python_source(from_file)
373 | node = black.lib2to3_parse(code)
374 |
375 | list(visitor.visit(node))
376 | return defaultdict(list, visitor.result)
377 |
378 |
379 | def main(options: Namespace):
380 | wd = options.working_dir
381 | logger.info("⚙️ Initialize repositories into %s", wd)
382 | init_repos(wd)
383 |
384 | result = bootstrap(options.bootstrap_file) if options.bootstrap_file else defaultdict(list)
385 |
386 | for version in VERSIONS:
387 | if not (options.from_branch <= version <= options.to_branch):
388 | logger.info("⏭ Skip version %s", version.name)
389 | continue
390 |
391 | visitor = OdooVisitor()
392 |
393 | for model, virtuals in VIRTUAL_INHERITS.items():
394 | for virtual in virtuals:
395 | if virtual.apply_on(version):
396 | visitor.inh[model].add((virtual.model, virtual.via))
397 |
398 | any_repo = False
399 | for repo in REPOSITORIES:
400 | if not checkout(wd, repo, version):
401 | continue
402 | any_repo = True
403 | logger.info("🔎 Process %s at version %s", repo.name, version.name)
404 | r = wd / repo.name
405 | for pyfile in r.glob("**/*.py"):
406 | fname = str(pyfile.relative_to(wd))
407 | if fname in IGNORED_FILES or "test" in fname:
408 | continue
409 | code, _ = _read_python_source(pyfile)
410 | node = black.lib2to3_parse(code, [version.python_target])
411 | try:
412 | list(visitor.visit(node))
413 | except Exception:
414 | logger.critical("💥 Cannot parse %s (%s %s)", pyfile, repo.name, version.name)
415 | raise
416 |
417 | if not any_repo:
418 | # branch not found in any repo, don't store any inherits, even virtual ones
419 | continue
420 |
421 | if not visitor.inh:
422 | continue
423 |
424 | for model, children in result.items():
425 | for child in children:
426 | if (child.model, child.via) not in visitor.inh[model] and not child.dead and child.born < version:
427 | child.dead = version
428 |
429 | for model, children in visitor.inh.items():
430 | for child, via in children:
431 | for inh in result[model]:
432 | if inh.model == child and inh.via == via and (not inh.dead or inh.dead >= version):
433 | break
434 | else:
435 | result[model].append(Inherit(model=child, born=version, via=via))
436 |
437 | result = {m: sorted(result[m]) for m in sorted(result)}
438 | me = Path(sys.argv[0])
439 | pyproject = Path(black.find_pyproject_toml((str(me.parent),)))
440 |
441 | output = f"""\
442 | # This file is auto-generated by `{me.resolve().relative_to(pyproject.parent)}`. Edits will be lost.
443 |
444 | from collections import namedtuple
445 |
446 | try:
447 | from odoo.tools.misc import frozendict
448 | from odoo.tools.parse_version import parse_version
449 | except ImportError:
450 | from openerp.tools.parse_version import parse_version
451 | try:
452 | from openerp.tools.misc import frozendict
453 | except ImportError:
454 | # frozendict only appears with new api in 8.0
455 | frozendict = dict
456 |
457 | Inherit = namedtuple("Inherit", "model born dead via") # NOTE: dead is non-inclusive
458 |
459 | inheritance_data = frozendict({result!r})
460 | """
461 |
462 | with open(pyproject, "rb") as fp:
463 | line_length = tomli.load(fp)["tool"]["ruff"]["line-length"]
464 | mode = black.FileMode(target_versions={black.TargetVersion.PY27}, line_length=line_length)
465 | print(black.format_str(output, mode=mode), end="") # noqa: T201
466 |
467 |
468 | # def debug(options: Namespace):
469 | # from black.debug import DebugVisitor
470 | # assert options.bootstrap_file
471 | # code, _ = _read_python_source(options.bootstrap_file)
472 | # node = black.lib2to3_parse(code)
473 |
474 | # # DebugVisitor.show(node)
475 | # result = bootstrap(options.bootstrap_file)
476 | # print(f"inheritance_data = frozendict({result!r})")
477 | # # v = OdooVisitor()
478 | # # list(v.visit(node))
479 | # # print(v.inh)
480 |
481 |
482 | if __name__ == "__main__":
483 | parser = ArgumentParser(description="Regenerate `_inherit.py` from source files")
484 |
485 | parser.add_argument("--working-dir", "-w", dest="working_dir", type=Path, default="/tmp/inh")
486 | parser.add_argument("--bootstrap-file", "-b", dest="bootstrap_file", type=Path)
487 |
488 | parser.add_argument(
489 | "--from-branch", "-f", dest="from_branch", type=Version, choices=VERSIONS, default=VERSIONS[0], metavar="BRANCH"
490 | )
491 | parser.add_argument(
492 | "--to-branch", "-t", dest="to_branch", type=Version, choices=VERSIONS, default=VERSIONS[-1], metavar="BRANCH"
493 | )
494 |
495 | options = parser.parse_args()
496 |
497 | # debug(options)
498 | main(options)
499 |
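500 | # Illustrative usage (a sketch; the bootstrap file path is an assumption based on the parser
501 | # description above):
502 | #
503 | #   ./tools/generate-inherit.py -w /tmp/inh -b src/util/_inherit.py -f 16.0 -t saas-17.1 > /tmp/_inherit.py
504 | #
505 | # Version ordering, as implemented by `Version.ints` above:
506 | #   Version("saas-12.3").ints == (12, 3)   # saas branches from 11.0 onwards embed the major
507 | #   Version("saas-15").ints == (10, 15)    # older saas branches are mapped onto their major
508 | #   Version("16.0") < Version("saas-16.1") < Version("saas-16.4") < Version("17.0")
509 |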
--------------------------------------------------------------------------------
/tools/graph-upgrade-timing.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env -S uv run --script --quiet
2 | # ruff: noqa: RET503
3 |
4 | # /// script
5 | # dependencies = [
6 | # "pygal[png]",
7 | # ]
8 | # ///
9 |
10 | import argparse
11 | import os
12 | import re
13 | import sys
14 |
15 | import pygal # also need cairosvg for png output
16 |
17 |
18 | def process(options):
19 | pie = pygal.Pie()
20 | dt = None
21 | others = 0.0
22 | for line in sys.stdin.readlines():
23 | if dt is None:
24 | match = re.match(r"(\d{4}-\d{2}-\d{2} \d{2}:\d{2}):\d{2},\d{3} ", line)
25 | if match:
26 | dt = match.group(1)
27 |
28 | match = re.search(r"Module ([a-zA-Z0-9_]+) loaded in (\d+\.\d\d)s, \d+ queries", line)
29 | if match:
30 | time = float(match.group(2))
31 | if time > options.min_time:
32 | pie.add(match.group(1), time)
33 | else:
34 | others += time
35 |
36 | if options.min_time and others:
37 | pie.add("Other modules", others)
38 |
39 | title = f"{dt}"
40 | if options.min_time:
41 | title = f"{title} • Modules loaded in more than {options.min_time} seconds"
42 | pie.title = title
43 |
44 | if options.format == "png":
45 | return pie.render_to_png()
46 | elif options.format == "svg":
47 | return pie.render()
48 |
49 |
50 | def main():
51 | # cat migration-14.0-latest.log | python3 graph-upgrade-timing.py -m 15 > graph.svg
52 | parser = argparse.ArgumentParser()
53 | parser.add_argument("-f", "--format", type=str, choices=["svg", "png"], default="svg")
54 | parser.add_argument("-m", "--min-time", dest="min_time", type=float, default=0.0)
55 |
56 | options = parser.parse_args()
57 | with os.fdopen(sys.stdout.fileno(), "wb") as fp:
58 | fp.write(process(options))
59 |
60 | return 0
61 |
62 |
63 | if __name__ == "__main__":
64 | sys.exit(main())
65 |
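66 | # Illustrative input (a sketch of an upgrade log line; the exact surrounding text may differ,
67 | # only the two regexes above need to match):
68 | #
69 | #   2024-01-15 12:34:56,789 123 INFO db odoo.modules.loading: Module account loaded in 12.34s, 5678 queries
70 | #
71 | # Piping such a log through the script renders a pie chart of per-module loading time:
72 | #
73 | #   cat migration-17.0-latest.log | ./tools/graph-upgrade-timing.py -m 15 -f png > graph.png
74 |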
--------------------------------------------------------------------------------