├── .gitignore
├── COPYING
├── README.md
├── README.rst
├── odoo_convert_path_to_image.py
├── odoo_convert_url_to_image.py
├── odoo_csv_tools
│   ├── __init__.py
│   ├── export_threaded.py
│   ├── import_threaded.py
│   ├── lib
│   │   ├── __init__.py
│   │   ├── checker.py
│   │   ├── conf_lib.py
│   │   ├── internal
│   │   │   ├── __init__.py
│   │   │   ├── csv_reader.py
│   │   │   ├── exceptions.py
│   │   │   ├── io.py
│   │   │   ├── rpc_thread.py
│   │   │   └── tools.py
│   │   ├── mapper.py
│   │   ├── transform.py
│   │   └── workflow.py
│   └── migrate.py
├── odoo_export_thread.py
├── odoo_import_thread.py
├── pics
│   ├── account_move.png
│   ├── cascade_update.png
│   ├── fail.png
│   ├── group_by_1.png
│   ├── group_by_2.png
│   ├── import_tool_options.png
│   ├── o2m_csv.png
│   ├── o2m_csv_gen.png
│   ├── phase_load.png
│   ├── phase_transform.png
│   ├── run_time_1.png
│   ├── run_time_2.png
│   └── run_time_3.png
├── requirements.txt
├── setup.py
└── tests
    ├── .coveragerc
    ├── 5_partner_export.sh
    ├── 6_o2m_import.sh
    ├── 7_convert_binary.sh
    ├── clean.sh
    ├── conf
    │   └── connection.conf
    ├── const.py
    ├── launch_test.sh
    ├── origin
    │   ├── compagny.csv
    │   ├── contact.csv
    │   ├── contact_url.csv
    │   ├── img
    │   │   ├── employee-image.png
    │   │   ├── employee_al-image.jpg
    │   │   ├── employee_chs-image.jpg
    │   │   ├── employee_djj-image.png
    │   │   ├── employee_dzc-image.jpg
    │   │   ├── employee_fme-image.jpg
    │   │   ├── employee_fp-image.jpg
    │   │   ├── employee_fpi-image.jpg
    │   │   ├── employee_han-image.png
    │   │   ├── employee_hne-image.png
    │   │   ├── employee_jep-image.jpg
    │   │   ├── employee_jgo-image.jpg
    │   │   ├── employee_jod-image.png
    │   │   ├── employee_jog-image.jpg
    │   │   ├── employee_jth-image.png
    │   │   ├── employee_jve-image.jpg
    │   │   ├── employee_lur-image.png
    │   │   ├── employee_mit-image.png
    │   │   ├── employee_ngh-image.jpg
    │   │   ├── employee_niv-image.jpg
    │   │   ├── employee_qdp-image.png
    │   │   ├── employee_stw-image.jpg
    │   │   └── employee_vad-image.jpg
    │   ├── product.csv
    │   ├── res.partner_o2m.csv
    │   ├── test_merge1.csv
    │   └── test_merge2.csv
    ├── test_from_file.py
    ├── test_import.py
    ├── test_merge.py
    ├── test_product_v10.py
    ├── test_product_v9.py
    └── test_split.py
/.gitignore:
--------------------------------------------------------------------------------
1 | *.pyc
2 | .project
3 | .pydevproject
4 |
5 |
--------------------------------------------------------------------------------
/COPYING:
--------------------------------------------------------------------------------
1 | GNU LESSER GENERAL PUBLIC LICENSE
2 | Version 3, 29 June 2007
3 |
4 | Copyright (C) 2007 Free Software Foundation, Inc.
5 | Everyone is permitted to copy and distribute verbatim copies
6 | of this license document, but changing it is not allowed.
7 |
8 |
9 | This version of the GNU Lesser General Public License incorporates
10 | the terms and conditions of version 3 of the GNU General Public
11 | License, supplemented by the additional permissions listed below.
12 |
13 | 0. Additional Definitions.
14 |
15 | As used herein, "this License" refers to version 3 of the GNU Lesser
16 | General Public License, and the "GNU GPL" refers to version 3 of the GNU
17 | General Public License.
18 |
19 | "The Library" refers to a covered work governed by this License,
20 | other than an Application or a Combined Work as defined below.
21 |
22 | An "Application" is any work that makes use of an interface provided
23 | by the Library, but which is not otherwise based on the Library.
24 | Defining a subclass of a class defined by the Library is deemed a mode
25 | of using an interface provided by the Library.
26 |
27 | A "Combined Work" is a work produced by combining or linking an
28 | Application with the Library. The particular version of the Library
29 | with which the Combined Work was made is also called the "Linked
30 | Version".
31 |
32 | The "Minimal Corresponding Source" for a Combined Work means the
33 | Corresponding Source for the Combined Work, excluding any source code
34 | for portions of the Combined Work that, considered in isolation, are
35 | based on the Application, and not on the Linked Version.
36 |
37 | The "Corresponding Application Code" for a Combined Work means the
38 | object code and/or source code for the Application, including any data
39 | and utility programs needed for reproducing the Combined Work from the
40 | Application, but excluding the System Libraries of the Combined Work.
41 |
42 | 1. Exception to Section 3 of the GNU GPL.
43 |
44 | You may convey a covered work under sections 3 and 4 of this License
45 | without being bound by section 3 of the GNU GPL.
46 |
47 | 2. Conveying Modified Versions.
48 |
49 | If you modify a copy of the Library, and, in your modifications, a
50 | facility refers to a function or data to be supplied by an Application
51 | that uses the facility (other than as an argument passed when the
52 | facility is invoked), then you may convey a copy of the modified
53 | version:
54 |
55 | a) under this License, provided that you make a good faith effort to
56 | ensure that, in the event an Application does not supply the
57 | function or data, the facility still operates, and performs
58 | whatever part of its purpose remains meaningful, or
59 |
60 | b) under the GNU GPL, with none of the additional permissions of
61 | this License applicable to that copy.
62 |
63 | 3. Object Code Incorporating Material from Library Header Files.
64 |
65 | The object code form of an Application may incorporate material from
66 | a header file that is part of the Library. You may convey such object
67 | code under terms of your choice, provided that, if the incorporated
68 | material is not limited to numerical parameters, data structure
69 | layouts and accessors, or small macros, inline functions and templates
70 | (ten or fewer lines in length), you do both of the following:
71 |
72 | a) Give prominent notice with each copy of the object code that the
73 | Library is used in it and that the Library and its use are
74 | covered by this License.
75 |
76 | b) Accompany the object code with a copy of the GNU GPL and this license
77 | document.
78 |
79 | 4. Combined Works.
80 |
81 | You may convey a Combined Work under terms of your choice that,
82 | taken together, effectively do not restrict modification of the
83 | portions of the Library contained in the Combined Work and reverse
84 | engineering for debugging such modifications, if you also do each of
85 | the following:
86 |
87 | a) Give prominent notice with each copy of the Combined Work that
88 | the Library is used in it and that the Library and its use are
89 | covered by this License.
90 |
91 | b) Accompany the Combined Work with a copy of the GNU GPL and this license
92 | document.
93 |
94 | c) For a Combined Work that displays copyright notices during
95 | execution, include the copyright notice for the Library among
96 | these notices, as well as a reference directing the user to the
97 | copies of the GNU GPL and this license document.
98 |
99 | d) Do one of the following:
100 |
101 | 0) Convey the Minimal Corresponding Source under the terms of this
102 | License, and the Corresponding Application Code in a form
103 | suitable for, and under terms that permit, the user to
104 | recombine or relink the Application with a modified version of
105 | the Linked Version to produce a modified Combined Work, in the
106 | manner specified by section 6 of the GNU GPL for conveying
107 | Corresponding Source.
108 |
109 | 1) Use a suitable shared library mechanism for linking with the
110 | Library. A suitable mechanism is one that (a) uses at run time
111 | a copy of the Library already present on the user's computer
112 | system, and (b) will operate properly with a modified version
113 | of the Library that is interface-compatible with the Linked
114 | Version.
115 |
116 | e) Provide Installation Information, but only if you would otherwise
117 | be required to provide such information under section 6 of the
118 | GNU GPL, and only to the extent that such information is
119 | necessary to install and execute a modified version of the
120 | Combined Work produced by recombining or relinking the
121 | Application with a modified version of the Linked Version. (If
122 | you use option 4d0, the Installation Information must accompany
123 | the Minimal Corresponding Source and Corresponding Application
124 | Code. If you use option 4d1, you must provide the Installation
125 | Information in the manner specified by section 6 of the GNU GPL
126 | for conveying Corresponding Source.)
127 |
128 | 5. Combined Libraries.
129 |
130 | You may place library facilities that are a work based on the
131 | Library side by side in a single library together with other library
132 | facilities that are not Applications and are not covered by this
133 | License, and convey such a combined library under terms of your
134 | choice, if you do both of the following:
135 |
136 | a) Accompany the combined library with a copy of the same work based
137 | on the Library, uncombined with any other library facilities,
138 | conveyed under the terms of this License.
139 |
140 | b) Give prominent notice with the combined library that part of it
141 | is a work based on the Library, and explaining where to find the
142 | accompanying uncombined form of the same work.
143 |
144 | 6. Revised Versions of the GNU Lesser General Public License.
145 |
146 | The Free Software Foundation may publish revised and/or new versions
147 | of the GNU Lesser General Public License from time to time. Such new
148 | versions will be similar in spirit to the present version, but may
149 | differ in detail to address new problems or concerns.
150 |
151 | Each version is given a distinguishing version number. If the
152 | Library as you received it specifies that a certain numbered version
153 | of the GNU Lesser General Public License "or any later version"
154 | applies to it, you have the option of following the terms and
155 | conditions either of that published version or of any later version
156 | published by the Free Software Foundation. If the Library as you
157 | received it does not specify a version number of the GNU Lesser
158 | General Public License, you may choose any version of the GNU Lesser
159 | General Public License ever published by the Free Software Foundation.
160 |
161 | If the Library as you received it specifies that a proxy can decide
162 | whether future versions of the GNU Lesser General Public License shall
163 | apply, that proxy's public statement of acceptance of any version is
164 | permanent authorization for you to choose that version for the
165 | Library.
166 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | Odoo CSV Import Export Library
2 | ==============================
3 | This library provides tools to easily and quickly import data into Odoo or export data from Odoo using CSV files.
4 | It also provides a framework to manipulate data coming from CSV files.
5 |
6 | - [Odoo CSV Import Export Library](#odoo-csv-import-export-library)
7 | - [Installation](#installation)
8 | - [Importing Data](#importing-data)
9 | - [Import Parameters](#import-parameters)
10 | - [--config CONFIG](#config-config)
11 | - [--file FILENAME](#file-filename)
12 | - [--sep SEPARATOR](#sep-separator)
13 | - [--skip LINE](#skip-line)
14 | - [--model MODEL](#model-model)
15 | - [--size BATCH_SIZE](#size-batchsize)
16 | - [--worker WORKER](#worker-worker)
17 | - [--groupby SPLIT](#groupby-split)
18 | - [--ignore IGNORE](#ignore-ignore)
19 | - [--context CONTEXT](#context-context)
20 | - [Import Related Keys](#import-related-keys)
21 | - [ORM and Performance Related Keys](#orm-and-performance-related-keys)
22 | - [Model Specific Keys](#model-specific-keys)
23 | - [--o2m](#o2m)
24 | - [--check](#check)
25 | - [--fail](#fail)
26 | - [Using the Script](#using-the-script)
27 | - [Transformations](#transformations)
28 | - [Basic Concepts](#basic-concepts)
29 | - [A Simple Partner Import](#a-simple-partner-import)
30 | - [Dealing with Relationships](#dealing-with-relationships)
31 | - [Many2one Relationships](#many2one-relationships)
32 | - [One2many Relationships](#one2many-relationships)
33 | - [Many2many Relationships](#many2many-relationships)
34 | - [Controlling the Load sequence](#controlling-the-load-sequence)
35 | - [Mapper Functions](#mapper-functions)
36 | - [mapper.const(value)](#mapperconstvalue)
37 | - [mapper.val(field, default='', postprocess=lambda x: x, skip=False)](#mappervalfield-default-postprocesslambda-x-x-skipfalse)
38 | - [mapper.map_val(field, mapping, default='')](#mappermapvalfield-mapping-default)
39 | - [mapper.num(field, default='0.0')](#mappernumfield-default00)
40 | - [mapper.bool_val(field, true_vals=[], false_vals=[])](#mapperboolvalfield-truevals-falsevals)
41 | - [mapper.binary(field, path_prefix, skip=False, encoding="utf-8")](#mapperbinaryfield-pathprefix-skipfalse-encoding%22utf-8%22)
42 | - [mapper.concat(separator, *fields)](#mapperconcatseparator-fields)
43 | - [mapper.m2o(PREFIX, field, default='', skip=False)](#mapperm2oprefix-field-default-skipfalse)
44 | - [mapper.m2o_map(PREFIX, mapper, default='', skip=False)](#mapperm2omapprefix-mapper-default-skipfalse)
45 | - [mapper.m2m(PREFIX, *fields)](#mapperm2mprefix-fields)
46 | - [mapper.m2m_id_list(PREFIX, *args, **kwargs)](#mapperm2midlistprefix-args-kwargs)
47 | - [mapper.m2m_value_list(*args, **kwargs)](#mapperm2mvaluelistargs-kwargs)
48 | - [Advanced Transformations](#advanced-transformations)
49 | - [User Defined Mappers](#user-defined-mappers)
50 | - [Managing the Client CSV file](#managing-the-client-csv-file)
51 | - [Adding a column](#adding-a-column)
52 | - [Removing Lines](#removing-lines)
53 | - [Updating Records With Database IDs](#updating-records-with-database-ids)
54 | - [A Real Life Example](#a-real-life-example)
55 | - [Performances Considerations](#performances-considerations)
56 | - [Importing Related or Computed Fields](#importing-related-or-computed-fields)
57 | - [Troubleshooting](#troubleshooting)
58 | - [When the number of records does not match](#when-the-number-of-records-does-not-match)
59 | - [Tips and Tricks](#tips-and-tricks)
60 | - [Importing Data of Multiple Companies](#importing-data-of-multiple-companies)
61 | - [Importing Translations](#importing-translations)
62 | - [Importing Account Move Lines](#importing-account-move-lines)
63 | - [Exporting Data](#exporting-data)
64 | - [Requirements](#requirements)
65 |
66 | # Installation
67 | * From GitHub
68 |
69 | ```
70 | git clone git@github.com:tfrancoi/odoo_csv_import.git
71 | ```
72 |
73 | * From PyPi
74 |
75 | ```
76 | [sudo] pip install odoo_import_export_client
77 | ```
78 | # Importing Data
79 | The Odoo CSV Import Export library provides the script `odoo_import_thread.py` to import data into Odoo. The script is designed to load one data file into one model. That means you might need to run the script several times with different data files, models and other options to complete an import.
80 |
81 |
82 |
83 | Data is not inserted directly into the database; instead, it is loaded by calling the method `models.load`, so the standard behaviour of each model is respected.
84 |
85 | This script has several options. Type the command `odoo_import_thread.py --help` to get the usage.
86 |
87 | ```
88 | usage: odoo_import_thread.py [-h] -c CONFIG --file FILENAME --model MODEL
89 | [--worker WORKER] [--size BATCH_SIZE]
90 | [--skip SKIP] [--fail] [-s SEPARATOR]
91 | [--groupby SPLIT] [--ignore IGNORE] [--check]
92 | [--context CONTEXT] [--o2m]
93 |
94 | Import data in batch and in parallel
95 |
96 | optional arguments:
97 | -h, --help show this help message and exit
98 | -c CONFIG, --config CONFIG
99 | Configuration File that contains connection parameters
100 | --file FILENAME File to import
101 | --model MODEL Model to import
102 | --worker WORKER Number of simultaneous connection
103 | --size BATCH_SIZE Number of line to import per connection
104 | --skip SKIP Skip until line [SKIP]
105 | --fail Fail mode
106 | -s SEPARATOR, --sep SEPARATOR
107 | CSV separator
108 | --groupby SPLIT Group data per batch with the same value for the given
109 | column in order to avoid concurrent update error
110 | --ignore IGNORE list of column separate by comma. Those column will be
111 | remove from the import request
112 | --check Check if record are imported after each batch.
113 | --context CONTEXT context that will be passed to the load function, need
114 | to be a valid python dict
115 | --o2m When you want to import o2m field, don't cut the batch
116 | until we find a new id
117 | ```
118 | One of the most important features is the ability to import in parallel while controlling the transaction size.
119 | These options make it possible to import huge data files with controlled performance, which is not possible with the built-in Odoo import wizard.
120 |
121 | Here is what a typical execution of the script looks like:
122 |
123 | 
124 |
125 | The parameter values are set for illustration purposes.
126 |
127 | When running the script, a number of worker threads are spawned. Each of them handles a number of records per transaction, and each transaction inserts or updates records in the Odoo instance defined in the configuration file.
128 |
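For instance, the following command (the file name and model are illustrative) spawns 4 import threads, each committing batches of 100 records:

```
odoo_import_thread.py -c conf/connection.conf --file=res_partner.csv --model=res.partner --worker=4 --size=100
```
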
129 | ## Import Parameters
130 |
131 | ### --config CONFIG
132 | The configuration file `CONFIG` is a text file that defines the parameters used in the import. Here is an example.
133 |
134 | ```
135 | [Connection]
136 | hostname = mydb.odoo.com
137 | database = mydb
138 | login = admin
139 | password = admin
140 | protocol = jsonrpcs
141 | port = 443
142 | uid = 2
143 | ```
144 | The section `[Connection]` is mandatory. Then the following parameters must be set accordingly.
145 |
146 | | Parameter | Description |
147 | | --- | --- |
148 | | hostname | Name of the host where Odoo resides. |
149 | | database | Name of the PostgreSQL database. |
150 | | login | The login used to create or update the records. |
151 | | password | The login's password. |
152 | | protocol | Protocol used for RPC calls. It can be one of the following values: xmlrpc, xmlrpcs, jsonrpc, jsonrpcs. For a remote database, it's strongly advised to use an encrypted protocol (xmlrpcs or jsonrpcs). |
153 | | port | TCP port where Odoo can be reached. Usually 443 for encrypted remote connections, or 8069 for a local Odoo with its default configuration. |
154 | | uid | The database id of the res.users record identified by the parameter `login`. Well known ids are: 1 = admin user prior to V12; 2 = admin user as of V12. |
207 |
208 |
209 | > **Tip:** On premise, it's advised to use a dedicated user with minimal access rights on all the models involved in the import.
210 |
211 | By default, `CONFIG` is set to `conf/connection.conf`. Under Windows, you must always set this option explicitly because the default path separator is not valid on that OS.
212 |
213 | ### --file FILENAME
214 | Define the CSV `FILENAME` to import. The CSV format is mandatory. In order to be importable in Odoo, this file must follow some rules (a compliant example is shown after the list):
215 | - The file must be in UTF-8 encoding.
216 | - One file must contain data of only one model.
217 | - The first line is the column names. All columns must have the technical name of the fields.
218 | - All lines must have an `id` column filled with an XML_ID that identifies the record.
219 | - Some field formats must be respected:
220 | - Boolean values must be 0 or 1.
221 | - Binary data must be encoded in base64.
222 | - Datetime fields format depends on the language (often %Y-%m-%d %H:%M:%S).
223 | - The decimal separator of float values also depends on the language (often '.').
224 | - Selection fields must always contain database values.
225 | - Many2one fields must be suffixed with `/id` if their value is an XML_ID or `.id` if it's a database id.
226 | - Many2many fields must be a comma separated list of XML_IDs.
227 | - If a field value spans multiple lines, it must be enclosed in double quotes (").
228 |
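A minimal file that follows these rules might look like this (the fields and XML_IDs are hypothetical):

```
id;name;active;parent_id/id;category_id/id
my_import_res_partner.p1;John Doe;1;my_import_res_partner.c1;my_import_categ.premium,my_import_categ.vip
```
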
229 | The fields separator can be set with the option `--sep SEPARATOR`.
230 |
231 | You can skip the first lines of the file with the option `--skip LINE`.
232 |
233 | The name of the CSV file can be used to set the model by default. Ex: the file name `res_partner.csv` sets the model to `res.partner`. See the `--model` option for more detail.
234 |
235 | ### --sep SEPARATOR
236 | Define the column separator. Ex: `--sep=,`. By default, it's the semicolon (;).
237 |
238 | If the separator is present in a field value, the value must be enclosed with double quotes (").
239 |
240 | ### --skip LINE
241 | Allows skipping the first `LINE` line(s) of the file. Ex: `--skip=3` will skip the first three lines.
242 |
243 | Keep in mind the first line read must contain the column names; if the header is already on the first line of the file, don't skip any line.
244 |
245 | ### --model MODEL
246 | Set the `MODEL` to import data into. Ex: `--model=res.partner`.
247 |
248 | By default, the model is the name of the CSV file without its extension and with the underscores (_) replaced by dots (.). So, if the CSV file is named `res_partner.csv`, the model is `res.partner` by default.
249 |
250 | ### --size BATCH_SIZE
251 | Controls the number of records (`BATCH_SIZE`) imported in one transaction.
252 |
253 | When using the standard import wizard of Odoo, an import is always handled by one worker in one single transaction for the whole file.
254 |
255 | 
256 |
257 | When dealing with big data files, this may lead to two main issues:
258 | - the time needed to import the whole file could exceed the maximum time allowed for a transaction to run. This time is set by the Odoo parameters `limit-time-cpu` and `limit-time-real`,
259 | - if an error occurs on one record, the whole transaction fails and all the records are rejected.
260 |
261 | The solution is then to reduce the number of records in one transaction by setting the `--size` parameter to the desired number.
262 |
263 | 
264 |
265 | Here, colored in blue, the transaction contains two records (`--size=2`). Now, only two records instead of the whole file must be imported during the time allocated for the transaction.
266 |
267 | This option is also helpful when importing large records over a WAN connection, because a smaller transaction size leads to smaller JSON or XML payloads to send over the network. However, it causes a bit more network overhead, which could slow down the total run time. This run time can be drastically decreased by using the `--worker` parameter.
268 |
269 | ### --worker WORKER
270 | Controls the number of import threads in parallel.
271 |
272 | Here is what an import looks like with `--worker=2`.
273 |
274 | 
275 |
276 | The whole file is now handled by two workers in parallel. The total run time is then roughly divided by two.
277 |
278 | As a rule of thumb, you can set the number of workers up to 80% of the number of Odoo workers, so that other users can still work while the import runs.
279 |
280 | When working with multiple workers, there is a potential drawback: concurrent updates.
281 |
282 | In the following example, suppose Import Thread 1 is importing my_partner_2 while Import Thread 2 is importing my_partner_3.
283 |
284 | 
285 |
286 | Both partners have the same parent_id: my_partner_1. As `parent_id` is a non-readonly related field, the insert/update of my_partner_2 and my_partner_3 will both trigger an update on my_partner_1. That's a concurrent update. As a consequence, the transaction of both threads will fail. To solve such an issue, the parameter `--groupby` can be used.
287 |
288 | ### --groupby SPLIT
289 | Selects the column used to group records into the same thread.
290 |
291 | To avoid the concurrent update issue described previously, you can use `--groupby=parent_id/id`. By doing this, we ensure all the records with the same `parent_id/id` are imported by the same thread. It thus eliminates the concurrent updates **caused by the parent_id**.
292 |
293 | 
294 |
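A complete command could then look like this (the file name and model are illustrative):

```
odoo_import_thread.py -c conf/connection.conf --file=res_partner.csv --model=res.partner --worker=4 --size=100 --groupby=parent_id/id
```
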
295 | ### --ignore IGNORE
296 | Specifies the columns that do not need to be imported. Multiple columns can be set in a comma separated list. Ex: `--ignore=col1,col2,col3`.
297 |
298 | This is typically used to avoid cascade updates while importing related fields. Refer to [Importing Related or Computed Fields](#importing-related-or-computed-fields).
299 |
300 | ### --context CONTEXT
301 | Define the context of the ORM while importing. Ex:`--context="{'tracking_disable': True}"`.
302 |
303 | Here are some useful context keys.
304 |
305 | #### Import Related Keys
306 |
307 |
308 | | Key | Description |
309 | | --- | --- |
310 | | write_metadata | When True, allows importing the audit log fields (create_uid, create_date, write_uid, write_date). The import must run with the `admin` user. Requires the module import_metadata. |
311 | | update_many2many | Set it to True when the data file contains a many2many relationship split as one record per line instead of a comma separated list of XML_IDs in one column. Suppose we want to assign categories to products; a regular record is:<br>`product1;categ1,categ2,categ3`<br>With 'update_many2many': True, you can import a file with the following structure:<br>`product1;categ1`<br>`product1;categ2`<br>`product1;categ3` |
331 |
332 | #### ORM and Performance Related Keys
333 |
334 |
335 | | Key | Description |
336 | | --- | --- |
337 | | tracking_disable | When True, don't create messages in the chatter. |
338 | | defer_fields_computation | When True, recompute the computed fields at the end of the transaction instead of after each record. Useless if --size=1. Requires the module defer_fields_computation. |
339 | | defer_parent_store_computation | Defer the computation of the fields parent_left and parent_right to the end of the transaction. Valid up to Odoo 11. |
340 | | lang | Set the current language. Ex: 'lang': 'fr_FR' |
341 | | force_company | Set the current company. Use the database identifier of the company. |
378 |
379 | #### Model Specific Keys
380 |
381 |
382 | | Key | Description |
383 | | --- | --- |
384 | | check_move_validity | Set it to False when you import account moves and account move lines. Refer to [Importing Account Move Lines](#importing-account-move-lines) for more details. |
385 | | create_product_product | Set it to True when you import product templates together with their variants. Without this key, the ORM will automatically create the variants when the templates are imported. |
400 |
401 | These are just a few examples. Feel free to look into the Odoo code to discover all the available context keys.
402 |
403 | ### --o2m
404 | Use this option when you import a data file with one2many relationships. The import file must follow a specific structure.
405 |
406 | 
407 |
408 | Suppose the model `master` has two one2many fields `child1_ids` and `child2_ids`, linking respectively the models `child1` and `child2`.
409 | On the line that begins a master record, you set all the master fields, like in a regular import file, and you can also add the fields of the first child records. On the following lines, you add the data of the remaining children, leaving the columns of the master record and of the missing children empty (a minimal sketch of such a file is shown below).
410 |
411 | With the `--o2m` option, the master record will be imported with its two `child1` and its three `child2` records at the same time.
412 | It is worth noticing that it's impossible to set XML_IDs on the child records. As a consequence:
413 | - you cannot run the import again to update the children's data,
414 | - the children cannot be referenced in another import file.
415 |
416 |
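A minimal sketch of such a file (models and field names are hypothetical):

```
id;name;child1_ids/name;child2_ids/name
my_import_master.m1;Master 1;Child1 A;Child2 A
;;Child1 B;Child2 B
;;;Child2 C
```
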
417 |
418 | ### --check
419 | With this option, at the end of each transaction, the number of records in the transaction is compared to the number of imported records. If these numbers do not match, an error message is printed. Most likely, the transaction contains records with duplicate XML_IDs. Refer to [When the number of records does not match](#when-the-number-of-records-does-not-match) for more explanations.
420 |
421 | ### --fail
422 | Engages the fail mode.
423 |
424 | When you run `odoo_import_thread.py` without the `--fail` option, it runs in _normal_ mode. In this mode, any rejected record is printed in a file with the same name as the parameter `--file` suffixed by `.fail` and located in the same folder.
425 |
426 | This `.fail` file may contain records rejected for good reasons (e.g. a required field is missing) or _bad_ reasons. If you run an import with multiple workers, a bad reason could be a concurrent update. And even in a single thread, when an error occurs in one record, all the records of the transaction (`--size`) are rejected.
427 | This means the `.fail` file may contain records that could be imported if the process had been run by a single thread with a dedicated transaction for each record. That's the role of the fail mode.
428 |
429 | 
430 |
431 | In fail mode, `odoo_import_thread.py` will try to import the records of the `.fail` file. Additionally, neither `--worker` nor `--size` is set. The default values of `1` are then used to ensure a single thread and a single record per transaction.
432 |
433 | In this mode, the rejected records are placed in the `.fail.bis` file in the same folder. This file contains only rejections that need your attention and must be solved before importing again.
434 |
435 | ## Using the Script
436 | To get the full benefit of the script, imports are most of the time run with multiple workers and a user-defined transaction size. In this case, and because of the fail mode, you always need two commands to import one file.
437 |
438 | Assuming:
439 | - your configuration file is `connection.conf` located in the current folder,
440 | - your data file is `my_data.csv` located in the current folder,
441 | - the target model is `my.model`,
442 | - you want to run a fast multithreaded import.
443 |
444 | Run the following two commands (as an example):
445 | ```
446 | odoo_import_thread.py -c connection.conf --file=my_data.csv --model=my.model --worker=4 --size=200 [other options]
447 | odoo_import_thread.py -c connection.conf --file=my_data.csv --model=my.model --fail [other options]
448 | ```
449 |
450 | The first command runs the import in parallel and in batch. The rejected records are printed in the file `my_data.csv.fail`. We don't need to look at this file yet; it will be handled by the next command.
451 |
452 | In the second command, the parameter `--fail` replaces `--worker` and `--size`. The import runs in fail mode. It will read `my_data.csv.fail` (note the parameter `--file` is unchanged) and print the rejected records in the file `my_data.csv.fail.bis`. If the `.fail.bis` file is empty, all the records have been imported (inserted or updated).
453 |
454 | Refer to the [Troubleshooting](#troubleshooting) section to know how to solve some issues.
455 |
456 | ## Transformations
457 |
458 | When the file to import doesn't respect the [expected format](#file-filename) of `odoo_import_thread.py`, it's necessary to apply some transformations in order to create compliant CSV files. The Odoo CSV Import Export library helps in creating a Python script able to transform CSV files, thanks to the `Processor` and `mapper` objects.
459 |
460 | 
461 |
462 | ### Basic Concepts
463 |
464 | Let's start with a simple use case to introduce the main concepts of the transformations. Once you're familiar with them, a more complete use case is provided [here](#a-real-life-example).
465 |
466 | #### A Simple Partner Import
467 | A customer wants to import some partners. He provides the following CSV file, say `client_file.csv`:
468 |
469 | ```
470 | Firstname;Lastname;Birthdate
471 | John;Doe;31/12/1980
472 | David;Smith;28/02/1985
473 | ```
474 | This file cannot be imported directly because:
475 | - the fields `Lastname`, `Firstname`, `Birthdate` do not exist in the model `res.partner`,
476 | - the date format is not compliant,
477 | - there is no `id` field.
478 |
479 | The first step is to ensure that all the fields exist in the target model. Here, the birthdate is new data; we assume it will be stored in the field `birthdate`, created before the import. `Firstname` and `Lastname`, for their part, will be combined into the `name` of the partner.
480 |
481 | Now the transformation step can begin. It consists in writing a Python script that builds another CSV file compliant with the model `res.partner`: this is our Odoo CSV file.
482 | In this case, the transformation steps will:
483 | - define how we build the `name` field from the columns `Lastname` and `Firstname`,
484 | - change the date format to a compliant one.
485 |
486 | Another important point to consider is what happens when we load the data several times (*it could occur if the transformations must be rewritten*). Basically, when we import the Odoo CSV file the first time, the two partners will be created. But if we run it again, we don't want to create duplicates. Instead, we want to update the partner information. So the transformation phase is also necessary to:
487 |
488 | - assign an XML_ID to each partner of the file.
489 |
490 | The presence of an XML_ID ensures that a record is created if it doesn't exist, or updated if it already exists. This behaviour is included in the method `load` of each Odoo model.
491 |
492 | Let's build the transformation script, say `res_partner.py`. We start by importing the needed objects from the library.
493 |
494 | ```
495 | # -*- coding: utf-8 -*-
496 | from odoo_csv_tools.lib import mapper
497 | from odoo_csv_tools.lib.transform import Processor
498 |
499 | # Custom import
500 | from datetime import datetime # used to change the format of datetime fields
501 | ```
502 |
503 | Then we build a `Processor` object from the client CSV file `client_file.csv`. Assuming this file resides in the current folder:
504 |
505 | ```
506 | processor = Processor('client_file.csv', delimiter=';')
507 | ```
508 |
509 | Now we create a mapping dictionary whose keys are the fields of the target model (`res.partner`) we want to import - **at least the required fields without a default value** - and whose values define how we get them from the client file.
510 |
511 | ```
512 | res_partner_mapping = {
513 | 'id': mapper.m2o_map('my_import_res_partner', mapper.concat('_', 'Firstname', 'Lastname', 'Birthdate')),
514 |     'name': mapper.concat(' ', 'Firstname', 'Lastname'),
515 |     'birthdate': mapper.val('Birthdate', postprocess=lambda x: datetime.strptime(x, "%d/%m/%Y").strftime("%Y-%m-%d 00:00:00")),
516 | }
517 | ```
518 | All the fields are extracted with the methods of the `mapper` object. These are described [here](#mapper-functions).
519 |
520 | An important thing to notice is the `id` field. It is required by the script `odoo_import_thread.py`. It contains an XML_ID that we build *as we want*, as long as its uniqueness is guaranteed. In this example, we assume the concatenation of three columns (`Firstname`, `Lastname` and `Birthdate`) is enough to identify a record. It will create XML_IDs like `my_import_res_partner.John_Doe_31/12/1980` in the Odoo CSV file. You are free to choose whatever module name you want, here `my_import_res_partner`, but it's a good idea to include the model name and something like the *project* name.
521 |
522 | Now we can invoke the transformation by itself.
523 |
524 | ```
525 | processor.process(res_partner_mapping, 'res.partner.csv', {'model': 'res.partner', 'context': "{'tracking_disable': True}", 'worker': 2, 'batch_size': 20})
526 | ```
527 | This step will create the import file `res.partner.csv` for the model `res.partner`. It should look like this, conforming to `res_partner_mapping`:
528 |
529 | ```
530 | id;name;birthdate
531 | my_import_res_partner.John_Doe_31/12/1980;John Doe;1980-12-31 00:00:00
532 | my_import_res_partner.David_Smith_28/02/1985;David Smith;1985-02-28 00:00:00
533 | ```
534 | > **Note:** The order of the columns is not related to the client file or the keys in the transform mapping dictionary.
535 |
536 | Notice some options are set when invoking the transformation: `'context': "{'tracking_disable': True}", 'worker': 2, 'batch_size': 20`.
537 | They don't play any role in the transformation itself; instead, they will be used by the import shell script later. Fortunately, we can create this shell script automatically by adding the following line:
538 |
539 | ```
540 | processor.write_to_file("res_partner.sh", python_exe='', path='')
541 | ```
542 |
543 | This will create the script `res_partner.sh` that will load the data with `odoo_import_thread.py`, first in normal mode, then in fail mode. It looks like this:
544 |
545 | ```
546 | odoo_import_thread.py -c conf/connection.conf --file=res.partner.csv --model=res.partner --worker=2 --size=20 --groupby= --ignore= --sep=";" --context="{'tracking_disable': True}"
547 | odoo_import_thread.py -c conf/connection.conf --fail --file=res.partner.csv --model=res.partner --ignore= --sep=";" --context="{'tracking_disable': True}"
548 | ```
549 |
550 | The complete python script:
551 | ```
552 | # -*- coding: utf-8 -*-
553 | from odoo_csv_tools.lib import mapper
554 | from odoo_csv_tools.lib.transform import Processor
555 |
556 | # Custom import
557 | from datetime import datetime # used to change the format of datetime fields
558 |
559 | processor = Processor('client_file.csv', delimiter=';')
560 |
561 | res_partner_mapping = {
562 | 'id': mapper.m2o_map('my_import_res_partner', mapper.concat('_', 'Firstname', 'Lastname', 'Birthdate')),
563 |     'name': mapper.concat(' ', 'Firstname', 'Lastname'),
564 |     'birthdate': mapper.val('Birthdate', postprocess=lambda x: datetime.strptime(x, "%d/%m/%Y").strftime("%Y-%m-%d 00:00:00")),
565 | }
566 |
567 | processor.process(res_partner_mapping, 'res.partner.csv', {'model': 'res.partner', 'context': "{'tracking_disable': True}", 'worker': 2, 'batch_size': 20})
568 | processor.write_to_file("res_partner.sh", python_exe='', path='')
569 | ```
570 |
571 | Run the transformation script:
572 |
573 | ```
574 | python res_partner.py
575 | ```
576 |
577 | You should have created:
578 | - the import file `res.partner.csv` in the same folder as the client file `client_file.csv`,
579 | - the shell script `res_partner.sh` in your current folder.
580 |
581 | #### Dealing with Relationships
582 |
583 | ##### Many2one Relationships
584 |
585 | Coming back to our simple example, let's suppose the client adds the partner's company to his data. Here, we are not in a multi-company environment; the company is just the partner's parent. The file could look like this:
586 | ```
587 | Company;Firstname;Lastname;Birthdate
588 | The World Company;John;Doe;31/12/1980
589 | The Famous Company;David;Smith;28/02/1985
590 | ```
591 | In this case we must import four partners (the two companies and the two persons) and set the field `parent_id` of the two persons to their respective company. In a relational database we link records thanks to their internal identifiers (`id`). But at this step, these ids are unknown because the records are not imported yet. We will then use the XML_IDs to link the records.
592 |
593 | It means when we transform a company, we assign an XML_ID to it, then we use this XML_ID as the `parent_id` of the person who is a member of this company. As a consequence the companies must be imported before the persons. More precisely, the XML_IDs set in the `parent_id` must exist before being used as a relationship value.
594 |
595 | Let's create the transformation script. As usual, we start with the needed imports and the creation of a `Processor` on the client file.
596 | ```
597 | # -*- coding: utf-8 -*-
598 | from odoo_csv_tools.lib import mapper
599 | from odoo_csv_tools.lib.transform import Processor
600 | from datetime import datetime # used to change the format of datetime fields
601 |
602 | processor = Processor('client_file.csv', delimiter=';')
603 | ```
604 |
605 | Now we can define the mapping to extract the companies. These are records in the model `res.partner` with the boolean field `is_company` set. We also assume the company name is unique so that we can use it as an identifier in the XML_ID.
606 | ```
607 | res_partner_company_mapping = {
608 | 'id': mapper.m2o_map('my_import_res_partner', mapper.val('Company')),
609 | 'name': mapper.val('Company'),
610 | 'is_company': mapper.const('1'),
611 | }
612 |
613 | processor.process(res_partner_company_mapping, 'res.partner.company.csv', {}, 'set')
614 | ```
615 | It is worth noting the `'set'` option of `processor.process` when invoking the companies transformation. This option removes duplicates from the Odoo CSV file, which could otherwise appear if several partners belong to the same company.
616 |
617 | And here is the mapping to extract the persons. It's exactly the same as before except we've added the field `parent_id`.
618 | ```
619 | res_partner_mapping = {
620 | 'id': mapper.m2o_map('my_import_res_partner', mapper.concat('_', 'Firstname', 'Lastname', 'Birthdate')),
621 | 'name': mapper.concat(' ','Firstname','Lastname'),
622 |     'birthdate': mapper.val('Birthdate', postprocess=lambda x: datetime.strptime(x, "%d/%m/%Y").strftime("%Y-%m-%d 00:00:00")),
623 | 'parent_id/id': mapper.m2o_map('my_import_res_partner', mapper.val('Company')),
624 | }
625 |
626 | processor.process(res_partner_mapping, 'res.partner.csv', {})
627 | ```
628 | The important thing to notice here is that we use exactly the same transformation method for the partner ids in both mappings in order to generate the same XML_ID (`res_partner_mapping['parent_id/id']` = `res_partner_company_mapping['id']`). *Remember also the suffix `/id` when using XML_IDs in relation fields.*
629 |
630 | The result will be two different Odoo CSV files:
631 |
632 | - The partners who are companies: `res.partner.company.csv`.
633 | ```
634 | id;name;is_company
635 | my_import_res_partner.The World Company;The World Company;1
636 | my_import_res_partner.The Famous Company;The Famous Company;1
637 | ```
638 |
639 | - The persons: `res.partner.csv`, where the column `parent_id/id` refers to an existing `id` in `res.partner.company.csv`.
640 | ```
641 | id;parent_id/id;name;birthdate
642 | my_import_res_partner.John_Doe_31/12/1980;my_import_res_partner.The World Company;John Doe;1980-12-31 00:00:00
643 | my_import_res_partner.David_Smith_28/02/1985;my_import_res_partner.The Famous Company;David Smith;1985-02-28 00:00:00
644 | ```
645 |
646 | Finally we generate the shell script that will load the files by adding this line in the transformation script.
647 |
648 | ```
649 | processor.write_to_file("res_partner.sh", python_exe='', path='')
650 | ```
651 |
652 | This will create the shell script `res_partner.sh` that will load the data. It looks like this:
653 |
654 | ```
655 | odoo_import_thread.py -c conf/connection.conf --file=res.partner.company.csv --model=res.partner --groupby= --ignore= --sep=";" --context="{}"
656 | odoo_import_thread.py -c conf/connection.conf --fail --file=res.partner.company.csv --model=res.partner --ignore= --sep=";" --context="{}"
657 |
658 | odoo_import_thread.py -c conf/connection.conf --file=res.partner.csv --model=res.partner --groupby= --ignore= --sep=";" --context="{}"
659 | odoo_import_thread.py -c conf/connection.conf --fail --file=res.partner.csv --model=res.partner --ignore= --sep=";" --context="{}"
660 | ```
661 | The script contains all the commands to load both Odoo CSV files. They are written in the same order as the `processor.process` calls in the transformation script, so the import sequence is respected.
662 |
663 | The complete python script:
664 | ```
665 | # -*- coding: utf-8 -*-
666 | from odoo_csv_tools.lib import mapper
667 | from odoo_csv_tools.lib.transform import Processor
668 | from datetime import datetime # used to change the format of datetime fields
669 |
670 | processor = Processor('client_file.csv', delimiter=';')
671 |
672 | res_partner_company_mapping = {
673 | 'id': mapper.m2o_map('my_import_res_partner', mapper.val('Company')),
674 | 'name': mapper.val('Company'),
675 | 'is_company': mapper.const('1'),
676 | }
677 |
678 | processor.process(res_partner_company_mapping, 'res.partner.company.csv', {}, 'set')
679 |
680 | res_partner_mapping = {
681 | 'id': mapper.m2o_map('my_import_res_partner', mapper.concat('_', 'Firstname', 'Lastname', 'Birthdate')),
682 | 'name': mapper.concat(' ','Firstname','Lastname'),
683 |     'birthdate': mapper.val('Birthdate', postprocess=lambda x: datetime.strptime(x, "%d/%m/%Y").strftime("%Y-%m-%d 00:00:00")),
684 | 'parent_id/id': mapper.m2o_map('my_import_res_partner', mapper.val('Company')),
685 | }
686 |
687 | processor.process(res_partner_mapping, 'res.partner.csv', {})
688 |
689 | processor.write_to_file("res_partner.sh", python_exe='', path='')
690 | ```
691 |
692 |
693 | ##### One2many Relationships
694 |
695 | Usually we don't import `One2many` fields. Instead, we import the inverse `Many2one` relation in the linked model.
696 |
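For instance, instead of filling the one2many `order_line` on `sale.order`, you would import the order lines themselves with their many2one `order_id/id` pointing back to the order. A minimal sketch (XML_IDs and columns are illustrative):

```
id;order_id/id;name
my_import_order_line.l1;my_import_order.o1;First line
my_import_order_line.l2;my_import_order.o1;Second line
```
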
697 | ##### Many2many Relationships
698 |
699 | Let's suppose our customer adds some categories to the partners. The client file could look like this:
700 | ```
701 | Company;Firstname;Lastname;Birthdate;Category
702 | The World Company;John;Doe;31/12/1980;Premium
703 | The Famous Company;David;Smith;28/02/1985;Normal,Bad Payer
704 | ```
705 | The categories are set in a single column, on the same line as the partner record, and separated with a comma.
706 |
707 | By looking into Odoo, we see that the model `res.partner` contains a field `category_id` which is a Many2many to the model `res.partner.category`. If you remember the rule _Many2many fields must be a comma separated list of XML_IDs_ and that an XML_ID must be created before being used in a relationship, you get an idea of the procedure to apply.
708 |
709 | 1- Create all the categories by extracting them from the client file and assign them an XML_ID.
710 |
711 | 2- Build a comma separated list of XML_IDs of categories for each partner.
712 |
713 | Let's start the transformation script. As usual, we start with the needed imports and the creation of a `Processor` on the client file.
714 | ```
715 | # -*- coding: utf-8 -*-
716 | from odoo_csv_tools.lib import mapper
717 | from odoo_csv_tools.lib.transform import Processor
718 | from datetime import datetime # used to change the format of datetime fields
719 |
720 | processor = Processor('client_file.csv', delimiter=';')
721 | ```
722 |
723 | The first transformation extracts the categories. We assume the name is unique to create the XML_IDs.
724 | ```
725 | partner_category_mapping = {
726 | 'id': mapper.m2m_id_list('res_partner_category', 'Category'),
727 | 'name': mapper.m2m_value_list('Category'),
728 | }
729 |
730 | processor.process(partner_category_mapping, 'res.partner.category.csv', {}, m2m=True)
731 | ```
732 | Notice we use two mapper functions to deal with Many2many relationships: `m2m_id_list` and `m2m_value_list`, and the option `m2m=True` in the `processor.process` command.
733 |
734 | This will create the file `res.partner.category.csv` with all the unique categories, as follows:
735 | ```
736 | id;name
737 | res_partner_category.Premium;Premium
738 | res_partner_category.Normal;Normal
739 | res_partner_category.Bad Payer;Bad Payer
740 | ```
741 |
742 | Now we can complete the person mapping. It's exactly the same as before except we have added the field `category_id`.
743 |
744 | ```
745 | res_partner_mapping = {
746 | 'id': mapper.m2o_map('my_import_res_partner', mapper.concat('_', 'Firstname', 'Lastname', 'Birthdate')),
747 | 'name': mapper.concat(' ','Firstname','Lastname'),
748 |     'birthdate': mapper.val('Birthdate', postprocess=lambda x: datetime.strptime(x, "%d/%m/%Y").strftime("%Y-%m-%d 00:00:00")),
749 | 'parent_id/id': mapper.m2o_map('my_import_res_partner', mapper.val('Company')),
750 | 'category_id/id': mapper.m2m('res_partner_category', 'Category'),
751 | }
752 |
753 | processor.process(res_partner_mapping, 'res.partner.csv', {})
754 | ```
755 | The important thing here is to build the same XML_IDs (prefix `res_partner_category` + the `Category` value) in both
756 | `res_partner_mapping['category_id/id'] = mapper.m2m(...)` and `partner_category_mapping['id'] = mapper.m2m_id_list(...)`.
757 |
758 | The mapping `res_partner_mapping` will create a new file `res.partner.csv` like this:
759 | ```
760 | id;parent_id/id;name;birthdate;category_id/id
761 | my_import_res_partner.John_Doe_31/12/1980;my_import_res_partner.The World Company;John Doe;1980-12-31 00:00:00;res_partner_category.Premium
762 | my_import_res_partner.David_Smith_28/02/1985;my_import_res_partner.The Famous Company;David Smith;1985-02-28 00:00:00;res_partner_category.Normal,res_partner_category.Bad Payer
763 | ```
764 | Notice the column `category_id/id` that contains a comma separated list of XML_IDs of partner categories.
765 |
766 | Finally we create the load script by adding this line:
767 | ```
768 | processor.write_to_file("res_partner.sh", python_exe='', path='')
769 | ```
770 | It creates the script `res_partner.sh` looking like this:
771 | ```
772 | odoo_import_thread.py -c conf/connection.conf --file=res.partner.category.csv --model=res.partner.category --groupby= --ignore= --sep=";" --context="{}"
773 | odoo_import_thread.py -c conf/connection.conf --fail --file=res.partner.category.csv --model=res.partner.category --ignore= --sep=";" --context="{}"
774 |
775 | odoo_import_thread.py -c conf/connection.conf --file=res.partner.csv --model=res.partner --groupby= --ignore= --sep=";" --context="{}"
776 | odoo_import_thread.py -c conf/connection.conf --fail --file=res.partner.csv --model=res.partner --ignore= --sep=";" --context="{}"
777 | ```
778 |
779 | The complete python script:
780 | ```
781 | # -*- coding: utf-8 -*-
782 | from odoo_csv_tools.lib import mapper
783 | from odoo_csv_tools.lib.transform import Processor
784 | from datetime import datetime # used to change the format of datetime fields
785 |
786 | processor = Processor('client_file.csv', delimiter=';')
787 |
788 | partner_category_mapping = {
789 | 'id': mapper.m2m_id_list('res_partner_category', 'Category'),
790 | 'name': mapper.m2m_value_list('Category'),
791 | }
792 |
793 | processor.process(partner_category_mapping, 'res.partner.category.csv', {}, m2m=True)
794 |
795 | res_partner_mapping = {
796 | 'id': mapper.m2o_map('my_import_res_partner', mapper.concat('_', 'Firstname', 'Lastname', 'Birthdate')),
797 | 'name': mapper.concat(' ','Firstname','Lastname'),
798 |     'birthdate': mapper.val('Birthdate', postprocess=lambda x: datetime.strptime(x, "%d/%m/%Y").strftime("%Y-%m-%d 00:00:00")),
799 | 'parent_id/id': mapper.m2o_map('my_import_res_partner', mapper.val('Company')),
800 | 'category_id/id': mapper.m2m('res_partner_category', 'Category'),
801 | }
802 |
803 | processor.process(res_partner_mapping, 'res.partner.csv', {})
804 |
805 | processor.write_to_file("res_partner.sh", python_exe='', path='')
806 | ```
807 |
808 | > **Note:** it's possible to import many2many relationships with another file structure. Refer to the context key [update_many2many](#import-related-keys) to learn how.
809 |
810 |
811 | #### Controlling the Load sequence
812 | The load order in the shell script depends on the order of the `processor.process` instructions in the transformation script.
813 |
814 | This example script:
815 | ```
816 | processor = Processor('client_file.csv', delimiter=';')
817 |
818 | res_partner_company_mapping = {
819 | }
820 |
821 | res_partner_mapping = {
822 | }
823 |
824 | processor.process(res_partner_company_mapping, 'res.partner.company.csv', {}, 'set')
825 | processor.process(res_partner_mapping, 'res.partner.csv', {})
826 | processor.write_to_file("res_partner.sh", python_exe='', path='')
827 | ```
828 | will create the load script `res_partner.sh` with:
829 |
830 | 1- the load of `res.partner.company.csv` because it's the first invoked transformation,
831 |
832 | 2- the load of `res.partner.csv`.
833 |
834 | If you want to import the persons first (which is a bad idea here), just swap the two `processor.process` commands.
835 | ```
836 | processor = Processor('client_file.csv', delimiter=';')
837 |
838 | res_partner_company_mapping = {
839 | }
840 |
841 | res_partner_mapping = {
842 | }
843 |
844 | processor.process(res_partner_mapping, 'res.partner.csv', {})
845 | processor.process(res_partner_company_mapping, 'res.partner.company.csv', {}, 'set')
846 | processor.write_to_file("res_partner.sh", python_exe='', path='')
847 | ```
848 |
849 | Now, if you want to create two separate shell scripts, one for the companies and another for the persons, you need to create a new `Processor`.
850 | ```
851 | #For the 1st load script
852 | processor = Processor('client_file.csv', delimiter=';')
853 | res_partner_company_mapping = {
854 | }
855 | processor.process(res_partner_company_mapping, 'res.partner.company.csv', {}, 'set')
856 | processor.write_to_file("res_partner_company.sh", python_exe='', path='')
857 |
858 | #For the 2nd load script
859 | processor = Processor('client_file.csv', delimiter=';')
860 | res_partner_mapping = {
861 | }
862 | processor.process(res_partner_mapping, 'res.partner.csv', {})
863 | processor.write_to_file("res_partner.sh", python_exe='', path='')
864 | ```
865 | This will create the script `res_partner_company.sh` that loads only `res.partner.company.csv`, and the script `res_partner.sh` that loads only `res.partner.csv`. It can be useful if you need to do something between importing the companies and the persons (e.g. run another load or some RPC calls). The drawback is that the client file is read twice.
866 |
867 | ### Mapper Functions
868 | You can get the value of columns in the client file with several methods defined in the `mapper` object. Take a look at `lib/mapper.py` to get an up-to-date list of methods. Here are the most commonly used ones.
869 |
870 | #### mapper.const(value)
871 | Use it to always assign the same value to a field.
872 |
873 |
874 | | Client File | Mapper | Import File |
875 | | --- | --- | --- |
876 | | `my_column`<br>`my_value1`<br>`my_value2` | `{'my_field': mapper.const('forced_value')}` | `my_field`<br>`forced_value`<br>`forced_value` |
891 |
892 |
893 |
894 | Example: setting a company missing in the client file:
895 | ```
896 | fields_mapping = {
897 | ...
898 | 'company_id/id': mapper.const('base.main_company'),
899 | ...
900 | }
901 | ```
902 |
903 | #### mapper.val(field, default='', postprocess=lambda x: x, skip=False)
904 |
905 | Takes the value of the column. Use it for a `Char` or `Text` field.
906 |
907 |
908 | | Client File | Mapper | Import File |
909 | | --- | --- | --- |
910 | | `my_column`<br>`my_value1`<br>`my_value2` | `{'my_field': mapper.val('my_column')}` | `my_field`<br>`my_value1`<br>`my_value2` |
925 |
926 |
927 |
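The `default` and `postprocess` arguments make `mapper.val` handy for providing fallbacks and cleaning values on the fly; a small sketch (the column name `Phone` is hypothetical):

```
{
    'phone': mapper.val('Phone', default='', postprocess=lambda x: x.replace(' ', '')),
}
```
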
928 | #### mapper.map_val(field, mapping, default='')
929 | Takes the value from a dictionary where the key is the value of the column.
930 |
931 |
932 | | Client File | Mapper | Import File |
933 | | --- | --- | --- |
934 | | `my_column`<br>`key1`<br>`key2` | `mapping = {'key1': 'value1', 'key2': 'value2'}`<br>`{'my_field': mapper.map_val('my_column', mapping)}` | `my_field`<br>`value1`<br>`value2` |
953 |
954 |
955 |
956 | Example: setting a country.
957 |
958 | ```
959 | Country_column;
960 | BE;
961 | FR;
962 | ```
963 |
964 | ```
965 | country_map = {
966 | 'BE': 'base.be',
967 | 'FR': 'base.fr',
968 | }
969 |
970 | fields_mapping = {
971 | ...
972 | 'country_id/id': mapper.map_val('Country_column', country_map),
973 | ...
974 | }
975 | ```
976 |
977 | #### mapper.num(field, default='0.0')
978 |
979 | Takes the numeric value of the column and replaces the comma decimal separator with a dot. Use it for `Integer` or `Float` fields.
980 |
981 |
982 | | Client File | Mapper | Import File |
983 | | --- | --- | --- |
984 | | `my_column`<br>`01`<br>`2,3` | `{'my_field': mapper.num('my_column')}` | `my_field`<br>`01`<br>`2.3` |
999 |
1000 |
1001 |
1002 | #### mapper.bool_val(field, true_vals=[], false_vals=[])
1003 |
1004 | A boolean field in Odoo is always imported as 1 or 0. `true_vals` and `false_vals` are used to map the original values to 1 and 0. If the value in the client file is not in `true_vals` or `false_vals`, it is considered as TRUE if a value is present or FALSE if the column is empty.
1005 |
1006 |
1007 | Client File | Mapper | Import File |
1008 |
1009 |
1010 | my_column
1011 | Yes
1012 | No
1013 |
1014 | something else
1015 | |
1016 |
1017 | {
1018 | 'my_field': mapper.bool_val('my_column', ['Yes'], ['No']),
1019 | }
1020 | |
1021 |
1022 | my_field
1023 | 1
1024 | 0
1025 | 0
1026 | 1
1027 | |
1028 |
1029 |
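1030 | A typical use, as a hedged sketch assuming a hypothetical `Active_column` holding `Y`/`N` flags:
1031 | ```
1032 | 'active': mapper.bool_val('Active_column', ['Y'], ['N']),
1033 | ```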
1030 |
1031 |
1032 | #### mapper.binary(field, path_prefix, skip=False, encoding="utf-8")
1033 | Use it to convert a binary file to base64 and put the result in a binary field. Typically used to import images and attachments.
1034 |
1035 | Assuming images `pict_1.png` and `pict_2.png` are located in `/home/Pictures`:
1036 |
1037 |
1038 | **Client file:**
1039 | ```
1040 | my_column
1041 | pict_1.png
1042 | pict_2.png
1043 | ```
1044 | **Mapper:**
1045 | ```
1046 | {
1047 |     'my_field': mapper.binary('my_column', '/home/Pictures/'),
1048 | }
1049 | ```
1050 | **Import file:**
1051 | ```
1052 | my_field
1053 | kllkxqlxsqnxqxhHJVJSFSVSJDYVDV......
1054 | KKjdsndb77573çinjhffxxcdkllkxq......
1055 | ```
1056 |
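1057 | With `skip=True`, lines whose file is missing are skipped instead of being imported with an empty field. A hedged sketch for partner images, assuming a hypothetical `Picture_column`:
1058 | ```
1059 | 'image': mapper.binary('Picture_column', '/home/Pictures/', skip=True),
1060 | ```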
1057 |
1058 | #### mapper.concat(separator, *fields)
1059 |
1060 | Concatenates the values of one or several columns, separated by `separator`.
1061 |
1062 |
1063 | **Client file:**
1064 | ```
1065 | my_column1;my_column2
1066 | val1;val2
1067 | val3;val4
1068 | ```
1069 | **Mapper:**
1070 | ```
1071 | {
1072 |     'my_field': mapper.concat('_', 'my_column1', 'my_column2'),
1073 | }
1074 | ```
1075 | **Import file:**
1076 | ```
1077 | my_field
1078 | val1_val2
1079 | val3_val4
1080 | ```
1081 |
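1082 | A common use, as a hedged sketch assuming hypothetical `Firstname` and `Lastname` columns:
1083 | ```
1084 | 'name': mapper.concat(' ', 'Firstname', 'Lastname'),
1085 | ```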
1082 |
1083 |
1084 | #### mapper.m2o(PREFIX, field, default='', skip=False)
1085 |
1086 | Use it to create an XML_ID where the module is `PREFIX` and the name is the value of the column.
1087 |
1088 |
1089 | **Client file:**
1090 | ```
1091 | my_column
1092 | my_value1
1093 | my_value2
1094 | ```
1095 | **Mapper:**
1096 | ```
1097 | {
1098 |     'my_field/id': mapper.m2o('my_import_my_model', 'my_column'),
1099 | }
1100 | ```
1101 | **Import file:**
1102 | ```
1103 | my_field/id
1104 | my_import_my_model.my_value1
1105 | my_import_my_model.my_value2
1106 | ```
1107 |
1108 |
1109 | > Notice the field name suffixed with /id in the mapping dictionary.
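1110 |
1111 | A hedged sketch of a typical use, assuming hypothetical reference columns: `m2o` generates both the record's own XML_ID and a reference to its parent imported with the same `PREFIX`:
1112 | ```
1113 | fields_mapping = {
1114 |     'id': mapper.m2o('my_import_res_partner', 'Partner_ref'),
1115 |     'parent_id/id': mapper.m2o('my_import_res_partner', 'Parent_ref'),
1116 | }
1117 | ```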
1110 |
1111 | #### mapper.m2o_map(PREFIX, mapper, default='', skip=False)
1112 |
1113 | Use it to create an XML_ID where the module is `PREFIX` and the name is the result of `mapper`. It is often used with the mapper `mapper.concat` to create XML_IDs as the concatenation of several columns.
1114 |
1115 |
1116 | **Client file:**
1117 | ```
1118 | my_column1;my_column2
1119 | val1;val2
1120 | val3;val4
1121 | ```
1122 | **Mapper:**
1123 | ```
1124 | {
1125 |     'my_field/id': mapper.m2o_map('my_import_my_model', mapper.concat('_', 'my_column1', 'my_column2')),
1126 | }
1127 | ```
1128 | **Import file:**
1129 | ```
1130 | my_field/id
1131 | my_import_my_model.val1_val2
1132 | my_import_my_model.val3_val4
1133 | ```
1134 |
1135 |
1136 | > Notice the field name suffixed with /id in the mapping dictionary.
1137 |
1138 | #### mapper.m2m(PREFIX, *fields)
1139 | Returns a comma-separated list built from one or several columns, each value being prefixed by `PREFIX`. Use it to build the XML_ID list of a `Many2many` field.
1140 |
1141 | **Client file:**
1142 | ```
1143 | my_column1;my_column2
1144 | val1,val2;val3
1145 | ```
1146 | **Mapper:**
1147 | ```
1148 | {
1149 |     'my_field/id': mapper.m2m('my_import_my_model', 'my_column1', 'my_column2'),
1150 | }
1151 | ```
1152 | **Import file:**
1153 | ```
1154 | my_field/id
1155 | my_import_my_model.val1,my_import_my_model.val2,my_import_my_model.val3
1156 | ```
1157 |
1158 |
1159 | > Notice the field name suffixed with /id in the mapping dictionary.
1160 |
1161 | > Notice val1, val2 of my_column1 are handled the same way as val3 in my_column2.
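1162 |
1163 | A hedged sketch for partner tags, assuming a hypothetical `Tags_column` holding comma-separated tag names:
1164 | ```
1165 | 'category_id/id': mapper.m2m('my_import_partner_category', 'Tags_column'),
1166 | ```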
1162 |
1163 |
1164 |
1165 | #### mapper.m2m_id_list(PREFIX, *args, **kwargs)
1166 | Builds one record (line) per distinct value of a comma-separated list inside a column, prefixing each value with `PREFIX`.
1167 |
1168 |
1169 | **Client file:**
1170 | ```
1171 | my_column1
1172 | val1,val2
1173 | val1,val3
1174 | val4
1175 | ```
1176 | **Mapper:**
1177 | ```
1178 | {
1179 |     'my_field/id': mapper.m2m_id_list('my_import_my_model', 'my_column1'),
1180 | }
1181 | ```
1182 | **Import file (1):**
1183 | ```
1184 | my_field/id
1185 | my_import_my_model.val1
1186 | my_import_my_model.val2
1187 | my_import_my_model.val3
1188 | my_import_my_model.val4
1189 | ```
1189 |
1190 |
1191 |
1192 | (1) **To be used in conjunction with the option `m2m` when invoking the transformation** (see [Many2many Relationships](#many2many-relationships)).
1193 |
1194 | ```
1195 | processor.process(mapping, 'output.file.csv', {}, m2m=True)
1196 | ```
1197 | Without this option, the import file would look like this:
1198 | ```
1199 | my_field/id
1200 | [my_import_my_model.val1, my_import_my_model.val2]
1201 | [my_import_my_model.val1, my_import_my_model.val3]
1202 | [my_import_my_model.val4]
1203 | ```
1204 |
1205 | #### mapper.m2m_value_list(*args, **kwargs)
1206 | Builds one record (line) per distinct value of a comma-separated list inside a column.
1207 |
1208 | **Client file:**
1209 | ```
1210 | my_column1
1211 | val1,val2
1212 | val1,val3
1213 | val4
1214 | ```
1215 | **Mapper:**
1216 | ```
1217 | {
1218 |     'my_field': mapper.m2m_value_list('my_column1'),
1219 | }
1220 | ```
1221 | **Import file (1):**
1222 | ```
1223 | my_field
1224 | val1
1225 | val2
1226 | val3
1227 | val4
1228 | ```
1229 |
1230 |
1231 | (1) **To be used in conjunction with the option `m2m` when invoking the transformation** (see [Many2many Relationships](#many2many-relationships)).
1232 | ```
1233 | processor.process(mapping, 'output.file.csv', {}, m2m=True)
1234 | ```
1235 | Without this option, the import file would look like this:
1236 | ```
1237 | my_field
1238 | [val1, val2]
1239 | [val1, val3]
1240 | [val4]
1241 | ```
1242 |
1243 | ### Advanced Transformations
1244 |
1245 | #### User Defined Mappers
1246 | Sometimes the builtin mappers do not meet your needs, even with a `postprocess` function. In this case, you can map a field to a tailor-made function.
1247 | This function takes one argument representing an entire line of the client file, as a dictionary where the columns are the keys.
1248 |
1249 | ```
1250 | def my_field_mapper(line):
1251 | if line['column1'] == 'a_value':
1252 | return 'something'
1253 | return 'something_else'
1254 |
1255 | fields_mapping = {
1256 | ...
1257 | 'my_field': my_field_mapper,
1258 | ...
1259 | }
1260 | ```
1261 |
1262 | #### Managing the Client CSV file
1263 | Sometimes it's useful to change the client file according to your needs (e.g. removing useless lines, adding columns with data, ...). You can do that with a preprocessor function passed when building the `Processor`. See this example that does... nothing.
1264 |
1265 | ```
1266 | def myPreprocessor(header, data):
1267 | return header, data
1268 |
1269 | processor = Processor('client_file.csv', delimiter=';', preprocess=myPreprocessor)
1270 | ```
1271 |
1272 | The preprocessor function takes two arguments: `header` is the list of all the columns, and `data` is a list of lists, each inner list being a line of the client file. Let's see two learning examples of preprocessing.
1273 |
1274 | ##### Adding a column
1275 | Here we add the column `NEW_COLUMN` filled with the value `NEW_VALUE` for all the records.
1276 | ```
1277 | def myPreprocessor(header, data):
1278 |     header.append('NEW_COLUMN')
1279 |     # append the constant value to every data row
1280 |     for line in data:
1281 |         line.append('NEW_VALUE')
1282 |     return header, data
1282 | ```
1283 | ##### Removing Lines
1284 | Say we want to remove all the lines having the column `Firstname` with the value `John`.
1285 | ```
1286 | def myPreprocessor(header, data):
1287 |     data_new = []
1288 |     for row in data:
1289 |         line = dict(zip(header, row))
1290 |         if line['Firstname'] != 'John':
1291 |             data_new.append(row)
1292 |     return header, data_new
1293 | ```
1294 |
1295 | > **Note:** The client file is not physically changed; only the buffer used by the Processor is changed in memory. Still, the new columns are usable in the fields mapping dictionary, and the removed lines are not processed.
1296 |
1297 | #### Updating Records With Database IDs
1298 | It is possible to **update** records knowing their database ID instead of their XML_ID. The field `.id` designates a database ID. However, the script `odoo_import_thread.py` requires an `id` field, so the trick is to build an empty `id` field and to map the `.id` field from the client file.
1299 | ```
1300 | my_mapping = {
1301 | 'id': mapper.const(''),
1302 | '.id': mapper.val('id_column'),
1303 | ...
1304 | }
1305 | ```
1306 | ## A Real Life Example
1307 | A complete import project (transformation and load) is available in the repo [odoo_import_example](https://github.com/tfrancoi/odoo_import_example). It demonstrates use cases such as:
1308 | - importing partners with multiple categories
1309 | - importing products and variants with their suppliers
1310 | - importing messages
1311 |
1312 | > **Note:** The project was done in Odoo 11. Some models may differ in other versions.
1313 |
1314 | ## Performance Considerations
1315 |
1316 | ### Importing Related or Computed Fields
1317 |
1318 | Importing related fields (non readonly) causes cascade updates that drastically increase the import run time. Consider the following example.
1319 |
1320 | 
1321 |
1322 | Importing my_partner_2000 will trigger an update of my_partner_1 because parent_id is a non-readonly related field. And updating my_partner_1 will also trigger the update of all its children. When importing the next record, my_partner_2001, the same scenario happens: my_partner_1 is updated a second time, which triggers the update of all its children again, but this time the number of children has grown by my_partner_2000. So each time a new partner is created, the number of updates behind the scenes increases.
1323 |
1324 | You can recognize this scenario by watching the transaction time, which increases exponentially.
1325 |
1326 | The solution is to use the parameter `--ignore`. In this case, you should import with the option `--ignore=parent_id/id`.
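1327 |
1328 | A hedged sketch of the command (the file name is an assumption):
1329 | ```
1330 | odoo_import_thread.py -c connection.conf --file=res_partner.csv --model=res.partner --ignore=parent_id/id
1331 | ```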
1327 |
1328 | ## Troubleshooting
1329 |
1330 | ### When the number of records does not match
1331 | Suppose your Odoo CSV file contains 100 records, but after the load you count fewer than 100 new records.
1332 |
1333 | Possible cause:
1334 | - One or more records have the same XML_ID. The first record with that XML_ID was inserted, while the subsequent records updated it instead of creating new ones.
1335 |
1336 | To check the uniqueness of _what you think is_ a record identifier in the client file:
1337 | - create a new CSV file with one column containing the supposed identifier,
1338 | - check the uniqueness of the values, for example with the following bash command:
1339 | ```
1340 | sort my_xml_ids.csv | uniq -c | sort -nr
1341 | ```
1342 | - look for an output line beginning with a number > 1.
1343 |
1344 | ## Tips and Tricks
1345 |
1346 | ### Importing Data of Multiple Companies
1347 | When you run an import, the current company is the company of the user defined by the parameters `login` and `uid` in the [configuration file](#config-CONFIG).
1348 | As a rule of thumb, it's advised to separate the imports by company.
1349 |
1350 | Assuming you have to import data for Company_A and Company_B:
1351 | - build import files with data of Company_A
1352 | - build import files with data of Company_B
1353 | - set the import user as a member of Company_A
1354 | - import the files with the data of Company_A
1355 | - change the import user company to Company_B
1356 | - import the files with the data of Company_B
1357 |
1358 | ### Importing Translations
1359 |
1360 | Of course, the translations can be imported with data files tailored for the translation models. But it's a lot easier with the `lang` key set in the context. Let's take an example with the products.
1361 |
1362 | First, import the source terms. This can be done while importing legacy data. Say we have the following CSV file `product_template.csv`:
1363 |
1364 | ```
1365 | id;name;price
1366 | my_module.product_wallet;Wallet;10.0
1367 | my_module.product_bicyle;Bicycle;400.0
1368 | ```
1369 |
1370 | Import the products with the following command:
1371 | ```
1372 | odoo_import_thread.py -c connection.conf --file=product_template.csv
1373 | ```
1374 | _The option `--model=product.template` is not necessary since the CSV file is correctly named according to the model._
1375 |
1376 | Then, build another CSV file for the translations, say `product_template_FR.csv` for the French translations. This file contains only the translated terms for the products.
1377 |
1378 | ```
1379 | id;name
1380 | my_module.product_wallet;Portefeuille
1381 | my_module.product_bicyle;Bicyclette
1382 | ```
1383 |
1384 | Import this file by setting the language in the context.
1385 | ```
1386 | odoo_import_thread.py -c connection.conf --file=product_template_FR.csv --model=product.template --context="{'lang': 'fr_FR'}"
1387 | ```
1388 | And it's done.
1389 |
1390 | Actually, this doesn't import the translations explicitly. What happens is an update of the product names in the fr_FR language, which is equivalent and a more convenient way to build the translation file: first because it is based on the legacy file, and above all because it lets the ORM manage the translation process.
1391 |
1392 | ### Importing Account Move Lines
1393 |
1394 | This is an interesting use case of a one2many relationship. Let's take a look at the simplified relationship model of `account.move` and `account.move.line`.
1395 |
1396 | 
1397 |
1398 | As a rule of thumb, avoid importing one2many relationships because the inverse many2one relation always exists. So one strategy could be to first import all the account.move in one CSV file, then all the account.move.line in another CSV file. But here this strategy doesn't work, because there is a balance check on the account.move. *One account.move must have at least two account.move.line where the credit amount of the one balances the debit amount of the other.*
1399 |
1400 | That means the import of the first account.move.line will fail because its move is not balanced yet. And the import of the second account.move.line will also fail because the first one is missing, and so on.
1401 |
1402 | One possible solution is to use a context with `'check_move_validity': False`. In that case you can first import all the account.move, then all the account.move.line. As the balance check is disabled, you must make sure that all the account.move are well balanced.
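1403 |
1404 | A hedged sketch of that first approach (file names are assumptions):
1405 | ```
1406 | odoo_import_thread.py -c connection.conf --file=account_move.csv --model=account.move --context="{'check_move_validity': False}"
1407 | odoo_import_thread.py -c connection.conf --file=account_move_line.csv --model=account.move.line --context="{'check_move_validity': False}"
1408 | ```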
1403 |
1404 | Another solution is to import the account.move and the account.move.line together. To do this, you can build a mixed CSV file like this, say `account_move.csv`.
1405 |
1406 | 
1407 |
1408 | Then import it with the `--o2m` option.
1409 | ```
1410 | odoo_import_thread.py -c connection.conf --file=account_move.csv --model=account.move --o2m
1411 | ```
1412 |
1413 | The option `--o2m` prevents the batch from being cut in the middle of an account.move: each account.move is imported together with its account.move.line at the same time, so the balance check can be performed at the end of the transaction.
1414 |
1415 | The drawback of this method is that you cannot set an XML_ID on an account.move.line. See [--o2m](#o2m) for more details.
1416 | # Exporting Data
1417 |
1418 | The Odoo CSV Import Export library provides the script `odoo_export_thread.py` to export data from Odoo. This script has several options. Type the command `odoo_export_thread.py --help` to get the usage.
1419 |
1420 | ```
1421 | usage: odoo_export_thread.py [-h] -c CONFIG --file FILENAME --model MODEL
1422 | --field FIELDS [--domain DOMAIN]
1423 | [--worker WORKER] [--size BATCH_SIZE]
1424 | [-s SEPARATOR] [--context CONTEXT]
1425 |
1426 | Import data in batch and in parallel
1427 |
1428 | optional arguments:
1429 | -h, --help show this help message and exit
1430 | -c CONFIG, --config CONFIG
1431 | Configuration File that contains connection parameters
1432 | --file FILENAME Output File
1433 | --model MODEL Model to Export
1434 | --field FIELDS Fields to Export
1435 | --domain DOMAIN Filter
1436 | --worker WORKER Number of simultaneous connection
1437 | --size BATCH_SIZE Number of line to import per connection
1438 | -s SEPARATOR, --sep SEPARATOR
1439 | CSV separator
1440 | --context CONTEXT context that will be passed to the load function, need
1441 | to be a valid python dict
1442 | ```
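1443 |
1444 | The export can also be driven from Python through `export_threaded.export_data` (see `odoo_csv_tools/export_threaded.py` for the exact signature). A minimal sketch; the domain and field list are assumptions to adapt to your data:
1445 | ```
1446 | from odoo_csv_tools.export_threaded import export_data
1447 |
1448 | export_data(
1449 |     'conf/connection.conf',        # connection settings
1450 |     'res.partner',                 # model to export
1451 |     [('customer', '=', True)],     # domain filter (assumption: this field exists in your version)
1452 |     ['id', 'name', 'email'],       # fields to export
1453 |     output='res_partner.csv',
1454 |     max_connection=4,              # parallel connections
1455 |     batch_size=200,                # records per batch
1456 | )
1457 | ```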
1443 | (To be continued...)
1444 |
1445 | # Requirements
1446 | - [odoo-client-lib](https://github.com/odoo/odoo-client-lib)
1447 |
--------------------------------------------------------------------------------
/README.rst:
--------------------------------------------------------------------------------
1 | Odoo CSV Import Export Library
2 | ==============================
3 | This library provides tools to easily and quickly import data into Odoo or export data from Odoo using CSV files.
4 | It also provides a framework to manipulate data from CSV files.
5 |
6 | Requirements
7 | --------------
8 | * openerp-client-lib
9 |
--------------------------------------------------------------------------------
/odoo_convert_path_to_image.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | #-*- coding: utf-8 -*-
3 | '''
4 | Copyright (C) Thibault Francois
5 |
6 | This program is free software: you can redistribute it and/or modify
7 | it under the terms of the GNU Lesser General Public License as
8 | published by the Free Software Foundation, version 3.
9 |
10 | This program is distributed in the hope that it will be useful, but
11 | WITHOUT ANY WARRANTY; without even the implied warranty of
12 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 | Lesser General Public License for more details.
14 |
15 | You should have received a copy of the GNU Lesser General Public License
16 | along with this program. If not, see <http://www.gnu.org/licenses/>.
17 | '''
18 |
19 | import argparse
20 | import os
21 | from odoo_csv_tools.lib import mapper
22 | from odoo_csv_tools.lib.transform import Processor
23 |
24 | if __name__ == '__main__':
25 | parser = argparse.ArgumentParser(description='Convert csv column Image Path into base64')
26 | parser.add_argument('file', metavar='F', help='file to convert')
27 | parser.add_argument('--path', dest='path', help='Image Path Prefix, default is the working directory')
28 | parser.add_argument('--out', dest='out', help='name of the result file, default out.csv', default="out.csv")
29 | parser.add_argument('-f', dest='fields', help='Fields to convert from path to base64, comma separated', required = True)
30 | args = parser.parse_args()
31 |
32 | file_csv = args.file
33 | out_csv = args.out
34 | path = args.path
35 | fields = args.fields
36 | if not path:
37 | path = os.getcwd()
38 | if not path.endswith(os.sep):
39 | path += os.sep
40 |
41 |
42 | processor = Processor(file_csv)
43 | mapping = processor.get_o2o_mapping()
44 | for f in fields.split(','):
45 | f = f.strip()
46 | mapping[f] = mapper.binary_map(mapper.remove_sep_mapper(f), path)
47 | processor.process(mapping, out_csv, {}, 'list')
48 | processor.write_to_file("")
49 |
50 |
--------------------------------------------------------------------------------
/odoo_convert_url_to_image.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | #-*- coding: utf-8 -*-
3 | '''
4 | Copyright (C) Thibault Francois
5 |
6 | This program is free software: you can redistribute it and/or modify
7 | it under the terms of the GNU Lesser General Public License as
8 | published by the Free Software Foundation, version 3.
9 |
10 | This program is distributed in the hope that it will be useful, but
11 | WITHOUT ANY WARRANTY; without even the implied warranty of
12 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 | Lesser General Public License for more details.
14 |
15 | You should have received a copy of the GNU Lesser General Public License
16 | along with this program. If not, see <http://www.gnu.org/licenses/>.
17 | '''
18 |
19 | import argparse
20 | import os
21 | from odoo_csv_tools.lib import mapper
22 | from odoo_csv_tools.lib.transform import Processor
23 |
24 | if __name__ == '__main__':
25 | parser = argparse.ArgumentParser(description='Convert csv column Image URL into base64')
26 | parser.add_argument('file', metavar='F', help='file to convert')
27 | parser.add_argument('--out', dest='out', help='name of the result file, default out.csv', default="out.csv")
28 | parser.add_argument('-f', dest='fields', help='Fields to convert from path to base64, comma separated', required = True)
29 | args = parser.parse_args()
30 |
31 | file_csv = args.file
32 | out_csv = args.out
33 | fields = args.fields
34 |
35 | processor = Processor(file_csv)
36 | mapping = processor.get_o2o_mapping()
37 | for f in fields.split(','):
38 | f = f.strip()
39 | mapping[f] = mapper.binary_url(f, verbose=True)
40 | processor.process(mapping, out_csv, {}, 'list')
41 | processor.write_to_file("")
42 |
43 |
--------------------------------------------------------------------------------
/odoo_csv_tools/__init__.py:
--------------------------------------------------------------------------------
1 | from . import lib
2 | from . import export_threaded
3 | from . import import_threaded
4 |
--------------------------------------------------------------------------------
/odoo_csv_tools/export_threaded.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | '''
3 | Copyright (C) Thibault Francois
4 |
5 | This program is free software: you can redistribute it and/or modify
6 | it under the terms of the GNU Lesser General Public License as
7 | published by the Free Software Foundation, version 3.
8 |
9 | This program is distributed in the hope that it will be useful, but
10 | WITHOUT ANY WARRANTY; without even the implied warranty of
11 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 | Lesser General Public License for more details.
13 |
14 | You should have received a copy of the GNU Lesser General Public License
15 | along with this program. If not, see <http://www.gnu.org/licenses/>.
16 | '''
17 | import sys
18 | import csv
19 |
20 | from time import time
21 |
22 | from .lib import conf_lib
23 | from .lib.conf_lib import log_error, log_info
24 | from .lib.internal.rpc_thread import RpcThread
25 | from .lib.internal.csv_reader import UnicodeWriter
26 | from .lib.internal.io import ListWriter, open_write
27 | from .lib.internal.tools import batch
28 |
29 |
30 | if sys.version_info >= (3, 0, 0):
31 | from xmlrpc.client import Fault
32 | else:
33 | from xmlrpclib import Fault
34 |
35 | csv.field_size_limit(2**31-1)
36 |
37 | class RPCThreadExport(RpcThread):
38 |
39 | def __init__(self, max_connection, model, header, writer, batch_size=20, context=None):
40 | super(RPCThreadExport, self).__init__(max_connection)
41 | self.model = model
42 | self.header = header
43 | self.batch_size = batch_size
44 | self.writer = writer
45 | self.context = context
46 | self.result = {}
47 |
48 | def launch_batch(self, data_ids, batch_number):
49 | def launch_batch_fun(data_ids, batch_number, check=False):
50 | st = time()
51 | try:
52 | self.result[batch_number] = self.model.export_data(data_ids, self.header, context=self.context)['datas']
53 | except Fault as e:
54 | log_error("export %s failed" % batch_number)
55 | log_error(e.faultString)
56 | except Exception as e:
57 | log_info("Unknown Problem")
58 | exc_type, exc_value, _ = sys.exc_info()
59 | # traceback.print_tb(exc_traceback, file=sys.stdout)
60 | log_error(exc_type)
61 | log_error(exc_value)
62 | log_info("time for batch %s: %s" % (batch_number, time() - st))
63 |
64 | self.spawn_thread(launch_batch_fun, [data_ids, batch_number], {})
65 |
66 | def write_file(self, file_writer):
67 | file_writer.writerow(self.header)
68 | for key in self.result:
69 | file_writer.writerows(self.result[key])
70 |
71 |
72 | def export_data(config_file, model, domain, header, context=None, output=None, max_connection=1, batch_size=100,
73 | separator=';', encoding='utf-8'):
74 | object_registry = conf_lib.get_server_connection(config_file).get_model(model)
75 |
76 | if output:
77 | file_result = open_write(output, encoding=encoding)
78 | writer = UnicodeWriter(file_result, delimiter=separator, encoding=encoding, quoting=csv.QUOTE_ALL)
79 | else:
80 | writer = ListWriter()
81 |
82 | rpc_thread = RPCThreadExport(int(max_connection), object_registry, header, writer, batch_size, context)
83 | st = time()
84 |
85 | ids = object_registry.search(domain, context=context)
86 | i = 0
87 | for b in batch(ids, batch_size):
88 | batch_ids = [l for l in b]
89 | rpc_thread.launch_batch(batch_ids, i)
90 | i += 1
91 |
92 | rpc_thread.wait()
93 | log_info("%s %s exported, total time %s second(s)" % (len(ids), model, (time() - st)))
94 | log_info("Writing file")
95 | rpc_thread.write_file(writer)
96 | if output:
97 | file_result.close()
98 | return False, False
99 | else:
100 | return writer.header, writer.data
101 |
--------------------------------------------------------------------------------
/odoo_csv_tools/import_threaded.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | '''
3 | Copyright (C) Thibault Francois
4 |
5 | This program is free software: you can redistribute it and/or modify
6 | it under the terms of the GNU Lesser General Public License as
7 | published by the Free Software Foundation, version 3.
8 |
9 | This program is distributed in the hope that it will be useful, but
10 | WITHOUT ANY WARRANTY; without even the implied warranty of
11 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 | Lesser General Public License for more details.
13 |
14 | You should have received a copy of the GNU Lesser General Public License
15 | along with this program. If not, see <http://www.gnu.org/licenses/>.
16 | '''
17 |
18 | import sys
19 | import csv
20 |
21 | from time import time
22 |
23 | from .lib import conf_lib
24 | from .lib.conf_lib import log_error, log_info, log
25 | from .lib.internal.rpc_thread import RpcThread
26 | from .lib.internal.io import ListWriter, open_read, open_write
27 | from .lib.internal.csv_reader import UnicodeReader, UnicodeWriter
28 | from .lib.internal.tools import batch
29 |
30 | if sys.version_info >= (3, 0, 0):
31 | from xmlrpc.client import Fault
32 | else:
33 | from xmlrpclib import Fault
34 | from builtins import range
35 |
36 | csv.field_size_limit(2**31-1)
37 |
38 |
39 | class RPCThreadImport(RpcThread):
40 |
41 | def __init__(self, max_connection, model, header, writer, batch_size=20, context=None):
42 | super(RPCThreadImport, self).__init__(max_connection)
43 | self.model = model
44 | self.header = header
45 | self.batch_size = batch_size
46 | self.writer = writer
47 | self.context = context
48 |
49 | def launch_batch(self, data_lines, batch_number, check=False, o2m=False):
50 | def launch_batch_fun(lines, batch_number, check=False):
51 | i = 0
52 | batch_size = len(lines) if o2m else self.batch_size
53 | for lines_batch in batch(lines, batch_size):
54 | lines_batch = [l for l in lines_batch]
55 | self.sub_batch_run(lines_batch, batch_number, i, len(lines), check=check)
56 | i += 1
57 |
58 | self.spawn_thread(launch_batch_fun, [data_lines, batch_number], {'check': check})
59 |
60 | def sub_batch_run(self, lines, batch_number, sub_batch_number, total_line_nb, check=False):
61 | success = False
62 |
63 | st = time()
64 | try:
65 | success = self._send_rpc(lines, batch_number, sub_batch_number, check=check)
66 | except Fault as e:
67 | log_error("Line %s %s failed" % (batch_number, sub_batch_number))
68 | log_error(e.faultString)
69 | except ValueError as e:
70 | log_error("Line %s %s failed value error" % (batch_number, sub_batch_number))
71 | except Exception as e:
72 | log_info("Unknown Problem")
73 | exc_type, exc_value, _ = sys.exc_info()
74 | # traceback.print_tb(exc_traceback, file=sys.stdout)
75 | log_error(exc_type)
76 | log_error(exc_value)
77 |
78 | if not success:
79 | self.writer.writerows(lines)
80 |
81 | log_info("time for batch %s - %s of %s : %s" % (
82 | batch_number, (sub_batch_number + 1) * self.batch_size, total_line_nb, time() - st))
83 |
84 | def _send_rpc(self, lines, batch_number, sub_batch_number, check=False):
85 | res = self.model.load(self.header, lines, context=self.context)
86 | if res['messages']:
87 | for msg in res['messages']:
88 | log_error('batch %s, %s' % (batch_number, sub_batch_number))
89 | log_error(msg)
90 | log_error(lines[msg['record']])
91 | return False
92 | if len(res['ids']) != len(lines) and check:
93 | log_error("number of record import is different from the record to import, probably duplicate xml_id")
94 | return False
95 |
96 | return True
97 |
98 |
99 | def filter_line_ignore(ignore, header, line):
100 | new_line = []
101 | for k, val in zip(header, line):
102 | if k not in ignore:
103 | new_line.append(val)
104 | return new_line
105 |
106 |
107 | def filter_header_ignore(ignore, header):
108 | new_header = []
109 | for val in header:
110 | if val not in ignore:
111 | new_header.append(val)
112 | return new_header
113 |
114 |
115 | def read_file(file_to_read, delimiter=';', encoding='utf-8', skip=0):
116 | def get_real_header(header):
117 | """ Get real header cut at the first empty column """
118 | new_header = []
119 | for head in header:
120 | if head:
121 | new_header.append(head)
122 | else:
123 | break
124 | return new_header
125 |
126 | def check_id_column(header):
127 | try:
128 | header.index('id')
129 | except ValueError as ve:
130 | log_error("No External Id (id) column defined, please add one")
131 | raise ve
132 |
133 | def skip_line(reader):
134 | log_info("Skipping until line %s excluded" % skip)
135 | for _ in range(1, skip):
136 | reader.next()
137 |
138 | log('open %s' % file_to_read)
139 | file_ref = open_read(file_to_read, encoding=encoding)
140 | reader = UnicodeReader(file_ref, delimiter=delimiter, encoding=encoding)
141 | header = next(reader)
142 | header = get_real_header(header)
143 | check_id_column(header)
144 | skip_line(reader)
145 | data = [l for l in reader]
146 | return header, data
147 |
148 |
149 | """
150 | Splitting helper method
151 | """
152 |
153 |
154 | def split_sort(split, header, data):
155 | split_index = 0
156 | if split:
157 | try:
158 | split_index = header.index(split)
159 | except ValueError as ve:
160 | log("column %s not defined" % split)
161 | raise ve
162 | data = sorted(data, key=lambda d: d[split_index])
163 | return data, split_index
164 |
165 |
166 | def do_not_split(split, previous_split_value, split_index, line, o2m=False, id_index=0):
167 | # Do not split if you want to keep the one2many line with it's parent
168 | # The column id should be empty
169 | if o2m and not line[id_index]:
170 | return True
171 |
172 | if not split: # If no split no need to continue
173 | return False
174 |
175 | split_value = line[split_index]
176 | if split_value != previous_split_value: # Different Value no need to not split
177 | return False
178 |
179 | return True
180 |
181 |
182 | def import_data(config_file, model, header=None, data=None, file_csv=None, context=None, fail_file=False,
183 | encoding='utf-8', separator=";", ignore=False, split=False, check=True, max_connection=1,
184 | batch_size=10, skip=0, o2m=False):
185 | """
186 |     header and data are mandatory if file_csv is not provided
187 |
188 | """
189 | ignore = ignore or []
190 | context = context or {}
191 |
192 | if file_csv:
193 | header, data = read_file(file_csv, delimiter=separator, encoding=encoding, skip=skip)
194 | fail_file = fail_file or file_csv + ".fail"
195 | file_result = open_write(fail_file, encoding=encoding)
196 |
197 | if not header or data == None:
198 | raise ValueError("Please provide either a data file or a header and data")
199 |
200 | object_registry = conf_lib.get_server_connection(config_file).get_model(model)
201 |
202 | if file_csv:
203 | writer = UnicodeWriter(file_result, delimiter=separator, encoding=encoding, quoting=csv.QUOTE_ALL)
204 | else:
205 | writer = ListWriter()
206 |
207 | writer.writerow(filter_header_ignore(ignore, header))
208 | if file_csv:
209 | file_result.flush()
210 | rpc_thread = RPCThreadImport(int(max_connection), object_registry, filter_header_ignore(ignore, header), writer,
211 | batch_size, context)
212 | st = time()
213 |
214 | id_index = header.index('id')
215 | data, split_index = split_sort(split, header, data)
216 |
217 | i = 0
218 | previous_split_value = False
219 | while i < len(data):
220 | lines = []
221 | j = 0
222 | while i < len(data) and (
223 | j < batch_size or do_not_split(split, previous_split_value, split_index, data[i], o2m=o2m,
224 | id_index=id_index)):
225 | line = data[i][:len(header)]
226 | lines.append(filter_line_ignore(ignore, header, line))
227 | previous_split_value = line[split_index]
228 | j += 1
229 | i += 1
230 | batch_number = split and "[%s] - [%s]" % (
231 | rpc_thread.thread_number(), previous_split_value) or "[%s]" % rpc_thread.thread_number()
232 | rpc_thread.launch_batch(lines, batch_number, check, o2m=o2m)
233 |
234 | rpc_thread.wait()
235 | if file_csv:
236 | file_result.close()
237 |
238 | log_info("%s %s imported, total time %s second(s)" % (len(data), model, (time() - st)))
239 | if file_csv:
240 | return False, False
241 | else:
242 | return writer.header, writer.data
243 |
--------------------------------------------------------------------------------
/odoo_csv_tools/lib/__init__.py:
--------------------------------------------------------------------------------
1 | from . import internal
2 | from . import conf_lib
3 | from . import workflow
4 | from . import checker
5 | from . import mapper
6 | from . import transform
7 |
--------------------------------------------------------------------------------
/odoo_csv_tools/lib/checker.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | '''
3 | Created on 29 feb. 2016
4 |
5 | @author: Thibault Francois
6 | '''
7 | #TODO
8 | import re
9 |
10 | def id_validity_checker(id_field, pattern, null_values=['NULL']):
11 | def check_id_validity(header, data):
12 | regular = re.compile(pattern)
13 | res = True
14 | for i, line in enumerate(data):
15 | line = [s.strip() if s.strip() not in null_values else '' for s in line]
16 | line_dict = dict(zip(header, line))
17 | if not regular.match(line_dict[id_field]):
18 | print("Check Failed Id Validity", i+1, line_dict[id_field])
19 | res = False
20 | return res
21 | return check_id_validity
22 |
23 | def line_length_checker(length):
24 | def check_line_length(header, data):
25 | i = 1
26 | res = True
27 | for line in data:
28 | i+=1
29 | if len(line) != length:
30 | print("Check Failed", i, "Line Length", len(line))
31 | res = False
32 | return res
33 | return check_line_length
34 |
35 | def line_number_checker(line_number):
36 |     def check_line_number(header, data):
37 |         if len(data) + 1 != line_number:
38 |             print("Check Line Number Failed %s instead of %s" % (len(data) + 1, line_number))
39 |             return False
40 |         else:
41 |             return True
42 |     return check_line_number
43 |
44 | def cell_len_checker(max_cell_len):
45 | def check_max_cell_len(header, data):
46 | res = True
47 | for i, line in enumerate(data):
48 | for ele in line:
49 | if len(ele) > max_cell_len:
50 | print("Check Failed", i + 1, "Cell Length", len(ele))
51 | print(line)
52 | res = False
53 | return res
54 | return check_max_cell_len
55 |
--------------------------------------------------------------------------------
/odoo_csv_tools/lib/conf_lib.py:
--------------------------------------------------------------------------------
1 | import odoolib
2 | import sys
3 | if sys.version_info >= (3, 0, 0):
4 | import configparser as ConfigParser
5 | else:
6 | import ConfigParser
7 | import logging
9 |
10 |
11 | def get_server_connection(config_file):
12 | config = ConfigParser.RawConfigParser({'protocol' : 'xmlrpc', 'port' : 8069})
13 | config.read(config_file)
14 |
15 | hostname = config.get('Connection', 'hostname')
16 | database = config.get('Connection', 'database')
17 | login = config.get('Connection', 'login')
18 | password = config.get('Connection', 'password')
19 | protocol = config.get('Connection', 'protocol')
20 | port = int(config.get('Connection', 'port'))
21 | uid = int(config.get('Connection', 'uid'))
22 | return odoolib.get_connection(hostname=hostname, database=database, login=login, password=password, protocol=protocol, port=port, user_id=uid)
23 |
24 | def init_logger():
25 | logger_err = logging.getLogger("error")
26 | logger_err.setLevel(logging.INFO)
27 | err = logging.StreamHandler(sys.stderr)
28 | logger_err.addHandler(err)
29 | logger = logging.getLogger("info")
30 | logger.setLevel(logging.INFO)
31 | out = logging.StreamHandler(sys.stdout)
32 | logger.addHandler(out)
33 |
34 | def log_info(msg):
35 | logging.getLogger("info").info(msg)
36 |
37 | def log_error(msg):
38 | logging.getLogger("error").info(msg)
39 |
40 | def log(msg):
41 | log_info(msg)
42 | log_error(msg)
43 |
44 | init_logger()
45 |
--------------------------------------------------------------------------------
/odoo_csv_tools/lib/internal/__init__.py:
--------------------------------------------------------------------------------
1 | from . import exceptions
2 | from . import tools
3 | from . import csv_reader
4 | from . import io
5 | from . import rpc_thread
6 |
--------------------------------------------------------------------------------
/odoo_csv_tools/lib/internal/csv_reader.py:
--------------------------------------------------------------------------------
1 | '''
2 | Created on 16 May 2014
3 |
4 | @author: openerp
5 | '''
6 | from __future__ import absolute_import
7 | import sys
8 | #import csv, codecs
9 | if sys.version_info >= (3, 0, 0):
10 | import csv
11 | else:
12 | import unicodecsv as csv
13 | from io import StringIO
14 | import threading
15 |
16 | class UnicodeReader:
17 | """
18 | A CSV reader which will iterate over lines in the CSV file "f",
19 | which is encoded in the given encoding.
20 | """
21 |
22 | def __init__(self, f, dialect=csv.excel, encoding="utf-8", **kwds):
23 | self.reader = csv.reader(f, dialect=dialect, **kwds)
24 |
25 | def next(self):
26 | #For python2
27 | return self.reader.next()
28 |
29 | def __next__(self):
30 | #For python3
31 | return self.reader.__next__()
32 |
33 | def __iter__(self):
34 | return self
35 |
36 |
37 | class UnicodeWriter:
38 | """
39 | A CSV writer which will write rows to CSV file "f",
40 | which is encoded in the given encoding.
41 | """
42 |
43 | def __init__(self, f, dialect=csv.excel, encoding="utf-8", **kwds):
44 | # Redirect output to a queue
45 | self.stream = f
46 |         self.writer = csv.writer(f, dialect=dialect, **kwds)
47 | self.lock = threading.RLock()
48 |
49 | def writerow(self, row):
50 | self.lock.acquire()
51 | self.writer.writerow(row)
52 | self.lock.release()
53 |
54 | def writerows(self, rows):
55 | self.lock.acquire()
56 | self.writer.writerows(rows)
57 | self.stream.flush()
58 | self.lock.release()
59 |
--------------------------------------------------------------------------------
/odoo_csv_tools/lib/internal/exceptions.py:
--------------------------------------------------------------------------------
1 | '''
2 | Created on 9 sept. 2016
3 |
4 | @author: Thibault Francois
5 | '''
6 |
7 | class SkippingException(Exception):
8 | def __init__(self, message):
9 | self.message = message
10 |
--------------------------------------------------------------------------------
/odoo_csv_tools/lib/internal/io.py:
--------------------------------------------------------------------------------
1 | '''
2 | Created on 10 sept. 2016
3 |
4 | @author: mythrys
5 | '''
6 | from __future__ import absolute_import
7 |
8 | import csv
9 | import os
10 | import sys
11 | from . csv_reader import UnicodeWriter, UnicodeReader
12 |
13 | """
14 | Compatibility layer between python 2.7 and python 3
15 | """
16 | def is_string(f):
17 | if sys.version_info >= (3, 0, 0):
18 | return isinstance(f, str)
19 | else:
20 | return isinstance(f, basestring)
21 |
22 | def open_read(f, encoding='utf-8'):
23 | if not is_string(f):
24 | return f
25 | if sys.version_info >= (3, 0, 0):
26 | return open(f, 'r', newline='', encoding=encoding)
27 | else:
28 | return open(f, 'r')
29 |
30 | def open_write(f, encoding='utf-8'):
31 | if not is_string(f):
32 | return f
33 | if sys.version_info >= (3, 0, 0):
34 | return open(f, "w", newline='', encoding=encoding)
35 | else:
36 | return open(f, "w")
37 |
38 | def write_csv(filename, header, data, encoding="utf-8"):
39 | file_result = open_write(filename, encoding=encoding)
40 | c = UnicodeWriter(file_result, delimiter=';', quoting=csv.QUOTE_ALL, encoding=encoding)
41 | c.writerow(header)
42 | for d in data:
43 | c.writerow(d)
44 | file_result.close()
45 |
46 | def write_file(filename=None, header=None, data=None, fail=False, model="auto",
47 | launchfile="import_auto.sh", worker=1, batch_size=10, init=False, encoding="utf-8",
48 | conf_file=False, groupby='', sep=";", python_exe='', path='', context=None, ignore=""):
49 | def get_model():
50 | if model == "auto":
51 | return filename.split(os.sep)[-1][:-4]
52 | else:
53 | return model
54 |
55 | context = '--context="%s"' % str(context) if context else ''
56 | conf_file = conf_file or "%s%s%s" % ('conf', os.sep, 'connection.conf')
57 | write_csv(filename, header, data, encoding=encoding)
58 | if not launchfile:
59 | return
60 |
61 | mode = init and 'w' or 'a'
62 | with open(launchfile, mode) as myfile:
63 | myfile.write("%s %sodoo_import_thread.py -c %s --file=%s --model=%s --encoding=%s --worker=%s --size=%s --groupby=%s --ignore=%s --sep=\"%s\" %s\n" %
64 | (python_exe, path, conf_file, filename, get_model(), encoding, worker, batch_size, groupby, ignore, sep, context))
65 | if fail:
66 | myfile.write("%s %sodoo_import_thread.py -c %s --fail --file=%s --model=%s --encoding=%s --ignore=%s --sep=\"%s\" %s\n" %
67 | (python_exe, path, conf_file, filename, get_model(), encoding, ignore, sep, context))
68 |
69 | class ListWriter(object):
70 | def __init__(self):
71 | self.data = []
72 | self.header = []
73 |
74 | def writerow(self, header):
75 | self.header = list(header)
76 |
77 | def writerows(self, line):
78 | self.data.extend(list(line))
79 |
--------------------------------------------------------------------------------
/odoo_csv_tools/lib/internal/rpc_thread.py:
--------------------------------------------------------------------------------
1 | #-*- coding: utf-8 -*-
2 | '''
3 | Created on 19 august 2016
4 |
5 | @author: Thibault Francois
6 | '''
7 |
8 | import threading
9 |
10 | class RpcThread(object):
11 |
12 | def __init__(self, max_connection):
13 | self.semaphore = threading.BoundedSemaphore(max_connection)
14 | self.max_thread_semaphore = threading.BoundedSemaphore(max_connection * 4)
15 | self.thread_list = []
16 |
17 | def spawn_thread(self, fun, args, kwarg=None):
18 | def wrapper(args, kwarg):
19 | kwarg = kwarg or {}
20 | self.semaphore.acquire()
21 | try:
22 | fun(*args, **kwarg)
23 | except:
24 | self.semaphore.release()
25 | self.max_thread_semaphore.release()
26 | raise
27 | self.semaphore.release()
28 | self.max_thread_semaphore.release()
29 | self.max_thread_semaphore.acquire()
30 |
31 | thread = threading.Thread(None, wrapper, None, [args, kwarg], {})
32 | thread.start()
33 | self.thread_list.append(thread)
34 |
35 | def wait(self):
36 | for t in self.thread_list:
37 | t.join()
38 |
39 | def thread_number(self):
40 | return len(self.thread_list)
41 |
--------------------------------------------------------------------------------
/odoo_csv_tools/lib/internal/tools.py:
--------------------------------------------------------------------------------
1 | '''
2 | Created on 9 sept. 2016
3 |
4 | @author: Thibault Francois
5 | '''
6 | from itertools import islice, chain
7 |
8 | def batch(iterable, size):
9 | sourceiter = iter(iterable)
10 | while True:
11 | batchiter = islice(sourceiter, size)
12 | try:
13 | yield chain([next(batchiter)], batchiter)
14 | except StopIteration:
15 | return
16 | """
17 | Data formatting tools
18 | """
19 | def to_xmlid(name):
20 | return name.replace('.', '_').replace(',', '_').replace('\n', '_').replace(' ', '_').strip()
21 |
22 | def list_to_xml_id(names):
23 | return '_'.join([to_xmlid(name) for name in names])
24 |
25 | def to_m2o(PREFIX, value, default=''):
26 | if not value:
27 | return default
28 | return PREFIX + '.' + to_xmlid(value)
29 |
30 | def to_m2m(PREFIX, value):
31 | if not value:
32 | return ''
33 |
34 | ids = []
35 | for val in value.split(','):
36 | if val.strip():
37 | ids.append(PREFIX + '.' + to_xmlid(val))
38 | return ','.join(ids)
39 |
40 | def generate_attribute_list(PREFIX, *attributes):
41 | header = ['id', 'name']
42 | lines = set()
43 | for att in attributes:
44 | lines.add((to_m2o(PREFIX, att), att))
45 | return header, lines
46 |
47 | """
48 | Secondary data file helper
49 |
50 | """
51 | class ReprWrapper(object):
52 | def __init__(self, repr_str, func):
53 | self._repr = repr_str
54 | self._func = func
55 |
56 | def __call__(self, *args, **kw):
57 | return self._func(*args, **kw)
58 |
59 | def __repr__(self):
60 | return self._repr
61 |
62 | class AttributeLineDict:
63 | def __init__(self, attribute_list_ids, id_gen_fun):
64 | self.data = {}
65 | self.att_list = attribute_list_ids
66 | self.id_gen = id_gen_fun
67 |
68 | def add_line(self, line, header):
69 | """
70 | line = ['product_tmpl_id/id' : id, 'attribute_id/id' : dict (att : id), 'value_ids/id' : dict(att: id)]
71 | """
72 | line_dict = dict(zip(header, line))
73 | if self.data.get(line_dict['product_tmpl_id/id']):
74 | for att_id, att in self.att_list:
75 | if not line_dict['attribute_id/id'].get(att):
76 | continue
77 | template_info = self.data[line_dict['product_tmpl_id/id']]
78 | template_info.setdefault(att_id, [line_dict['value_ids/id'][att]]).append(line_dict['value_ids/id'][att])
79 | else:
80 | d = {}
81 | for att_id, att in self.att_list:
82 | if line_dict['attribute_id/id'].get(att):
83 | d[att_id] = [line_dict['value_ids/id'][att]]
84 | self.data[line_dict['product_tmpl_id/id']] = d
85 |
86 | def generate_line(self):
87 | lines_header = ['id', 'product_tmpl_id/id', 'attribute_id/id', 'value_ids/id']
88 | lines_out = []
89 | for template_id, attributes in self.data.items():
90 | if not template_id:
91 | continue
92 | for attribute, values in attributes.items():
93 | line = [self.id_gen(template_id, attributes), template_id, attribute, ','.join(values)]
94 | lines_out.append(line)
95 | return lines_header, lines_out
96 |
--------------------------------------------------------------------------------
/odoo_csv_tools/lib/mapper.py:
--------------------------------------------------------------------------------
1 | """
2 | Mapper
3 | """
4 | from . internal.tools import to_m2m, to_m2o
5 | from . internal.io import is_string
6 | from . internal.exceptions import SkippingException
7 | import base64
8 | import os
9 | import requests
10 |
11 | def str_to_mapper(field):
12 | if is_string(field):
13 | return val(field)
14 | return field
15 |
16 | def list_to_mapper(args):
17 | return [val(f) if is_string(f) else f for f in args]
18 |
19 |
20 | def field(col):
21 | """ Return the col name if the col value for the given line is not empty
22 | Use for product.attribute mapping
23 | """
24 | def field_fun(line):
25 | return col if line[col] else ''
26 | return field_fun
27 |
28 | def const(value):
29 | def const_fun(line):
30 | return value
31 | return const_fun
32 |
33 | def val(field, default='', postprocess=lambda x: x, skip=False):
34 | def val_fun(line):
35 | if not line[field] and skip:
36 | raise SkippingException("Missing Value for %s" % field)
37 | return postprocess(line.get(field, default) or default)
38 | return val_fun
39 |
40 | def val_fallback(field, fallback_file, default='', postprocess=lambda x: x, skip=False):
41 | def val_fun(line):
42 | if not line[field] and not line[fallback_file] and skip:
43 | raise SkippingException("Missing Value for %s" % field)
44 | value = line[field] or line[fallback_file] or default
45 | return postprocess(value)
46 | return val_fun
47 |
48 | def val_label(field, default='', postprocess=lambda x: x, skip=False):
49 | val_m = val(field, default=default, postprocess=postprocess, skip=skip)
50 | def val_label_fun(line):
51 | return "%s : %s" % (field, val_m(line))
52 | return val_label_fun
53 |
54 | def concat_mapper(separator, *mapper):
55 |     def concat_fun(line):
56 |         return separator.join([m(line) for m in mapper if m(line)])
57 |     return concat_fun
58 |
59 | def concat_mapper_all(separator, *mapper):
60 |     """
61 |     Same as concat_mapper, but if any value in the list of values to concatenate is empty,
62 |     the whole returned value is an empty string.
63 |     Used for product.attribute
64 |     """
65 |     def concat_fun(line):
66 |         values = [m(line) for m in mapper]
67 |         if not all(values):
68 |             return ''
69 |         return separator.join(values)
70 |     return concat_fun
71 |
72 |
73 | def concat(separator, *fields):
74 |     return concat_mapper(separator, *[val(f) for f in fields])
75 |
76 | def concat_field(separator, *fields):
77 |     return concat_mapper(separator, *[val_label(f) for f in fields])
78 |
79 | def concat_field_value_m2m(separator, *args):
80 | def concat_name_value_fun(line):
81 | return ','.join([separator.join([f, line[f]]) for f in args if line[f]])
82 | return concat_name_value_fun
83 |
84 | def map_val(field, mapping, default=''):
85 | return val(field, postprocess=lambda x : mapping.get(x, default))
86 |
87 | def num(field, default='0.0'):
88 | return val(field, default, postprocess=lambda x: x.replace(',', '.'))
89 |
90 | def m2o_map(PREFIX, mapper, default='', skip=False):
91 | def m2o_fun(line):
92 | if skip and not mapper(line):
93 | raise SkippingException("Missing Value for %s" % mapper(line))
94 | return to_m2o(PREFIX, mapper(line), default=default)
95 | return m2o_fun
96 |
97 | def m2o(PREFIX, field, default='', skip=False):
98 | def m2o_fun(line):
99 | if skip and not line[field]:
100 | raise SkippingException("Missing Value for %s" % field)
101 | return to_m2o(PREFIX, line[field], default=default)
102 | return m2o_fun
103 |
104 | def m2m(PREFIX, *args):
105 | """
106 | @param args: list of string that should be included into the m2m field
107 | """
108 | #TODO: add default
109 | def m2m_fun(line):
110 | return ','.join([to_m2m(PREFIX, line[f]) for f in args if line[f]])
111 | return m2m_fun
112 |
113 | def m2m_map(PREFIX, mapper):
114 | """
115 |     @param mapper: mapper whose result will be included into the m2m field
116 | """
117 | #TODO: add default
118 | def m2m_fun(line):
119 | return to_m2m(PREFIX, mapper(line))
120 | return m2m_fun
121 |
122 | def bool_val(field, true_vals=[], false_vals=[]):
123 | def bool_val_fun(line):
124 | if line[field] in true_vals:
125 | return '1'
126 | if line[field] in false_vals:
127 | return '0'
128 | return '1' if line[field] else '0'
129 | return bool_val_fun
130 |
131 | def binary_map(mapper, path_prefix, skip=False, encoding="utf-8"):
132 | def binary_val(line):
133 | field = mapper(line)
134 | path = path_prefix + (mapper(line) or '')
135 | if not os.path.exists(path) or not field:
136 | if skip:
137 | raise SkippingException("Missing File %s for field %s" % (path, field))
138 | return ''
139 |
140 |         with open(path, "rb") as image_file:
141 |             encoded_string = base64.b64encode(image_file.read()).decode(encoding)
142 |         return encoded_string
144 | return binary_val
145 |
146 | def binary(field, path_prefix, skip=False, encoding="utf-8"):
147 | return binary_map(val(field), path_prefix, skip=skip, encoding=encoding)
148 |
149 |
150 |
151 | def binary_url_map(mapper, skip=False, verbose=False, encoding="utf-8"):
152 | def binary_url_fun(line):
153 | url = mapper(line)
154 | if verbose:
155 | print("Fetch %s" % url)
156 | res = requests.get(url)
157 | if not res.status_code == 200:
158 | if skip:
159 | raise SkippingException("Cannot fetch file at url %s" % url)
160 | return ''
161 |
162 | return base64.b64encode(res.content).decode(encoding)
163 | return binary_url_fun
164 |
165 | def binary_url(field, skip=False, verbose=False):
166 | return binary_url_map(val(field), skip=skip, verbose=verbose)
167 |
168 |
169 |
170 | """
171 | Specific to attribute mapper for V9 product.attribute_import
172 | """
173 |
174 | def val_att(att_list):
175 | def val_att_fun(line):
176 | return { att : line[att] for att in att_list if line[att]}
177 | return val_att_fun
178 |
179 | def m2o_att(PREFIX, att_list):
180 | def m2o_att_fun(line):
181 | return { att : to_m2o(PREFIX, '_'.join([att, line[att]])) for att in att_list if line[att]}
182 | return m2o_att_fun
183 |
184 | def m2o_att_name(PREFIX, att_list):
185 | def m2o_att_fun(line):
186 | return { att : to_m2o(PREFIX, att) for att in att_list if line[att]}
187 | return m2o_att_fun
188 |
189 | def m2m_attribute_value(PREFIX, *args):
190 | return m2m_map(PREFIX, concat_field_value_m2m('_', *args))
191 |
192 |
193 | """
194 | Mapper that require rpc Connection (conf_lib)
195 | """
196 | def database_id_mapper(PREFIX, field, connection, skip=False):
197 | def database_id_mapper_fun(line):
198 | res = to_m2o(PREFIX, line[field])
199 | if res:
200 | module, name = res.split('.')
201 | rec = connection.get_model('ir.model.data').search_read([('module', '=', module), ('name', '=', name)], ['res_id'])
202 | if rec and rec[0]['res_id']:
203 | return str(rec[0]['res_id'])
204 | if skip:
205 | raise SkippingException("%s not found" % res)
206 | return ''
207 | return database_id_mapper_fun
208 |
209 | def database_id_mapper_fallback(connection, *fields_mapper, **kwargs):
210 | skip = kwargs.get("skip")
211 | def database_id_mapper_fun(line):
212 | res = [f(line) for f in fields_mapper if f(line)]
213 | if res:
214 | res = res[0]
215 | module, name = res.split('.')
216 | rec = connection.get_model('ir.model.data').search_read([('module', '=', module), ('name', '=', name)], ['res_id'])
217 | if rec and rec[0]['res_id']:
218 | return str(rec[0]['res_id'])
219 | if skip:
220 | raise SkippingException("%s not found" % res)
221 | return ''
222 | return database_id_mapper_fun
223 |
224 | def database_id_mapper_fallback_create(connection, model, *fields_mapper, **kwargs):
225 | skip = kwargs.get("skip")
226 | def database_id_mapper_fun(line):
227 | res = [f(line) for f in fields_mapper if f(line)]
228 | if res:
229 | res = res[0]
230 | module, name = res.split('.')
231 | rec = connection.get_model('ir.model.data').search_read([('module', '=', module), ('name', '=', name)], ['res_id'])
232 | if rec and rec[0]['res_id']:
233 | return str(rec[0]['res_id'])
234 | else:
235 | connection.get_model(model).load(['id', 'name'], [[res, res]], context={'tracking_disable' : True, 'create_product_variant' : True,})
236 | return database_id_mapper_fun(line)
237 | if skip:
238 | raise SkippingException("%s not found" % res)
239 | return ''
240 | return database_id_mapper_fun
241 |
242 |
243 |
244 | #For many2many specific process
245 | def m2m_id_list(PREFIX, *args, **kwargs):
246 | """
247 | @param args: list of string that should be included into the m2m field
248 |     @param const_values: constant values that will be added to all lines
249 | """
250 | const_values = kwargs.get("const_values", [])
251 | def split_m2m_id_fun(line):
252 | """ Return a list of unique element (xml_id, name)
253 | """
254 | map_list = list_to_mapper(args)
255 | value = ','.join([to_m2m(PREFIX, m(line)) for m in map_list if m(line)] + const_values)
256 | s = []
257 | for val in value.split(','):
258 | if val.strip():
259 | s.append(val)
260 | return s
261 | return split_m2m_id_fun
262 |
263 | def m2m_value_list(*args, **kwargs):
264 | """
265 | @param args: list of string that should be included into the m2m field
266 |     @param const_values: constant values that will be added to all lines
267 | """
268 | const_values = kwargs.get("const_values", [])
269 | def split_m2m_value_fun(line):
270 | """ Return a list of unique element value
271 | """
272 | map_list = list_to_mapper(args)
273 | value = ','.join([m(line) for m in map_list if m(line)] + const_values)
274 | s = []
275 | for val in value.split(','):
276 | if val.strip():
277 | s.append(val)
278 | return s
279 | return split_m2m_value_fun
280 |
281 | def remove_sep_mapper(f):
282 | """
283 | @param f: field that will have the starting folder separator removed
284 | """
285 | def remove_sep_mapper_fun(line):
286 | if line[f].startswith(os.sep):
287 | return line[f][len(os.sep):]
288 | else:
289 | return line[f]
290 | return remove_sep_mapper_fun
291 |
292 |
293 | ##############################
294 | # #
295 | # Split Mapper #
296 | # #
297 | ##############################
298 |
299 | def split_line_number(line_nb):
300 | """
301 |     Return a function that can be used by the split method of the Processor class;
302 |     it will split the data every x lines, where x is given by the param line_nb
303 |     :param line_nb:
304 | """
305 | def split(line, i):
306 | return divmod(i, line_nb)[0]
307 | return split
308 |
309 |
310 | def split_file_number(file_nb):
311 | """
312 | Return a function that can be used by the split method of the Processor class;
313 | it splits the data into file_nb files, dealing the lines out round-robin,
314 | so the order of the data is not kept.
315 | :param file_nb: number of files to split the data into
316 | """
317 | def split(line, i):
318 | return divmod(i, file_nb)[1]
319 | return split
320 |
--------------------------------------------------------------------------------
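A minimal usage sketch for the split helpers above (not a file from the repository; the CSV path is an assumption). It shows how they plug into Processor.split():

    from odoo_csv_tools.lib import mapper, transform

    # Load a ';'-separated file.
    processor = transform.Processor('origin/contact.csv')
    # split_line_number(500) keys every row by divmod(i, 500)[0], so each
    # returned Processor holds at most 500 consecutive rows.
    chunks = processor.split(mapper.split_line_number(500))
    for key, chunk in chunks.items():
        print(key, len(chunk.data))

--------------------------------------------------------------------------------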
/odoo_csv_tools/lib/transform.py:
--------------------------------------------------------------------------------
1 | #-*- coding: utf-8 -*-
2 | '''
3 | Created on 10 sept. 2016
4 |
5 | @author: Thibault Francois
6 | '''
7 | import os
8 |
9 | from collections import OrderedDict
10 |
11 | from . internal.csv_reader import UnicodeReader
12 | from . internal.tools import ReprWrapper, AttributeLineDict
13 | from . internal.io import write_file, is_string, open_read
14 | from . internal.exceptions import SkippingException
15 | from . import mapper
16 |
17 |
18 | class Processor(object):
19 | def __init__(self, filename=None, delimiter=";", encoding='utf-8', header=None, data=None, preprocess=lambda header, data: (header, data), conf_file=False):
20 | self.file_to_write = OrderedDict()
21 | if header and data:
22 | self.header = header
23 | self.data = data
24 | elif filename:
25 | self.header, self.data = self.__read_file(filename, delimiter, encoding)
26 | else:
27 | raise Exception("Neither filename nor header and data provided")
28 | self.header, self.data = preprocess(self.header, self.data)
29 | self.conf_file = conf_file
30 |
31 | def check(self, check_fun, message=None):
32 | res = check_fun(self.header, self.data)
33 | if not res:
34 | if message:
35 | print(message)
36 | else:
37 | print("%s failed" % check_fun.__name__)
38 | return res
39 |
40 | def split(self, split_fun):
41 | res = {}
42 | for i, d in enumerate(self.data):
43 | k = split_fun(dict(zip(self.header, d)), i)
44 | res.setdefault(k, []).append(d)
45 | processor_dict = {}
46 | for k, data in res.items():
47 | processor_dict[k] = Processor(header=list(self.header), data=data)
48 | return processor_dict
49 |
50 | def get_o2o_mapping(self):
51 | """Will generate a mapping with 'key' : mapper.val('key') for each key
52 |
53 | you can print it with pprint to get the equivalent python code of the mapping and reuse it in your file
54 |
55 | :return: a dict where the key is a str and the value a mapper.val function;
56 | the key and the field passed to the mapper are identical
57 |
58 | {
59 | 'id' : mapper.val('id'),
60 | .....
61 | }
62 | """
63 | mapping = {}
64 | for column in [h for h in self.header if h]:
65 | map_val_rep = ReprWrapper("mapper.val('%s')" %column, mapper.val(column))
66 | mapping[str(column)] = map_val_rep
67 | return mapping
68 |
69 | def process(self, mapping, filename_out, import_args, t='list', null_values=['NULL', False], verbose=True, m2m=False):
70 | if m2m:
71 | head, data = self.__process_mapping_m2m(mapping, null_values=null_values, verbose=verbose)
72 | else:
73 | head, data = self.__process_mapping(mapping, t=t, null_values=null_values, verbose=verbose)
74 | self._add_data(head, data, filename_out, import_args)
75 | return head, data
76 |
77 | def write_to_file(self, script_filename, fail=True, append=False, python_exe='', path='', encoding='utf-8'):
78 | init = not append
79 | for _, info in self.file_to_write.items():
80 | info_copy = dict(info)
81 | info_copy.update({
82 | 'model' : info.get('model', 'auto'),
83 | 'init' : init,
84 | 'launchfile' : script_filename,
85 | 'fail' : fail,
86 | 'python_exe' : python_exe,
87 | 'path' : path,
88 | 'conf_file' : self.conf_file,
89 | 'encoding': encoding,
90 | })
91 |
92 | write_file(**info_copy)
93 | init = False
94 |
95 | def get_processed_data(self, filename_out):
96 | return self.file_to_write[filename_out]
97 |
98 | def join_file(self, filename, master_key, child_key, header_prefix="child", delimiter=";", encoding='utf-8'):
99 | """
100 | Join another file with the main file defined in the constructor.
101 | Needs a key (column name) on the master file and one on the file to join.
102 | The matching line of the file to join is appended to the end of a master line
103 | when the value of the column master_key matches the value of the column child_key.
104 |
105 | If the key is not found in the file to join, empty cells are appended to the master line.
106 |
107 | A prefix is added (after the merge operation) to all the columns of the child file
108 | to avoid collisions with the header of the master file.
109 |
110 | E.g.: join_file(filename, 'category_id', 'name')
111 | Master file | Child file
112 | name category_id | name color
113 | A A | A Blue
114 | B A | B Red
115 | C B
116 | D B
117 | E C
118 |
119 | Final File
120 | name category_id child_name child_color
121 | A A A Blue
122 | B A A Blue
123 | C B B Red
124 | D B B Red
125 | E C
126 | """
127 | header, data = self.__read_file(filename, delimiter, encoding)
128 | child_key_pos = header.index(child_key)
129 | master_key_pos = self.header.index(master_key)
130 |
131 | data_map = {}
132 | for d in data:
133 | data_map[d[child_key_pos]] = d
134 |
135 | for d in self.data:
136 | if data_map.get(d[master_key_pos]):
137 | d.extend(data_map[d[master_key_pos]])
138 | else:
139 | d.extend([""] * len(header))
140 |
141 | self.header += ["%s_%s" % (header_prefix, h) for h in header]
142 |
143 | ########################################
144 | # #
145 | # Private Method #
146 | # #
147 | ########################################
148 | def __read_file(self, filename, delimiter, encoding):
149 | file_ref = open_read(filename, encoding=encoding)
150 | reader = UnicodeReader(file_ref, delimiter=delimiter, encoding=encoding)
151 | head = next(reader)
152 | data = [d for d in reader]
153 | return head, data
154 |
155 | def __process_mapping(self, mapping, t, null_values, verbose):
156 | """
157 | @param t: type of return, list or set
158 | """
159 | lines_out = [] if t == 'list' else set()
160 | for i, line in enumerate(self.data):
161 | line = [s.strip() if s and s.strip() not in null_values else '' for s in line]
162 | line_dict = dict(zip(self.header, line))
163 | try:
164 | line_out = [mapping[k](line_dict) for k in mapping.keys()]
165 | except SkippingException as e:
166 | if verbose:
167 | print("Skipping", i)
168 | print(e.message)
169 | continue
170 | if t == 'list':
171 | lines_out.append(line_out)
172 | else:
173 | lines_out.add(tuple(line_out))
174 | return mapping.keys(), lines_out
175 |
176 | def __process_mapping_m2m(self, mapping, null_values, verbose):
177 | """
178 |
179 | """
180 | head, data = self.__process_mapping(mapping, 'list', null_values, verbose)
181 | lines_out = set()
182 | for line_out in data:
183 | index_list = []
184 | zip_list = []
185 | for index, value in enumerate(line_out):
186 | if isinstance(value, list):
187 | index_list.append(index)
188 | zip_list.append(value)
189 | values_list = zip(*zip_list)
190 | for values in values_list:
191 | new_line = list(line_out)
192 | for i, val in enumerate(values):
193 | new_line[index_list[i]] = val
194 | lines_out.add(tuple(new_line))
195 |
196 | return head, lines_out
197 |
198 | def _add_data(self, head, data, filename_out, import_args):
199 | import_args = dict(import_args)
200 | import_args['filename'] = os.path.abspath(filename_out) if filename_out else False
201 | import_args['header'] = head
202 | import_args['data'] = data
203 | self.file_to_write[filename_out] = import_args
204 |
205 |
206 | class ProductProcessorV9(Processor):
207 | def __generate_attribute_data(self, attributes_list, ATTRIBUTE_PREFIX):
208 | self.attr_header = ['id', 'name']
209 | self.attr_data = [[mapper.to_m2o(ATTRIBUTE_PREFIX, att), att] for att in attributes_list]
210 |
211 | def process_attribute_mapping(self, mapping, line_mapping, attributes_list, ATTRIBUTE_PREFIX, path, import_args, id_gen_fun=None, null_values=['NULL']):
212 | """
213 | Mapping: the 'name' key is mandatory and should be built with val_att(attribute_list)
214 | """
215 | def add_value_line(values_out, line):
216 | for att in attributes_list:
217 | value_name = line[list(mapping.keys()).index('name')].get(att)
218 | if value_name:
219 | line_value = [ele[att] if isinstance(ele, dict) else ele for ele in line]
220 | values_out.add(tuple(line_value))
221 |
222 | id_gen_fun = id_gen_fun or (lambda template_id, values : mapper.to_m2o(template_id.split('.')[0] + '_LINE', template_id))
223 |
224 | values_header = mapping.keys()
225 | values_data = set()
226 |
227 | self.__generate_attribute_data(attributes_list, ATTRIBUTE_PREFIX)
228 | att_data = AttributeLineDict(self.attr_data, id_gen_fun)
229 | for line in self.data:
230 | line = [s.strip() if s.strip() not in null_values else '' for s in line]
231 | line_dict = dict(zip(self.header, line))
232 | line_out = [mapping[k](line_dict) for k in mapping.keys()]
233 |
234 | add_value_line(values_data, line_out)
235 | values_lines = [line_mapping[k](line_dict) for k in line_mapping.keys()]
236 | att_data.add_line(values_lines, line_mapping.keys())
237 |
238 | line_header, line_data = att_data.generate_line()
239 | context = import_args.get('context', {})
240 | context['create_product_variant'] = True
241 | import_args['context'] = context
242 | self._add_data(self.attr_header, self.attr_data, path + 'product.attribute.csv', import_args)
243 | self._add_data(values_header, values_data, path + 'product.attribute.value.csv', import_args)
244 | import_args = dict(import_args, groupby='product_tmpl_id/id')
245 | self._add_data(line_header, line_data, path + 'product.attribute.line.csv', import_args)
246 |
247 | class ProductProcessorV10(Processor):
248 | def process_attribute_data(self, attributes_list, ATTRIBUTE_PREFIX, filename_out, import_args):
249 | attr_header = ['id', 'name']
250 | attr_data = [[mapper.to_m2o(ATTRIBUTE_PREFIX, att), att] for att in attributes_list]
251 | self._add_data(attr_header, attr_data, filename_out, import_args)
252 |
--------------------------------------------------------------------------------
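A minimal sketch of the join_file() API above (file names borrowed from the test suite, output path assumed): after the join, child columns are available under the header_prefix in any mapping.

    from odoo_csv_tools.lib import mapper
    from odoo_csv_tools.lib.transform import Processor

    processor = Processor('origin/test_merge1.csv')
    # Append child columns where master 'category' equals child 'name'.
    processor.join_file('origin/test_merge2.csv', 'category', 'name')
    mapping = {
        'name': mapper.val('name'),
        'color': mapper.val('child_color'),  # column created by the join
    }
    header, data = processor.process(mapping, 'data/merged.csv',
                                     {'worker': 1, 'batch_size': 10})

--------------------------------------------------------------------------------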
/odoo_csv_tools/lib/workflow.py:
--------------------------------------------------------------------------------
1 | '''
2 | Created on 7 Apr. 2016
3 |
4 | @author: odoo
5 | '''
6 | #from __future__ import absolute_import
7 | import sys
8 | if sys.version_info >= (3, 0, 0):
9 | from xmlrpc.client import Fault
10 | else:
11 | from xmlrpclib import Fault
12 |
13 | from time import time
14 | from . internal.rpc_thread import RpcThread
15 |
16 | class InvoiceWorkflowV9():
17 | def __init__(self, connection, field, status_map, paid_date_field, payment_journal, max_connection=4):
18 | """
19 | @param connection: must be a jsonrpc connection
20 | @param field: the field that contains the state imported from legacy data
21 | @param status_map: dict that maps each odoo invoice status to the legacy system statuses;
22 | each value should be a list
23 | {
24 | 'open' : ['status1'],
25 | 'paid' : ['status2', 'status3'],
26 | 'cancel' : ...
27 | 'proforma' :
28 | }
29 | """
30 | self.connection = connection
31 | self.invoice_obj = connection.get_model('account.invoice')
32 | self.payement_obj = connection.get_model('account.payment')
33 | self.account_invoice_tax = self.connection.get_model('account.invoice.tax')
34 | self.field = field
35 | self.status_map = status_map
36 | self.paid_date = paid_date_field
37 | self.payment_journal = payment_journal
38 | self.max_connection = max_connection
39 |
40 | def display_percent(self, i, percent_step, total):
41 | if i % percent_step == 0:
42 | print("%s%% : %s/%s time %s sec" % (round(i / float(total) * 100, 2), i, total, time() - self.time))
43 |
44 | def set_tax(self):
45 | def create_tax(invoice_id):
46 | taxes = self.invoice_obj.get_taxes_values(invoice_id)
47 | for tax in taxes.values():
48 | self.account_invoice_tax.create(tax)
49 |
50 | invoices = self.invoice_obj.search([('state', '=', 'draft'),
51 | ('type', '=', 'out_invoice'),
52 | ('tax_line_ids', '=', False)])
53 | total = len(invoices)
54 | percent_step = int(total / 5000) or 1
55 | self.time = time()
56 | rpc_thread = RpcThread(self.max_connection)
57 | print("Compute Tax %s invoice" % total)
58 | for i, invoice_id in enumerate(invoices):
59 | self.display_percent(i, percent_step, total)
60 | rpc_thread.spawn_thread(create_tax, [invoice_id])
61 | rpc_thread.wait()
62 |
63 | def validate_invoice(self):
64 | invoice_to_validate = self.invoice_obj.search([(self.field, 'in', self.status_map['open'] + self.status_map['paid']),
65 | ('state', '=', 'draft'),
66 | ('type', '=', 'out_invoice')])
67 | total = len(invoice_to_validate)
68 | percent_step = int(total / 5000) or 1
69 | rpc_thread = RpcThread(1)
70 | print("Validate %s invoice" % total)
71 | self.time = time()
72 | for i, invoice_id in enumerate(invoice_to_validate):
73 | self.display_percent(i, percent_step, total)
74 | fun = self.connection.get_service('object').exec_workflow
75 | rpc_thread.spawn_thread(fun, [self.connection.database,
76 | self.connection.user_id,
77 | self.connection.password,
78 | 'account.invoice',
79 | 'invoice_open',
80 | invoice_id])
81 | rpc_thread.wait()
82 |
83 | def proforma_invoice(self):
84 | invoice_to_proforma = self.invoice_obj.search([(self.field, 'in', self.status_map['proforma']),
85 | ('state', '=', 'draft'),
86 | ('type', '=', 'out_invoice')])
87 | total = len(invoice_to_proforma)
88 | percent_step = int(total / 100) or 1
89 | self.time = time()
90 | rpc_thread = RpcThread(self.max_connection)
91 | print("Pro Format %s invoice" % total)
92 | for i, invoice_id in enumerate(invoice_to_proforma):
93 | self.display_percent(i, percent_step, total)
94 | fun = self.connection.get_service('object').exec_workflow
95 | rpc_thread.spawn_thread(fun, [self.connection.database,
96 | self.connection.user_id,
97 | self.connection.password,
98 | 'account.invoice',
99 | 'invoice_proforma2',
100 | invoice_id], {})
101 | rpc_thread.wait()
102 |
103 | def paid_invoice(self):
104 | def pay_single_invoice(data_update, wizard_context):
105 | data = self.payement_obj.default_get(["communication", "currency_id", "invoice_ids",
106 | "payment_difference", "partner_id", "payment_method_id",
107 | "payment_difference_handling", "journal_id",
108 | "state", "writeoff_account_id", "payment_date",
109 | "partner_type", "hide_payment_method",
110 | "payment_method_code", "partner_bank_account_id",
111 | "amount", "payment_type"], context=wizard_context)
112 | data.update(data_update)
113 | wizard_id = self.payement_obj.create(data, context=wizard_context)
114 | try:
115 | self.payement_obj.post([wizard_id], context=wizard_context)
116 | except Fault:
117 | pass
118 |
119 |
120 | invoice_to_paid = self.invoice_obj.search_read([(self.field, 'in', self.status_map['paid']), ('state', '=', 'open'), ('type', '=', 'out_invoice')],
121 | [self.paid_date, 'date_invoice'])
122 | total = len(invoice_to_paid)
123 | percent_step = int(total / 1000) or 1
124 | self.time = time()
125 | rpc_thread = RpcThread(self.max_connection)
126 | print("Paid %s invoice" % total)
127 | for i, invoice in enumerate(invoice_to_paid):
128 | self.display_percent(i, percent_step, total)
129 | wizard_context = {
130 | 'active_id' : invoice['id'],
131 | 'active_ids' : [invoice['id']],
132 | 'active_model' : 'account.invoice',
133 | 'default_invoice_ids' : [(4, invoice['id'], 0)],
134 | 'type' : "out_invoice",
135 | "journal_type":"sale"
136 | }
137 | data_update = {
138 | 'journal_id' : self.payment_journal, # payment journal
139 | 'payment_date' : invoice[self.paid_date] or invoice['date_invoice'],
140 | 'payment_method_id' : 1,
141 | }
142 | rpc_thread.spawn_thread(pay_single_invoice, [data_update, wizard_context], {})
143 | rpc_thread.wait()
144 |
145 | def rename(self, name_field):
146 | invoice_to_paid = self.invoice_obj.search_read([(name_field, '!=', False),(name_field, '!=', '0.0'),('state', '!=', 'draft'), ('type', '=', 'out_invoice')],
147 | [name_field])
148 | total = len(invoice_to_paid)
149 | percent_step = int(total / 1000) or 1
150 | self.time = time()
151 | rpc_thread = RpcThread(int(self.max_connection * 1.5))
152 | print("Rename %s invoice" % total)
153 | for i, invoice in enumerate(invoice_to_paid):
154 | self.display_percent(i, percent_step, total)
155 | rpc_thread.spawn_thread(self.invoice_obj.write, [invoice['id'], {'number' : invoice[name_field], name_field : False}], {})
156 | rpc_thread.wait()
157 |
--------------------------------------------------------------------------------
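A minimal sketch of driving InvoiceWorkflowV9 above. The connection is built with odoo-client-lib (the dependency pinned in requirements.txt) using the credentials from tests/conf/connection.conf; the legacy field names and status values are assumptions for illustration.

    import odoolib
    from odoo_csv_tools.lib.workflow import InvoiceWorkflowV9

    # Must be a jsonrpc connection, as required by the constructor.
    connection = odoolib.get_connection(hostname='localhost', database='load',
                                        login='admin', password='admin',
                                        protocol='jsonrpc', port=8069)
    status_map = {
        'open': ['OPEN'],
        'paid': ['PAID', 'CLOSED'],
        'cancel': ['VOID'],
        'proforma': ['SENT'],
    }
    wf = InvoiceWorkflowV9(connection, 'x_legacy_state', status_map,
                           'x_legacy_paid_date', payment_journal=1)
    wf.set_tax()
    wf.validate_invoice()
    wf.paid_invoice()

--------------------------------------------------------------------------------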
/odoo_csv_tools/migrate.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | #-*- coding: utf-8 -*-
3 | '''
4 | Created on 27 Dec. 2016
5 |
6 | @author: Thibault Francois
7 | '''
8 | from .lib.transform import Processor
9 | from .export_threaded import export_data
10 | from .import_threaded import import_data
11 |
12 | class Migrator(object):
13 |
14 | def __init__(self, config_export, config_import):
15 | self.config_export = config_export
16 | self.config_import = config_import
17 | self.import_batch_size = 10
18 | self.import_max_con = 1
19 | self.export_batch_size = 100
20 | self.export_max_con = 1
21 |
22 | def migrate(self, model, domain, field_export, mappings=[None]):
23 | header, data = export_data(self.config_export, model, domain, field_export, max_connection=self.export_max_con, batch_size=self.export_batch_size)
24 | processor = Processor(header=header, data=data)
25 | for mapping in mappings:
26 | if not mapping:
27 | mapping = processor.get_o2o_mapping()
28 | to_import_header, to_import_data = processor.process(mapping, False, {})
29 | import_data(self.config_import, model, header=to_import_header, data=to_import_data, max_connection=self.import_max_con, batch_size=self.import_batch_size)
--------------------------------------------------------------------------------
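A minimal sketch of Migrator above (configuration paths, domain and field list are assumptions): it exports records from one database and re-imports them into another, falling back to the identity (o2o) mapping when no mapping is given.

    from odoo_csv_tools.migrate import Migrator

    migrator = Migrator('conf/source.conf', 'conf/target.conf')
    migrator.export_batch_size = 200  # tune batch sizes as needed
    migrator.migrate('res.partner',
                     [('is_company', '=', True)],
                     ['id', 'name', 'phone', 'city'])

--------------------------------------------------------------------------------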
/odoo_export_thread.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 | '''
4 | Copyright (C) Thibault Francois
5 |
6 | This program is free software: you can redistribute it and/or modify
7 | it under the terms of the GNU Lesser General Public License as
8 | published by the Free Software Foundation, version 3.
9 |
10 | This program is distributed in the hope that it will be useful, but
11 | WITHOUT ANY WARRANTY; without even the implied warranty of
12 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 | Lesser General Public License for more details.
14 |
15 | You should have received a copy of the GNU Lesser General Public License
16 | along with this program. If not, see <https://www.gnu.org/licenses/>.
17 | '''
18 |
19 | import argparse
20 | from odoo_csv_tools import export_threaded
21 |
22 | if __name__ == '__main__':
23 | parser = argparse.ArgumentParser(description='Export data in batch and in parallel')
24 | parser.add_argument('-c', '--config', dest='config', default="conf/connection.conf",
25 | help='Configuration File that contains connection parameters', required=True)
26 | parser.add_argument('--file', dest='filename', help='Output File', required=True)
27 | parser.add_argument('--model', dest='model', help='Model to Export', required=True)
28 | parser.add_argument('--field', dest='fields', help='Fields to Export', required=True)
29 | parser.add_argument('--domain', dest='domain', help='Filter', default="[]")
30 | parser.add_argument('--worker', dest='worker', default=1, help='Number of simultaneous connections')
31 | parser.add_argument('--size', dest='batch_size', default=10, help='Number of lines to export per connection')
32 | parser.add_argument('-s', '--sep', dest="separator", default=";", help='CSV separator')
33 | parser.add_argument('--context', dest='context',
34 | help='context that will be passed to the export calls, needs to be a valid Python dict',
35 | default="{'tracking_disable' : True}")
36 | parser.add_argument('--encoding', dest='encoding', default="utf-8", help='Encoding of the data file')
37 | # TODO args : encoding
38 | # {'update_many2many': True,'tracking_disable' : True, 'create_product_variant' : True, 'check_move_validity' : False}
39 | args = parser.parse_args()
40 |
41 | config_file = args.config
42 | file_csv = args.filename
43 | batch_size = int(args.batch_size)
44 | model = args.model
45 | max_connection = int(args.worker)
46 | separator = args.separator
47 | encoding = args.encoding
48 | context = eval(args.context)
49 | domain = eval(args.domain)
50 | header = args.fields.split(',')
51 | export_threaded.export_data(config_file, model, domain, header, context=context, output=file_csv,
52 | max_connection=max_connection, batch_size=batch_size, separator=separator,
53 | encoding=encoding)
54 |
--------------------------------------------------------------------------------
/odoo_import_thread.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | #-*- coding: utf-8 -*-
3 | '''
4 | Copyright (C) Thibault Francois
5 |
6 | This program is free software: you can redistribute it and/or modify
7 | it under the terms of the GNU Lesser General Public License as
8 | published by the Free Software Foundation, version 3.
9 |
10 | This program is distributed in the hope that it will be useful, but
11 | WITHOUT ANY WARRANTY; without even the implied warranty of
12 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 | Lesser General Public License for more details.
14 |
15 | You should have received a copy of the GNU Lesser General Public License
16 | along with this program. If not, see <https://www.gnu.org/licenses/>.
17 | '''
18 |
19 | import argparse
20 | from odoo_csv_tools import import_threaded
21 |
22 | if __name__ == '__main__':
23 | parser = argparse.ArgumentParser(description='Import data in batch and in parallel')
24 | parser.add_argument('-c', '--config', dest='config', default="conf/connection.conf", help='Configuration File that contains connection parameters', required = True)
25 | parser.add_argument('--file', dest='filename', help='File to import', required = True)
26 | parser.add_argument('--model', dest='model', help='Model to import', required = True)
27 | parser.add_argument('--worker', dest='worker', default=1, help='Number of simultaneous connections')
28 | parser.add_argument('--size', dest='batch_size', default=10, help='Number of lines to import per connection')
29 | parser.add_argument('--skip', dest='skip', default=0, help='Skip until line [SKIP]')
30 | parser.add_argument('--fail', action='store_true', dest="fail", help='Fail mode')
31 | parser.add_argument('-s', '--sep', dest="separator", default=";", help='CSV separator')
32 | parser.add_argument('--groupby', dest='split', help='Group data per batch with the same value for the given column in order to avoid concurrent update errors')
33 | parser.add_argument('--ignore', dest='ignore', help='Comma-separated list of columns; these columns will be removed from the import request')
34 | parser.add_argument('--check', dest='check', action='store_true', help='Check that records are imported after each batch.')
35 | parser.add_argument('--context', dest='context', help='context that will be passed to the load function, needs to be a valid Python dict', default="{'tracking_disable' : True}")
36 | parser.add_argument('--o2m', action='store_true', dest="o2m", help="When importing o2m fields, don't cut a batch until a new id is found")
37 | parser.add_argument('--encoding', dest='encoding', default="utf-8", help='Encoding of the data file')
38 | #TODO args : encoding
39 | #{'update_many2many': True,'tracking_disable' : True, 'create_product_variant' : True, 'check_move_validity' : False}
40 | args = parser.parse_args()
41 |
42 | file_csv = args.filename
43 | batch_size = int(args.batch_size)
44 | fail_file = file_csv + ".fail"
45 | max_connection = int(args.worker)
46 | split = False
47 | encoding = args.encoding
48 | context = eval(args.context)
49 | ignore = False
50 | if args.ignore:
51 | ignore = args.ignore.split(',')
52 |
53 | if args.fail:
54 | file_csv = fail_file
55 | fail_file = fail_file + ".bis"
56 | batch_size = 1
57 | max_connection = 1
58 | split = False
59 |
60 | import_threaded.import_data(args.config, args.model, file_csv=file_csv, context=context,
61 | fail_file=fail_file, encoding=encoding, separator=args.separator,
62 | ignore=ignore, split=args.split, check=args.check,
63 | max_connection=max_connection, batch_size=batch_size, skip=int(args.skip), o2m=args.o2m)
64 |
--------------------------------------------------------------------------------
/pics/account_move.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tfrancoi/odoo_csv_import/6951f11b5e064178eeedc2bdec3c15eaa6aeac95/pics/account_move.png
--------------------------------------------------------------------------------
/pics/cascade_update.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tfrancoi/odoo_csv_import/6951f11b5e064178eeedc2bdec3c15eaa6aeac95/pics/cascade_update.png
--------------------------------------------------------------------------------
/pics/fail.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tfrancoi/odoo_csv_import/6951f11b5e064178eeedc2bdec3c15eaa6aeac95/pics/fail.png
--------------------------------------------------------------------------------
/pics/group_by_1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tfrancoi/odoo_csv_import/6951f11b5e064178eeedc2bdec3c15eaa6aeac95/pics/group_by_1.png
--------------------------------------------------------------------------------
/pics/group_by_2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tfrancoi/odoo_csv_import/6951f11b5e064178eeedc2bdec3c15eaa6aeac95/pics/group_by_2.png
--------------------------------------------------------------------------------
/pics/import_tool_options.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tfrancoi/odoo_csv_import/6951f11b5e064178eeedc2bdec3c15eaa6aeac95/pics/import_tool_options.png
--------------------------------------------------------------------------------
/pics/o2m_csv.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tfrancoi/odoo_csv_import/6951f11b5e064178eeedc2bdec3c15eaa6aeac95/pics/o2m_csv.png
--------------------------------------------------------------------------------
/pics/o2m_csv_gen.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tfrancoi/odoo_csv_import/6951f11b5e064178eeedc2bdec3c15eaa6aeac95/pics/o2m_csv_gen.png
--------------------------------------------------------------------------------
/pics/phase_load.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tfrancoi/odoo_csv_import/6951f11b5e064178eeedc2bdec3c15eaa6aeac95/pics/phase_load.png
--------------------------------------------------------------------------------
/pics/phase_transform.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tfrancoi/odoo_csv_import/6951f11b5e064178eeedc2bdec3c15eaa6aeac95/pics/phase_transform.png
--------------------------------------------------------------------------------
/pics/run_time_1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tfrancoi/odoo_csv_import/6951f11b5e064178eeedc2bdec3c15eaa6aeac95/pics/run_time_1.png
--------------------------------------------------------------------------------
/pics/run_time_2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tfrancoi/odoo_csv_import/6951f11b5e064178eeedc2bdec3c15eaa6aeac95/pics/run_time_2.png
--------------------------------------------------------------------------------
/pics/run_time_3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tfrancoi/odoo_csv_import/6951f11b5e064178eeedc2bdec3c15eaa6aeac95/pics/run_time_3.png
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | odoo-client-lib==1.2.0
2 | unicodecsv==0.14.1
3 | future==0.16.0
4 | requests>=2.20.0
5 |
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | '''
3 | Copyright (C) Thibault Francois
4 |
5 | This program is free software: you can redistribute it and/or modify
6 | it under the terms of the GNU Lesser General Public License as
7 | published by the Free Software Foundation, version 3.
8 |
9 | This program is distributed in the hope that it will be useful, but
10 | WITHOUT ANY WARRANTY; without even the implied warranty of
11 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 | Lesser General Public License for more details.
13 |
14 | You should have received a copy of the GNU Lesser General Public License
15 | along with this program. If not, see <https://www.gnu.org/licenses/>.
16 | '''
17 |
18 | from setuptools import setup, find_packages
19 |
20 | setup(name='odoo-import-export-client',
21 | version='3.0.0',
22 | install_requires=['odoo-client-lib', 'future', 'unicodecsv', 'requests'],
23 | description='Library and scripts that allow exporting and importing data to Odoo using the RPC API.',
24 | author='Thibault Francois',
25 | author_email='francois.th@gmail.com',
26 | url='https://github.com/tfrancoi/odoo_csv_import',
27 | packages=find_packages(exclude=['contrib', 'docs', 'tests*']),
28 | scripts=['odoo_export_thread.py', 'odoo_import_thread.py', 'odoo_convert_path_to_image.py', 'odoo_convert_url_to_image.py'],
29 | long_description="See the home page for any information: https://github.com/tfrancoi/odoo_csv_import",
30 | keywords="odoo library import export thread python client lib web service",
31 | license="LGPLv3",
32 | classifiers=[
33 | "License :: OSI Approved :: GNU Lesser General Public License v3 (LGPLv3)",
34 | "Programming Language :: Python",
35 | ],
36 | )
37 |
--------------------------------------------------------------------------------
/tests/.coveragerc:
--------------------------------------------------------------------------------
1 | [run]
2 | branch = True
3 | source = .,..
4 |
--------------------------------------------------------------------------------
/tests/5_partner_export.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | $1 ../odoo_export_thread.py -c conf/connection.conf --file=data/res.partner.exported.csv --model=res.partner --worker=4 --size=200 --domain="[]" --field="id,name,phone,website,street,city,country_id/id" --sep=";" --encoding=utf-8-sig
3 |
--------------------------------------------------------------------------------
/tests/6_o2m_import.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | $1 ../odoo_import_thread.py --file=origin/res.partner_o2m.csv --model='res.partner' --size=1 --worker=1 --conf=conf/connection.conf --o2m
3 |
--------------------------------------------------------------------------------
/tests/7_convert_binary.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | $1 ../odoo_convert_path_to_image.py --path=./origin/img/ -f Image origin/contact.csv
3 | $1 ../odoo_convert_url_to_image.py -f Image origin/contact_url.csv
4 |
5 |
--------------------------------------------------------------------------------
/tests/clean.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | # Requires a running Odoo database reachable with the configuration given in conf/connection.conf
3 | # The Contacts module needs to be installed
4 |
5 | rm -rf data
6 | rm -rf htmlcov
7 | rm 0_partner_generated.sh
8 | rm 1_partner_split.sh
9 | rm 2_contact_import.sh
10 | rm 3_product_import.sh
11 | rm 4_product_import.sh
12 | rm .coverage
13 | rm error.log
14 | rm out.csv
15 |
--------------------------------------------------------------------------------
/tests/conf/connection.conf:
--------------------------------------------------------------------------------
1 | [Connection]
2 | hostname = localhost
3 | database = load
4 | login = admin
5 | password = admin
6 | protocol = jsonrpc
7 | port = 8069
8 | uid = 1
9 |
10 |
11 |
--------------------------------------------------------------------------------
/tests/const.py:
--------------------------------------------------------------------------------
1 | # EXEC = 'coverage run -a'
2 | EXEC = 'python3'
3 |
--------------------------------------------------------------------------------
/tests/launch_test.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | # Requires a running Odoo database reachable with the configuration given in conf/connection.conf
3 | # Tests are known to work well on V11
4 | # The Contacts module needs to be installed
5 | #EXEC="python2"
6 | for EXEC in "python2" "python3" "python3.7" "coverage run -a"
7 | do
8 | echo "============== Test $EXEC =============="
9 | rm -rf data
10 | mkdir data
11 | export PYTHONPATH=../
12 | echo "> Erase"
13 | coverage erase
14 | echo "> Generate data for import"
15 | $EXEC test_import.py "$EXEC"
16 | echo "> Run test import"
17 | sh 0_partner_generated.sh
18 | echo "> Run test split file"
19 | $EXEC test_split.py "$EXEC"
20 | echo "> Test mapping from file"
21 | $EXEC test_from_file.py "$EXEC"
22 | echo "> Import data with error"
23 | sh 2_contact_import.sh 2> error.log
24 | echo "> Import Product"
25 | $EXEC test_product_v9.py "$EXEC"
26 | sh 3_product_import.sh
27 | echo "> Import Product v10"
28 | $EXEC test_product_v10.py "$EXEC"
29 | sh 4_product_import.sh
30 | sh 5_partner_export.sh "$EXEC"
31 | echo "> Import One2Many"
32 | sh 6_o2m_import.sh "$EXEC"
33 | echo "> Convert Binary"
34 | sh 7_convert_binary.sh "$EXEC"
35 | echo "Test join"
36 | $EXEC test_merge.py
37 | coverage html
38 | done
39 |
--------------------------------------------------------------------------------
/tests/origin/compagny.csv:
--------------------------------------------------------------------------------
1 | Company_ID;Company_Name;Phone;www;Account_Manager;address1;city;zip code;country;IsCustomer;IsSupplier;Language;Contact First Name;Contact Last Name;Contact Email;Contact Title
2 | COM15602;Company BV;8988 2436428 ;www.company.com;Thibault;Rue de la poste;Brussel;14589;Belgique;1;;French;Alexis;WU;mail1@mail.com;Mrs
3 | COM12639;John SPRL;56528 221505;www.john.fr;Francois;Downing Street;Paris;14590;FR;1;;Dutch;Thibault;Francois;mail2@mail.com;Mrs
4 | COM20695;Odoo SA;98779 342 19 45 ;www.odoo.com;Francois;La prèle, 3;New york;14591;U.S;1;;Dutch;Gregory ;Huvelle;mail3@mail.com;Mrs
5 | COM7663;Kangoo LTD;1310 3240077;;Francois;Glass AV;Amsterdam;14592;U.S;1;;Dutch;Antoine;Leberger;mail4@mail.com;Dr.
6 | COM23594;Odoo INC;0465476 392761;;Thibault;Home sweet Home;Mechelen;14593;BE;1;;Dutch;Neil;;mail5@mail.com;Mrs
7 | COM25801;Mike Corsoft inc;6456444-7070;www.mike.crosfot;Thibault;PO Box 13821;Lyon;14594;FR;;1;Dutch;Declan;;mail6@mail.com;
8 | COM7778;Odoo Limited;679789 5034221;;Francois;Marnixstraat 21;SF;14595;U.S;;;Dutch;John;Doe;mail7@mail.com;Ir.
9 | COM12268;Thomas sprl;65479845 5275812;;Thibault;Eggelaan 23;Delft;14596;NL;;;Dutch;Nicolas;Hollande;mail8@mail.com;President
10 | COM18683;Bois&co;654798 50 67 08 72;;Francois;J en M Sabbestraat 11;Marche-en-famenne;14597;Belgique;1;;Dutch;Celine;Descartes;mail9@mail.com;Mme
11 | COM23750;Roger Namur;6579 2 729 51 03;www.sa.eu;Francois;Rue de la Fusée 96;Namur;14598;Belgique;;;Dutch;Aurore;Lesage;mail10@mail.com;Mlle
12 |
--------------------------------------------------------------------------------
/tests/origin/contact.csv:
--------------------------------------------------------------------------------
1 | Company_ID;Company_Name;Phone;www;Account_Manager;address1;city;zip code;country;IsCustomer;IsSupplier;Language;Image;
2 | COM15602;Company BV;8988 2436428 ;www.company.com;Thibault;Rue de la poste;Brussel;14589;Belgique;1;0;English;employee_al-image.jpg;
3 | COM12639;John SPRL;56528 221505;www.john.fr;Francois;Downing Street;Paris;14590;FR;1;;English;employee_chs-image.jpg;
4 | COM20695;Odoo SA;98779 342 19 45 ;www.odoo.com;Francois;La prèle, 3;New york;14591;U.S;1;0;English;employee_djj-image.png;
5 | COM7663;Kangoo LTD;1310 3240077;;Francois;Glass AV;Amsterdam;14592;U.S;1;;English;employee_dzc-image.jpg;
6 | COM23594;Odoo INC;0465476 392761;;Thibault;Home sweet Home;Mechelen;14593;BE;1;;English;employee_fme-image.jpg;
7 | COM25801;Mike Corsoft inc;6456444-7070;www.mike.crosfot;Thibault;PO Box 13821;Lyon;14594;FR;;1;Dutch;employee_fpi-image.jpg;
8 | COM7778;Odoo Limited;679789 5034221;;Francois;Marnixstraat 21;SF;14595;U.S;;;English;employee_fp-image.jpg;
9 | COM12268;Thomas sprl;65479845 5275812;;Thibault;Eggelaan 23;Delft;14596;NL;;;English;employee_han-image.png;
10 | COM18683;Bois&co;654798 50 67 08 72;;Francois;J en M Sabbestraat 11;Marche-en-famenne;14597;Belgique;1;;English;employee_hne-image.png;
11 | COM23750;Roger Namur;6579 2 729 51 03;www.sa.eu;Francois;Rue de la Fusée 96;Namur;14598;Belgique;;;English;employee-image.png;
12 | COM15632;Company BV;8988 2436428 ;www.company.com;Thibault;Rue de la poste;Brussel;14589;Belgique;1;;French;employee_jep-image.jpg;
13 | COM12539;John SPRL;56528 221505;www.john.fr;Francois;Downing Street;Paris;14590;FR;1;;English;employee_jgo-image.jpg;
14 | COM20295;Odoo SA;98779 342 19 45 ;www.odoo.com;Francois;La prèle, 3;New york;14591;U.S;1;;English;employee_jod-image.png;
15 | COM7693;Kangoo LTD;1310 3240077;;Francois;Glass AV;Amsterdam;14592;U.S;1;;English;employee_jog-image.jpg;
16 | COM2344;Odoo INC;0465476 392761;;Thibault;Home sweet Home;Mechelen;14593;BE;1;;English;employee_jth-image.png;
17 | COM22501;Mike Corsoft inc;6456444-7070;www.mike.crosfot;Thibault;PO Box 13821;Lyon;14594;FR;;1;English;employee_jve-image.jpg;
18 | COM77238;Odoo Limited;679789 5034221;;Francois;Marnixstraat 21;SF;14595;U.S;;;Dutch;employee_lur-image.png;
19 | COM122648;Thomas sprl;65479845 5275812;;Thibault;Eggelaan 23;Delft;14596;NL;;;English;employee_mit-image.png;
20 | ;Bois&co;654798 50 67 08 72;;Francois;J en M Sabbestraat 11;Marche-en-famenne;14597;Belgique;1;;English;;employee_ngh-image.jpg
21 | COM237540;;6579 2 729 51 03;www.verylongwebsitename-tocause-check-length-failed.eu;Francois;Rue de la Fusée 96;Namur;14598;Belgique;;;English;employee_niv-image.jpg;
22 |
--------------------------------------------------------------------------------
/tests/origin/contact_url.csv:
--------------------------------------------------------------------------------
1 | Company_ID;Company_Name;Phone;www;Account_Manager;address1;city;zip code;country;IsCustomer;IsSupplier;Language;Image;
2 | COM15602;Company BV;8988 2436428 ;www.company.com;Thibault;Rue de la poste;Brussel;14589;Belgique;1;0;English;https://github.com/tfrancoi/odoo_csv_import/raw/master/tests/origin/img/employee_al-image.jpg;
3 | COM12639;John SPRL;56528 221505;www.john.fr;Francois;Downing Street;Paris;14590;FR;1;;English;https://github.com/tfrancoi/odoo_csv_import/raw/master/tests/origin/img/employee_chs-image.jpg;
4 | COM20695;Odoo SA;98779 342 19 45 ;www.odoo.com;Francois;La prèle, 3;New york;14591;U.S;1;0;English;https://github.com/tfrancoi/odoo_csv_import/raw/master/tests/origin/img/employee_djj-image.png;
5 | COM7663;Kangoo LTD;1310 3240077;;Francois;Glass AV;Amsterdam;14592;U.S;1;;English;https://github.com/tfrancoi/odoo_csv_import/raw/master/tests/origin/img/employee_dzc-image.jpg;
6 | COM23594;Odoo INC;0465476 392761;;Thibault;Home sweet Home;Mechelen;14593;BE;1;;English;https://github.com/tfrancoi/odoo_csv_import/raw/master/tests/origin/img/employee_fme-image.jpg;
7 | COM25801;Mike Corsoft inc;6456444-7070;www.mike.crosfot;Thibault;PO Box 13821;Lyon;14594;FR;;1;Dutch;https://github.com/tfrancoi/odoo_csv_import/raw/master/tests/origin/img/employee_fpi-image.jpg;
8 | COM7778;Odoo Limited;679789 5034221;;Francois;Marnixstraat 21;SF;14595;U.S;;;English;https://github.com/tfrancoi/odoo_csv_import/raw/master/tests/origin/img/employee_fp-image.jpg;
9 | COM12268;Thomas sprl;65479845 5275812;;Thibault;Eggelaan 23;Delft;14596;NL;;;English;https://github.com/tfrancoi/odoo_csv_import/raw/master/tests/origin/img/employee_han-image.png;
10 | COM18683;Bois&co;654798 50 67 08 72;;Francois;J en M Sabbestraat 11;Marche-en-famenne;14597;Belgique;1;;English;https://github.com/tfrancoi/odoo_csv_import/raw/master/tests/origin/img/employee_hne-image.png;
11 | COM23750;Roger Namur;6579 2 729 51 03;www.sa.eu;Francois;Rue de la Fusée 96;Namur;14598;Belgique;;;English;https://github.com/tfrancoi/odoo_csv_import/raw/master/tests/origin/img/employee-image.png;
12 | COM15632;Company BV;8988 2436428 ;www.company.com;Thibault;Rue de la poste;Brussel;14589;Belgique;1;;French;https://github.com/tfrancoi/odoo_csv_import/raw/master/tests/origin/img/employee_jep-image.jpg;
13 | COM12539;John SPRL;56528 221505;www.john.fr;Francois;Downing Street;Paris;14590;FR;1;;English;https://github.com/tfrancoi/odoo_csv_import/raw/master/tests/origin/img/employee_jgo-image.jpg;
14 | COM20295;Odoo SA;98779 342 19 45 ;www.odoo.com;Francois;La prèle, 3;New york;14591;U.S;1;;English;https://github.com/tfrancoi/odoo_csv_import/raw/master/tests/origin/img/employee_jod-image.png;
15 | COM7693;Kangoo LTD;1310 3240077;;Francois;Glass AV;Amsterdam;14592;U.S;1;;English;https://github.com/tfrancoi/odoo_csv_import/raw/master/tests/origin/img/employee_jog-image.jpg;
16 | COM2344;Odoo INC;0465476 392761;;Thibault;Home sweet Home;Mechelen;14593;BE;1;;English;https://github.com/tfrancoi/odoo_csv_import/raw/master/tests/origin/img/employee_jth-image.png;
17 | COM22501;Mike Corsoft inc;6456444-7070;www.mike.crosfot;Thibault;PO Box 13821;Lyon;14594;FR;;1;English;https://github.com/tfrancoi/odoo_csv_import/raw/master/tests/origin/img/employee_jve-image.jpg;
18 | COM77238;Odoo Limited;679789 5034221;;Francois;Marnixstraat 21;SF;14595;U.S;;;Dutch;https://github.com/tfrancoi/odoo_csv_import/raw/master/tests/origin/img/employee_lur-image.png;
19 | COM122648;Thomas sprl;65479845 5275812;;Thibault;Eggelaan 23;Delft;14596;NL;;;English;https://github.com/tfrancoi/odoo_csv_import/raw/master/tests/origin/img/employee_mit-image.png;
20 | ;Bois&co;654798 50 67 08 72;;Francois;J en M Sabbestraat 11;Marche-en-famenne;14597;Belgique;1;;English;https://github.com/tfrancoi/odoo_csv_import/raw/master/tests/origin/img/employee_ngh-image.jpg
21 | COM237540;;6579 2 729 51 03;www.verylongwebsitename-tocause-check-length-failed.eu;Francois;Rue de la Fusée 96;Namur;14598;Belgique;;;English;https://github.com/tfrancoi/odoo_csv_import/raw/master/tests/origin/img/employee_niv-image.jpg;
22 |
--------------------------------------------------------------------------------
/tests/origin/img/employee-image.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tfrancoi/odoo_csv_import/6951f11b5e064178eeedc2bdec3c15eaa6aeac95/tests/origin/img/employee-image.png
--------------------------------------------------------------------------------
/tests/origin/img/employee_al-image.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tfrancoi/odoo_csv_import/6951f11b5e064178eeedc2bdec3c15eaa6aeac95/tests/origin/img/employee_al-image.jpg
--------------------------------------------------------------------------------
/tests/origin/img/employee_chs-image.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tfrancoi/odoo_csv_import/6951f11b5e064178eeedc2bdec3c15eaa6aeac95/tests/origin/img/employee_chs-image.jpg
--------------------------------------------------------------------------------
/tests/origin/img/employee_djj-image.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tfrancoi/odoo_csv_import/6951f11b5e064178eeedc2bdec3c15eaa6aeac95/tests/origin/img/employee_djj-image.png
--------------------------------------------------------------------------------
/tests/origin/img/employee_dzc-image.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tfrancoi/odoo_csv_import/6951f11b5e064178eeedc2bdec3c15eaa6aeac95/tests/origin/img/employee_dzc-image.jpg
--------------------------------------------------------------------------------
/tests/origin/img/employee_fme-image.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tfrancoi/odoo_csv_import/6951f11b5e064178eeedc2bdec3c15eaa6aeac95/tests/origin/img/employee_fme-image.jpg
--------------------------------------------------------------------------------
/tests/origin/img/employee_fp-image.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tfrancoi/odoo_csv_import/6951f11b5e064178eeedc2bdec3c15eaa6aeac95/tests/origin/img/employee_fp-image.jpg
--------------------------------------------------------------------------------
/tests/origin/img/employee_fpi-image.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tfrancoi/odoo_csv_import/6951f11b5e064178eeedc2bdec3c15eaa6aeac95/tests/origin/img/employee_fpi-image.jpg
--------------------------------------------------------------------------------
/tests/origin/img/employee_han-image.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tfrancoi/odoo_csv_import/6951f11b5e064178eeedc2bdec3c15eaa6aeac95/tests/origin/img/employee_han-image.png
--------------------------------------------------------------------------------
/tests/origin/img/employee_hne-image.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tfrancoi/odoo_csv_import/6951f11b5e064178eeedc2bdec3c15eaa6aeac95/tests/origin/img/employee_hne-image.png
--------------------------------------------------------------------------------
/tests/origin/img/employee_jep-image.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tfrancoi/odoo_csv_import/6951f11b5e064178eeedc2bdec3c15eaa6aeac95/tests/origin/img/employee_jep-image.jpg
--------------------------------------------------------------------------------
/tests/origin/img/employee_jgo-image.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tfrancoi/odoo_csv_import/6951f11b5e064178eeedc2bdec3c15eaa6aeac95/tests/origin/img/employee_jgo-image.jpg
--------------------------------------------------------------------------------
/tests/origin/img/employee_jod-image.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tfrancoi/odoo_csv_import/6951f11b5e064178eeedc2bdec3c15eaa6aeac95/tests/origin/img/employee_jod-image.png
--------------------------------------------------------------------------------
/tests/origin/img/employee_jog-image.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tfrancoi/odoo_csv_import/6951f11b5e064178eeedc2bdec3c15eaa6aeac95/tests/origin/img/employee_jog-image.jpg
--------------------------------------------------------------------------------
/tests/origin/img/employee_jth-image.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tfrancoi/odoo_csv_import/6951f11b5e064178eeedc2bdec3c15eaa6aeac95/tests/origin/img/employee_jth-image.png
--------------------------------------------------------------------------------
/tests/origin/img/employee_jve-image.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tfrancoi/odoo_csv_import/6951f11b5e064178eeedc2bdec3c15eaa6aeac95/tests/origin/img/employee_jve-image.jpg
--------------------------------------------------------------------------------
/tests/origin/img/employee_lur-image.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tfrancoi/odoo_csv_import/6951f11b5e064178eeedc2bdec3c15eaa6aeac95/tests/origin/img/employee_lur-image.png
--------------------------------------------------------------------------------
/tests/origin/img/employee_mit-image.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tfrancoi/odoo_csv_import/6951f11b5e064178eeedc2bdec3c15eaa6aeac95/tests/origin/img/employee_mit-image.png
--------------------------------------------------------------------------------
/tests/origin/img/employee_ngh-image.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tfrancoi/odoo_csv_import/6951f11b5e064178eeedc2bdec3c15eaa6aeac95/tests/origin/img/employee_ngh-image.jpg
--------------------------------------------------------------------------------
/tests/origin/img/employee_niv-image.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tfrancoi/odoo_csv_import/6951f11b5e064178eeedc2bdec3c15eaa6aeac95/tests/origin/img/employee_niv-image.jpg
--------------------------------------------------------------------------------
/tests/origin/img/employee_qdp-image.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tfrancoi/odoo_csv_import/6951f11b5e064178eeedc2bdec3c15eaa6aeac95/tests/origin/img/employee_qdp-image.png
--------------------------------------------------------------------------------
/tests/origin/img/employee_stw-image.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tfrancoi/odoo_csv_import/6951f11b5e064178eeedc2bdec3c15eaa6aeac95/tests/origin/img/employee_stw-image.jpg
--------------------------------------------------------------------------------
/tests/origin/img/employee_vad-image.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tfrancoi/odoo_csv_import/6951f11b5e064178eeedc2bdec3c15eaa6aeac95/tests/origin/img/employee_vad-image.jpg
--------------------------------------------------------------------------------
/tests/origin/res.partner_o2m.csv:
--------------------------------------------------------------------------------
1 | id;name;email;child_ids/name;child_ids/street
2 | test_o2m.test1;test1;test1@test.com;test1_child1;street1
3 | ;;;test1_child2;street2
4 | test_o2m.test2;test2;test2@test.com;test2_child1;street3
5 | ;;;test2_child2;street4
6 | ;;;test2_child3;street5
7 | test_o2m.test3;test3;test3@test.com;test3_child1;street6
8 | ;;;test3_child2;street7
9 | ;;;test3_child3;street8
10 | ;;;test3_child4;street9
11 | ;;;test3_child5;street10
12 | ;;;test3_child6;street11
13 | test_o2m.test4;test4;test4@test.com;test4_child1;street12
14 | ;;;test4_child2;street13
15 | ;;;test4_child3;street14
16 | test_o2m.test5;test5;test5@test.com;test5_child1;street15
17 | ;;;test5_child2;street16
18 | ;;;test5_child3;street17
19 | ;;;test5_child4;street18
20 | ;;;test5_child5;street19
21 | ;;;test5_child6;street20
22 | ;;;test5_child7;street21
23 | ;;;test5_child8;street22
24 | test_o2m.test6;test6;test6@test.com;test6_child1;street23
25 | ;;;test6_child2;street24
26 | ;;;test6_child3;street25
27 | ;;;test6_child4;street26
28 | ;;;test6_child5;street27
29 | ;;;test6_child6;street28
30 | ;;;test6_child7;street29
31 | test_o2m.test7;test7;test7@test.com;test7_child1;street30
32 | ;;;test7_child2;street31
33 | test_o2m.test8;test8;test8@test.com;test7_child1;street32
34 | test_o2m.test9;test9;test9@test.com;test8_child1;street33
35 |
--------------------------------------------------------------------------------
/tests/origin/test_merge1.csv:
--------------------------------------------------------------------------------
1 | name;category
2 | A;A
3 | B;B
4 | C;A
5 | D;B
6 | E;C
--------------------------------------------------------------------------------
/tests/origin/test_merge2.csv:
--------------------------------------------------------------------------------
1 | name;color
2 | A;Red
3 | B;Blue
--------------------------------------------------------------------------------
/tests/test_from_file.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | import sys
3 |
4 | import os
5 | from const import EXEC
6 |
7 | from odoo_csv_tools.lib import mapper, checker
8 | from odoo_csv_tools.lib.transform import Processor
9 |
10 | if len(sys.argv) == 2:
11 | EXEC = sys.argv[1]
12 |
13 | lang_map = {
14 | '': '',
15 | 'French': u'French (BE) / Français (BE)',
16 | 'English': u'English',
17 | 'Dutch': u'Dutch / Nederlands',
18 | }
19 |
20 | country_map = {
21 | 'Belgique': 'base.be',
22 | 'BE': 'base.be',
23 | 'FR': 'base.fr',
24 | 'U.S': 'base.us',
25 | 'US': 'base.us',
26 | 'NL': 'base.nl',
27 | }
28 |
29 | PARTNER_PREFIX = "TEST_PARTNER"
30 |
31 | # STEP 1 : read the needed file(s)
32 | processor = Processor('origin%scontact.csv' % os.sep)
33 | # Print o2o mapping
34 | import pprint
35 |
36 | pprint.pprint(processor.get_o2o_mapping())
37 |
38 | # STEP 2 : Define the mapping for every object to import
39 | mapping = {
40 | 'id': mapper.m2o(PARTNER_PREFIX, 'Company_ID', skip=True),
41 | 'name': mapper.val('Company_Name', skip=True),
42 | 'phone': mapper.val('Phone'),
43 | 'website': mapper.val('www'),
44 | 'street': mapper.val('address1'),
45 | 'city': mapper.val('city'),
46 | 'zip': mapper.val('zip code'),
47 | 'country_id/id': mapper.map_val('country', country_map),
48 | 'company_type': mapper.const('company'),
49 | 'customer': mapper.bool_val('IsCustomer', ['1'], ['0']),
50 | 'supplier': mapper.bool_val('IsSupplier', ['1'], ['0']),
51 | 'lang': mapper.map_val('Language', lang_map),
52 | 'image': mapper.binary("Image", "origin/img/"),
53 | }
54 |
55 | # Step 3: Check data quality (Optional)
56 | processor.check(checker.cell_len_checker(30))
57 | processor.check(checker.id_validity_checker('Company_ID', "COM\d"))
58 | processor.check(checker.line_length_checker(13))
59 | processor.check(checker.line_number_checker(21))
60 |
61 | # Step 4: Process data
62 | processor.process(mapping, 'data%sres.partner.csv' % os.sep, {'worker': 2, 'batch_size': 5}, 'set')
63 |
64 | # Step 5: Define output and import parameter
65 | processor.write_to_file("2_contact_import.sh", python_exe=EXEC, path='../')
66 |
--------------------------------------------------------------------------------
/tests/test_import.py:
--------------------------------------------------------------------------------
1 | """
2 | Created on 14 sept. 2016
3 |
4 | @author: mythrys
5 | """
6 | import random
7 | import sys
8 |
9 | from const import EXEC
10 |
11 | from odoo_csv_tools.lib import mapper
12 | from odoo_csv_tools.lib import transform
13 |
14 | if sys.version_info < (3, 0, 0):
15 | from builtins import range
16 |
17 | if len(sys.argv) == 2:
18 | EXEC = sys.argv[1]
19 |
20 | PARTNER_PREFIX = 'partner_generated'
21 | TAG_PREFIX = 'partner_tag'
22 | output = 'data/res.partner.generated.csv'
23 | tag_output = 'data/res.partner.category.csv'
24 | script = '0_partner_generated.sh'
25 |
26 | tags = ["Tag %s" % i for i in range(0, 100)]
27 |
28 | header = ['id', 'tags']
29 | data = [[str(i), ','.join(tags[random.randint(0, 99)] for i in range(0, 5))] for i in range(0, 200)]
30 |
31 | mapping = {
32 | 'id': mapper.m2o(PARTNER_PREFIX, 'id'),
33 | 'name': mapper.val('id', postprocess=lambda x: "Partner %s" % x),
34 | 'phone': mapper.val('id', postprocess=lambda x: "0032%s" % (int(x) * 11)),
35 | 'website': mapper.val('id', postprocess=lambda x: "http://website-%s.com" % x),
36 | 'street': mapper.val('id', postprocess=lambda x: "Street %s" % x),
37 | 'city': mapper.val('id', postprocess=lambda x: "City %s" % x),
38 | 'zip': mapper.val('id', postprocess=lambda x: ("%s" % x).zfill(6)),
39 | 'country_id/id': mapper.const('base.be'),
40 | 'company_type': mapper.const('company'),
41 | 'customer': mapper.val('id', postprocess=lambda x: str(int(x) % 2)),
42 | 'supplier': mapper.val('id', postprocess=lambda x: str((int(x) + 1) % 2)),
43 | 'lang': mapper.const('English'),
44 | 'category_id/id': mapper.m2m(TAG_PREFIX, 'tags')
45 | }
46 |
47 | tag_mapping = {
48 | 'id': mapper.m2m_id_list(TAG_PREFIX, 'tags'),
49 | 'name': mapper.m2m_value_list('tags'),
50 | 'parent_id/id': mapper.const('base.res_partner_category_0'),
51 | }
52 |
53 | processor = transform.Processor(header=header, data=data)
54 | processor.process(tag_mapping, tag_output, {
55 | 'worker': 1,
56 | 'batch_size': 10,
57 | 'model': 'res.partner.category',
58 | }, m2m=True)
59 | processor.process(mapping, output, {
60 | 'worker': 4,
61 | 'batch_size': 100,
62 | 'model': 'res.partner',
63 | })
64 | processor.write_to_file(script, python_exe=EXEC, path='../', encoding="utf-8-sig")
65 |
--------------------------------------------------------------------------------
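The two passes above show the m2m pattern: tag_mapping (run with m2m=True) creates the referenced res.partner.category records, while mapper.m2m expands each comma-separated 'tags' cell into a comma-separated list of external ids sharing one prefix. A rough sketch of that id expansion; the space-to-underscore sanitising rule here is an assumption, not necessarily the library's exact behaviour.

def to_xmlid(prefix, value):
    # Space-to-underscore sanitising is an assumption of this sketch.
    return '%s.%s' % (prefix, value.strip().replace(' ', '_'))

def m2m(prefix, field):
    # Expand 'Tag 3,Tag 7' into 'partner_tag.Tag_3,partner_tag.Tag_7'.
    return lambda line: ','.join(to_xmlid(prefix, v) for v in line[field].split(',') if v)

row = {'tags': 'Tag 3,Tag 7'}
print(m2m('partner_tag', 'tags')(row))
# partner_tag.Tag_3,partner_tag.Tag_7
--------------------------------------------------------------------------------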
/tests/test_merge.py:
--------------------------------------------------------------------------------
1 | '''
2 | Created on 10 dec. 2019
3 |
4 | @author: Thibault Francois
5 | '''
6 | from odoo_csv_tools.lib import transform
7 |
8 |
9 | # Enrich each row of test_merge1.csv with the matching columns of
10 | # test_merge2.csv: master column 'category' is matched against 'name'.
11 | processor = transform.Processor(filename='origin/test_merge1.csv')
12 | processor.join_file("origin/test_merge2.csv", "category", "name")
--------------------------------------------------------------------------------
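join_file enriches the master file with the columns of a second file matched on a key: here, a row of test_merge1.csv whose 'category' equals a 'name' in test_merge2.csv gains that row's 'color'. A conceptual sketch under two assumptions: left-join semantics, and a 'category' column in the master file (its header is not shown here; the call above implies it).

import csv
import io

# Stand-ins for the two files.
master = "id;category\n1;A\n2;B\n"
lookup = "name;color\nA;Red\nB;Blue\n"

by_name = {r['name']: r for r in csv.DictReader(io.StringIO(lookup), delimiter=';')}
for row in csv.DictReader(io.StringIO(master), delimiter=';'):
    row.update(by_name.get(row['category'], {}))
    print(row)
# {'id': '1', 'category': 'A', 'name': 'A', 'color': 'Red'}
# {'id': '2', 'category': 'B', 'name': 'B', 'color': 'Blue'}
--------------------------------------------------------------------------------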
/tests/test_product_v10.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | import os
3 | import sys
4 |
5 | from const import EXEC
6 |
7 | from odoo_csv_tools.lib import mapper
8 | from odoo_csv_tools.lib.transform import ProductProcessorV10
9 |
10 | if len(sys.argv) == 2:
11 | EXEC = sys.argv[1]
12 |
13 | TEMPLATE_PREFIX = "PRODUCT_TEMPLATE"
14 | PRODUCT_PREFIX = "PRODUCT_PRODUCT"
15 | CATEGORY_PREFIX = "PRODUCT_CATEGORY"
16 |
17 | ATTRIBUTE_PREFIX = "PRODUCT_ATTRIBUTE"
18 | ATTRIBUTE_VALUE_PREFIX = "PRODUCT_ATTRIBUTE_VALUE"
19 | ATTRIBUTE_LINE_PREFIX = "PRODUCT_ATTRIBUTE_LINE"
20 |
21 | context = {'create_product_variant': True, 'tracking_disable': True}
22 |
23 | # STEP 1: Read the needed file(s)
24 | processor = ProductProcessorV10('origin%sproduct.csv' % os.sep, delimiter=',')
25 |
26 | # STEP 2: Category and Parent Category
27 | categ_parent_map = {
28 |     'id': mapper.m2o(CATEGORY_PREFIX, 'categoy'),  # 'categoy' [sic]: matches the column name as spelled in product.csv
29 |     'name': mapper.val('categoy'),
30 | }
31 |
32 | categ_map = {
33 | 'id': mapper.m2o(CATEGORY_PREFIX, 'Sub Category'),
34 | 'parent_id/id': mapper.m2o(CATEGORY_PREFIX, 'categoy'),
35 | 'name': mapper.val('Sub Category'),
36 | }
37 |
38 | processor.process(categ_parent_map, 'data%sproduct.category.parent.csv' % os.sep, {'worker': 1, 'batch_size': 5,
39 | 'model': 'product.category'}, 'set')
40 | processor.process(categ_map, 'data%sproduct.category.csv' % os.sep, {'worker': 1, 'batch_size': 20}, 'set')
41 |
42 | # STEP 3: Product Template mapping
43 | template_map = {
44 | 'id': mapper.m2o(TEMPLATE_PREFIX, 'ref'),
45 | 'categ_id/id': mapper.m2o(CATEGORY_PREFIX, 'Sub Category'),
46 | 'standard_price': mapper.num('cost'),
47 | 'lst_price': mapper.num('public_price'),
48 | 'default_code': mapper.val('ref'),
49 | 'name': mapper.val('name'),
50 | }
51 | processor.process(template_map, 'data%sproduct.template.csv' % os.sep, {'worker': 4, 'batch_size': 10,
52 | 'context': context}, 'set')
53 |
54 | # STEP 4: Attribute List
55 | attribute_list = ['Color', 'Gender', 'Size_H', 'Size_W']
56 | processor.process_attribute_data(attribute_list, ATTRIBUTE_PREFIX, 'data%sproduct.attribute.csv' % os.sep,
57 | {'worker': 4, 'batch_size': 10,
58 | 'context': context})
59 | # STEP 5: Attribute Value
60 | attribute_value_mapping = {
61 | 'id': mapper.m2m_id_list(ATTRIBUTE_VALUE_PREFIX, *[mapper.concat_field_value_m2m('_', f) for f in attribute_list]),
62 | 'name': mapper.m2m_value_list(*attribute_list),
63 | 'attribute_id/id': mapper.m2m_id_list(ATTRIBUTE_PREFIX, *[mapper.field(f) for f in attribute_list]),
64 | }
65 | processor.process(attribute_value_mapping, 'data%sproduct.attribute.value.csv' % os.sep, {'worker': 3, 'batch_size': 50,
66 | 'context': context,
67 | 'groupby': 'attribute_id/id'},
68 | m2m=True)
69 |
70 | # STEP 6: Attribute Value Line
71 | line_mapping = {
72 | 'id': mapper.m2m_id_list(ATTRIBUTE_LINE_PREFIX,
73 | *[mapper.concat_mapper_all('_', mapper.field(f), mapper.val('ref')) for f in
74 | attribute_list]),
75 | 'product_tmpl_id/id': mapper.m2o(TEMPLATE_PREFIX, 'ref'),
76 | 'attribute_id/id': mapper.m2m_id_list(ATTRIBUTE_PREFIX, *[mapper.field(f) for f in attribute_list]),
77 | 'value_ids/id': mapper.m2m_id_list(ATTRIBUTE_VALUE_PREFIX,
78 | *[mapper.concat_field_value_m2m('_', f) for f in attribute_list]),
79 | }
80 | context['update_many2many'] = True  # set only for this step: a copy is passed below and the flag is popped after
81 | processor.process(line_mapping, 'data%sproduct.attribute.line.csv' % os.sep, {'worker': 3, 'batch_size': 50,
82 | 'context': dict(context),
83 | 'groupby': 'product_tmpl_id/id'},
84 | m2m=True)
85 | context.pop('update_many2many')
86 |
87 | # STEP 7: Product Variant
88 | product_mapping = {
89 | 'id': mapper.m2o_map(PRODUCT_PREFIX, mapper.concat('_', 'barcode', 'Color', 'Gender', 'Size_H', 'Size_W'),
90 | skip=True),
91 | 'barcode': mapper.val('barcode'),
92 | 'product_tmpl_id/id': mapper.m2o(TEMPLATE_PREFIX, 'ref'),
93 | 'attribute_value_ids/id': mapper.m2m_attribute_value(ATTRIBUTE_VALUE_PREFIX, 'Color', 'Gender', 'Size_H', 'Size_W'),
94 | 'default_code': mapper.val('ref'),
95 | 'standard_price': mapper.num('cost'),
96 | }
97 | processor.process(product_mapping, 'data%sproduct.product.csv' % os.sep, {'worker': 3, 'batch_size': 50,
98 | 'groupby': 'product_tmpl_id/id',
99 | 'context': context}, 'set')
100 |
101 | # STEP 8: Define output and import parameters
102 | processor.write_to_file("4_product_import.sh", python_exe=EXEC, path='../')
103 |
--------------------------------------------------------------------------------
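A note on the 'groupby' option used in steps 6 and 7 above: rows that reference the same product template must land in the same batch, otherwise parallel workers could write one template concurrently. A sketch of that batching idea (illustrative only, not the library's scheduler):

from itertools import groupby

rows = [
    {'product_tmpl_id/id': 'PRODUCT_TEMPLATE.A', 'barcode': '1'},
    {'product_tmpl_id/id': 'PRODUCT_TEMPLATE.A', 'barcode': '2'},
    {'product_tmpl_id/id': 'PRODUCT_TEMPLATE.B', 'barcode': '3'},
]

def key(row):
    return row['product_tmpl_id/id']

# Sort first so groupby sees each template as one contiguous run.
batches = [list(group) for _, group in groupby(sorted(rows, key=key), key=key)]
for batch in batches:
    print([r['barcode'] for r in batch])
# ['1', '2'] then ['3']
--------------------------------------------------------------------------------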
/tests/test_product_v9.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | import os
3 | import sys
4 |
5 | from const import EXEC
6 |
7 | from odoo_csv_tools.lib import mapper
8 | from odoo_csv_tools.lib.transform import ProductProcessorV9
9 |
10 | if len(sys.argv) == 2:
11 | EXEC = sys.argv[1]
12 |
13 | TEMPLATE_PREFIX = "PRODUCT_TEMPLATE"
14 | PRODUCT_PREFIX = "PRODUCT_PRODUCT"
15 | CATEGORY_PREFIX = "PRODUCT_CATEGORY"
16 |
17 | ATTRIBUTE_PREFIX = "PRODUCT_ATTRIBUTE"
18 | ATTRIBUTE_VALUE_PREFIX = "PRODUCT_ATTRIBUTE_VALUE"
19 | # Context passed to every load: auto-create product variants, disable tracking
20 | context = {'create_product_variant': True, 'tracking_disable': True}
21 |
22 | # STEP 1: Read the needed file(s)
23 | processor = ProductProcessorV9('origin%sproduct.csv' % os.sep, delimiter=',')
24 |
25 | # STEP 2: Category and Parent Category
26 | categ_parent_map = {
27 |     'id': mapper.m2o(CATEGORY_PREFIX, 'categoy'),  # 'categoy' [sic]: matches the column name as spelled in product.csv
28 |     'name': mapper.val('categoy'),
29 | }
30 |
31 | categ_map = {
32 | 'id': mapper.m2o(CATEGORY_PREFIX, 'Sub Category'),
33 | 'parent_id/id': mapper.m2o(CATEGORY_PREFIX, 'categoy'),
34 | 'name': mapper.val('Sub Category'),
35 | }
36 |
37 | processor.process(categ_parent_map, 'data%sproduct.category.parent.csv' % os.sep,
38 | {'worker': 1, 'batch_size': 5, 'model': 'product.category'}, 'set')
39 | processor.process(categ_map, 'data%sproduct.category.csv' % os.sep, {'worker': 1, 'batch_size': 20}, 'set')
40 |
41 | # STEP 3: Product Template mapping
42 | template_map = {
43 | 'id': mapper.m2o(TEMPLATE_PREFIX, 'ref'),
44 | 'categ_id/id': mapper.m2o(CATEGORY_PREFIX, 'Sub Category'),
45 | 'standard_price': mapper.num('cost'),
46 | 'lst_price': mapper.num('public_price'),
47 | 'default_code': mapper.val('ref'),
48 | 'name': mapper.val('name'),
49 | }
50 | processor.process(template_map, 'data%sproduct.template.csv' % os.sep,
51 | {'worker': 4, 'batch_size': 10, 'context': context}, 'set')
52 |
53 | # STEP 4: Attribute List
54 | attribute_list = ['Color', 'Gender', 'Size_H', 'Size_W']
55 | attribute_value_mapping = {
56 | 'id': mapper.m2o_att(ATTRIBUTE_VALUE_PREFIX, attribute_list), # TODO
57 | 'name': mapper.val_att(attribute_list), # TODO
58 | 'attribute_id/id': mapper.m2o_att_name(ATTRIBUTE_PREFIX, attribute_list),
59 | }
60 |
61 | line_mapping = {
62 | 'product_tmpl_id/id': mapper.m2o(TEMPLATE_PREFIX, 'ref'),
63 | 'attribute_id/id': mapper.m2o_att_name(ATTRIBUTE_PREFIX, attribute_list),
64 | 'value_ids/id': mapper.m2o_att(ATTRIBUTE_VALUE_PREFIX, attribute_list) # TODO
65 | }
66 | processor.process_attribute_mapping(attribute_value_mapping, line_mapping, attribute_list, ATTRIBUTE_PREFIX, 'data/',
67 | {'worker': 3, 'batch_size': 50, 'context': context})
68 |
69 | # STEP 5: Product Variant
70 | product_mapping = {
71 | 'id': mapper.m2o_map(PRODUCT_PREFIX, mapper.concat('_', 'barcode', 'Color', 'Gender', 'Size_H', 'Size_W'),
72 | skip=True),
73 | 'barcode': mapper.val('barcode'),
74 | 'product_tmpl_id/id': mapper.m2o(TEMPLATE_PREFIX, 'ref'),
75 | 'attribute_value_ids/id': mapper.m2m_attribute_value(ATTRIBUTE_VALUE_PREFIX, 'Color', 'Gender', 'Size_H', 'Size_W'),
76 | }
77 | processor.process(product_mapping, 'data%sproduct.product.csv' % os.sep,
78 | {'worker': 3, 'batch_size': 50, 'groupby': 'product_tmpl_id/id', 'context': context}, 'set')
79 |
80 | # STEP 6: Define output and import parameters
81 | processor.write_to_file("3_product_import.sh", python_exe=EXEC, path='../')
82 |
--------------------------------------------------------------------------------
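Both product scripts build the variant's external id by concatenating several source fields into one deterministic key. A sketch of what a concat-style mapper does, as an illustrative reimplementation rather than the library's code:

def concat(sep, *fields):
    # Join the non-empty source fields with a separator.
    return lambda line: sep.join(line[f] for f in fields if line[f])

row = {'barcode': '123', 'Color': 'Red', 'Gender': 'M', 'Size_H': '180', 'Size_W': '44'}
print(concat('_', 'barcode', 'Color', 'Gender', 'Size_H', 'Size_W')(row))
# 123_Red_M_180_44
--------------------------------------------------------------------------------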
/tests/test_split.py:
--------------------------------------------------------------------------------
1 | '''
2 | Created on 14 sept. 2016
3 |
4 | @author: mythrys
5 | '''
6 | import random
7 |
8 | from odoo_csv_tools.lib import mapper
9 | from odoo_csv_tools.lib import transform
10 |
11 | PARTNER_PREFIX = 'partner_generated'
12 | TAG_PREFIX = 'partner_tag'
13 | output = 'data/res.partner.generated.csv'
14 | tag_output = 'data/res.partner.category.csv'
15 | script = '1_partner_split.sh'
16 |
17 | tags = ["Tag %s" % i for i in range(0, 100)]
18 |
19 | header = ['id', 'tags']
20 | data = [[str(i), ','.join(tags[random.randint(0, 99)] for _ in range(0, 5))] for i in range(0, 10000)]  # 10,000 partners, 5 random tags each
21 |
22 | mapping = {
23 | 'id': mapper.m2o(PARTNER_PREFIX, 'id'),
24 | 'name': mapper.val('id', postprocess=lambda x: "Partner %s" % x),
25 | 'phone': mapper.val('id', postprocess=lambda x: "0032%s" % (int(x) * 11)),
26 | 'website': mapper.val('id', postprocess=lambda x: "http://website-%s.com" % x),
27 | 'street': mapper.val('id', postprocess=lambda x: "Street %s" % x),
28 | 'city': mapper.val('id', postprocess=lambda x: "City %s" % x),
29 | 'zip': mapper.val('id', postprocess=lambda x: ("%s" % x).zfill(6)),
30 | 'country_id/id': mapper.const('base.be'),
31 | 'company_type': mapper.const('company'),
32 | 'customer': mapper.val('id', postprocess=lambda x: str(int(x) % 2)),
33 | 'supplier': mapper.val('id', postprocess=lambda x: str((int(x) + 1) % 2)),
34 | 'lang': mapper.const('English'),
35 | 'category_id/id': mapper.m2m(TAG_PREFIX, 'tags')
36 | }
37 |
38 | tag_mapping = {
39 | 'id': mapper.m2m_id_list(TAG_PREFIX, 'tags'),
40 | 'name': mapper.m2m_value_list('tags'),
41 | 'parent_id/id': mapper.const('base.res_partner_category_0'),
42 | }
43 |
44 | processor = transform.Processor(header=header, data=data)
45 | p_dict = processor.split(mapper.split_line_number(1000))  # result discarded below; called only to exercise split_line_number for coverage
46 | p_dict = processor.split(mapper.split_file_number(8))
47 | processor.process(tag_mapping, tag_output, {
48 | 'worker': 1, # OPTIONAL
49 | 'batch_size': 10, # OPTIONAL
50 | 'model': 'res.partner.category',
51 | }, m2m=True)
52 | processor.write_to_file(script, path='../')  # first call creates the script; the loop below appends to it
53 | for index, p in p_dict.items():
54 | p.process(mapping, '%s.%s' % (output, index), {
55 | 'worker': 4, # OPTIONAL
56 | 'batch_size': 100, # OPTIONAL
57 | 'model': 'res.partner',
58 | })
59 | p.write_to_file(script, path='../', append=True)
60 |
--------------------------------------------------------------------------------
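test_split.py spreads 10,000 generated rows over eight output files so eight import runs can proceed side by side. A sketch of the bucketing idea behind split_file_number; the (index, line) signature of the split function and the modulo rule are assumptions of this sketch, not the library's documented contract:

def split_file_number(n):
    # Bucket line i into file i % n.
    return lambda index, line: index % n

chooser = split_file_number(4)
buckets = {}
for index, line in enumerate([str(i)] for i in range(10)):
    buckets.setdefault(chooser(index, line), []).append(line)
print({bucket: len(lines) for bucket, lines in buckets.items()})
# {0: 3, 1: 3, 2: 2, 3: 2}
--------------------------------------------------------------------------------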