├── .readthedocs.yaml
├── LICENSE
├── README.md
├── deeph
│   ├── __init__.py
│   ├── data.py
│   ├── default.ini
│   ├── from_HermNet
│   │   ├── __init__.py
│   │   ├── license.txt
│   │   └── rmnet.py
│   ├── from_PyG_future
│   │   ├── __init__.py
│   │   ├── diff_group_norm.py
│   │   ├── graph_norm.py
│   │   └── license.txt
│   ├── from_pymatgen
│   │   ├── __init__.py
│   │   ├── lattice.py
│   │   └── license.txt
│   ├── from_schnetpack
│   │   ├── __init__.py
│   │   ├── acsf.py
│   │   └── license.txt
│   ├── from_se3_transformer
│   │   ├── __init__.py
│   │   ├── license.txt
│   │   └── representations.py
│   ├── graph.py
│   ├── inference
│   │   ├── __init__.py
│   │   ├── band_config.json
│   │   ├── dense_calc.jl
│   │   ├── dense_calc.py
│   │   ├── inference_default.ini
│   │   ├── local_coordinate.jl
│   │   ├── pred_ham.py
│   │   ├── restore_blocks.jl
│   │   └── sparse_calc.jl
│   ├── kernel.py
│   ├── model.py
│   ├── preprocess
│   │   ├── __init__.py
│   │   ├── abacus_get_data.py
│   │   ├── aims_get_data.jl
│   │   ├── get_rc.py
│   │   ├── openmx_get_data.jl
│   │   ├── openmx_parse.py
│   │   ├── periodic_table.json
│   │   ├── preprocess_default.ini
│   │   └── siesta_get_data.py
│   ├── rotate.py
│   ├── scripts
│   │   ├── __init__.py
│   │   ├── evaluate.py
│   │   ├── inference.py
│   │   ├── preprocess.py
│   │   └── train.py
│   └── utils.py
├── docs
│   ├── Makefile
│   ├── make.bat
│   ├── requirements.txt
│   └── source
│       ├── _static
│       │   ├── logo-white.png
│       │   └── logo.png
│       ├── conf.py
│       ├── dataset
│       │   └── dataset.rst
│       ├── demo
│       │   ├── demo1.md
│       │   ├── demo2.md
│       │   └── demo3.md
│       ├── index.rst
│       ├── inference
│       │   └── inference.md
│       ├── installation
│       │   ├── abacus.md
│       │   ├── aims.md
│       │   ├── installation.rst
│       │   ├── openmx.md
│       │   └── siesta.md
│       ├── keyword
│       │   ├── inference.md
│       │   ├── keyword.rst
│       │   ├── preprocess.md
│       │   └── train.md
│       ├── preprocess
│       │   └── preprocess.md
│       └── train
│           └── train.md
├── gen_example.py
├── ini
│   ├── MoS2_1.ini
│   ├── MoS2_2.ini
│   ├── MoS2_3.ini
│   ├── MoS2_4.ini
│   ├── TBB.ini
│   ├── TBG.ini
│   └── graphene.ini
├── logo
│   └── logo_word.svg
├── setup.py
└── tools
    └── get_all_orbital_str.py
/.readthedocs.yaml:
--------------------------------------------------------------------------------
1 | version: 2
2 |
3 | build:
4 | os: "ubuntu-20.04"
5 | tools:
6 | python: "3.9"
7 |
8 | sphinx:
9 | configuration: docs/source/conf.py
10 |
11 | python:
12 | install:
13 | - requirements: docs/requirements.txt
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | GNU LESSER GENERAL PUBLIC LICENSE
2 | Version 3, 29 June 2007
3 |
4 |  Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
5 | Everyone is permitted to copy and distribute verbatim copies
6 | of this license document, but changing it is not allowed.
7 |
8 |
9 | This version of the GNU Lesser General Public License incorporates
10 | the terms and conditions of version 3 of the GNU General Public
11 | License, supplemented by the additional permissions listed below.
12 |
13 | 0. Additional Definitions.
14 |
15 | As used herein, "this License" refers to version 3 of the GNU Lesser
16 | General Public License, and the "GNU GPL" refers to version 3 of the GNU
17 | General Public License.
18 |
19 | "The Library" refers to a covered work governed by this License,
20 | other than an Application or a Combined Work as defined below.
21 |
22 | An "Application" is any work that makes use of an interface provided
23 | by the Library, but which is not otherwise based on the Library.
24 | Defining a subclass of a class defined by the Library is deemed a mode
25 | of using an interface provided by the Library.
26 |
27 | A "Combined Work" is a work produced by combining or linking an
28 | Application with the Library. The particular version of the Library
29 | with which the Combined Work was made is also called the "Linked
30 | Version".
31 |
32 | The "Minimal Corresponding Source" for a Combined Work means the
33 | Corresponding Source for the Combined Work, excluding any source code
34 | for portions of the Combined Work that, considered in isolation, are
35 | based on the Application, and not on the Linked Version.
36 |
37 | The "Corresponding Application Code" for a Combined Work means the
38 | object code and/or source code for the Application, including any data
39 | and utility programs needed for reproducing the Combined Work from the
40 | Application, but excluding the System Libraries of the Combined Work.
41 |
42 | 1. Exception to Section 3 of the GNU GPL.
43 |
44 | You may convey a covered work under sections 3 and 4 of this License
45 | without being bound by section 3 of the GNU GPL.
46 |
47 | 2. Conveying Modified Versions.
48 |
49 | If you modify a copy of the Library, and, in your modifications, a
50 | facility refers to a function or data to be supplied by an Application
51 | that uses the facility (other than as an argument passed when the
52 | facility is invoked), then you may convey a copy of the modified
53 | version:
54 |
55 | a) under this License, provided that you make a good faith effort to
56 | ensure that, in the event an Application does not supply the
57 | function or data, the facility still operates, and performs
58 | whatever part of its purpose remains meaningful, or
59 |
60 | b) under the GNU GPL, with none of the additional permissions of
61 | this License applicable to that copy.
62 |
63 | 3. Object Code Incorporating Material from Library Header Files.
64 |
65 | The object code form of an Application may incorporate material from
66 | a header file that is part of the Library. You may convey such object
67 | code under terms of your choice, provided that, if the incorporated
68 | material is not limited to numerical parameters, data structure
69 | layouts and accessors, or small macros, inline functions and templates
70 | (ten or fewer lines in length), you do both of the following:
71 |
72 | a) Give prominent notice with each copy of the object code that the
73 | Library is used in it and that the Library and its use are
74 | covered by this License.
75 |
76 | b) Accompany the object code with a copy of the GNU GPL and this license
77 | document.
78 |
79 | 4. Combined Works.
80 |
81 | You may convey a Combined Work under terms of your choice that,
82 | taken together, effectively do not restrict modification of the
83 | portions of the Library contained in the Combined Work and reverse
84 | engineering for debugging such modifications, if you also do each of
85 | the following:
86 |
87 | a) Give prominent notice with each copy of the Combined Work that
88 | the Library is used in it and that the Library and its use are
89 | covered by this License.
90 |
91 | b) Accompany the Combined Work with a copy of the GNU GPL and this license
92 | document.
93 |
94 | c) For a Combined Work that displays copyright notices during
95 | execution, include the copyright notice for the Library among
96 | these notices, as well as a reference directing the user to the
97 | copies of the GNU GPL and this license document.
98 |
99 | d) Do one of the following:
100 |
101 | 0) Convey the Minimal Corresponding Source under the terms of this
102 | License, and the Corresponding Application Code in a form
103 | suitable for, and under terms that permit, the user to
104 | recombine or relink the Application with a modified version of
105 | the Linked Version to produce a modified Combined Work, in the
106 | manner specified by section 6 of the GNU GPL for conveying
107 | Corresponding Source.
108 |
109 | 1) Use a suitable shared library mechanism for linking with the
110 | Library. A suitable mechanism is one that (a) uses at run time
111 | a copy of the Library already present on the user's computer
112 | system, and (b) will operate properly with a modified version
113 | of the Library that is interface-compatible with the Linked
114 | Version.
115 |
116 | e) Provide Installation Information, but only if you would otherwise
117 | be required to provide such information under section 6 of the
118 | GNU GPL, and only to the extent that such information is
119 | necessary to install and execute a modified version of the
120 | Combined Work produced by recombining or relinking the
121 | Application with a modified version of the Linked Version. (If
122 | you use option 4d0, the Installation Information must accompany
123 | the Minimal Corresponding Source and Corresponding Application
124 | Code. If you use option 4d1, you must provide the Installation
125 | Information in the manner specified by section 6 of the GNU GPL
126 | for conveying Corresponding Source.)
127 |
128 | 5. Combined Libraries.
129 |
130 | You may place library facilities that are a work based on the
131 | Library side by side in a single library together with other library
132 | facilities that are not Applications and are not covered by this
133 | License, and convey such a combined library under terms of your
134 | choice, if you do both of the following:
135 |
136 | a) Accompany the combined library with a copy of the same work based
137 | on the Library, uncombined with any other library facilities,
138 | conveyed under the terms of this License.
139 |
140 | b) Give prominent notice with the combined library that part of it
141 | is a work based on the Library, and explaining where to find the
142 | accompanying uncombined form of the same work.
143 |
144 | 6. Revised Versions of the GNU Lesser General Public License.
145 |
146 | The Free Software Foundation may publish revised and/or new versions
147 | of the GNU Lesser General Public License from time to time. Such new
148 | versions will be similar in spirit to the present version, but may
149 | differ in detail to address new problems or concerns.
150 |
151 | Each version is given a distinguishing version number. If the
152 | Library as you received it specifies that a certain numbered version
153 | of the GNU Lesser General Public License "or any later version"
154 | applies to it, you have the option of following the terms and
155 | conditions either of that published version or of any later version
156 | published by the Free Software Foundation. If the Library as you
157 | received it does not specify a version number of the GNU Lesser
158 | General Public License, you may choose any version of the GNU Lesser
159 | General Public License ever published by the Free Software Foundation.
160 |
161 | If the Library as you received it specifies that a proxy can decide
162 | whether future versions of the GNU Lesser General Public License shall
163 | apply, that proxy's public statement of acceptance of any version is
164 | permanent authorization for you to choose that version for the
165 | Library.
--------------------------------------------------------------------------------
/deeph/__init__.py:
--------------------------------------------------------------------------------
1 | from .data import HData
2 | from .model import HGNN, ExpBernsteinBasis
3 | from .utils import print_args, Logger, MaskMSELoss, MaskMAELoss, write_ham_npz, write_ham, write_ham_h5, get_config, \
4 | get_inference_config, get_preprocess_config
5 | from .graph import Collater, collate_fn, get_graph, load_orbital_types
6 | from .kernel import DeepHKernel
7 | from .preprocess import get_rc, OijLoad, GetEEiEij, abacus_parse, siesta_parse
8 | from .rotate import get_rh, rotate_back, Rotate, dtype_dict
9 |
10 | __version__ = "0.2.2"
11 |
--------------------------------------------------------------------------------
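
A minimal usage sketch of the public API re-exported above; only names visible in this __init__.py are used, and the snippet assumes the package is installed:

    import deeph
    from deeph import HData, HGNN, DeepHKernel, get_graph

    print(deeph.__version__)  # "0.2.2"
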
/deeph/data.py:
--------------------------------------------------------------------------------
1 | import warnings
2 | import os
3 | import time
4 | import tqdm
5 |
6 | from pymatgen.core.structure import Structure
7 | import numpy as np
8 | import torch
9 | from torch_geometric.data import InMemoryDataset
10 | from pathos.multiprocessing import ProcessingPool as Pool
11 |
12 | from .graph import get_graph
13 |
14 |
15 | class HData(InMemoryDataset):
16 | def __init__(self, raw_data_dir: str, graph_dir: str, interface: str, target: str,
17 | dataset_name: str, multiprocessing: int, radius, max_num_nbr,
18 | num_l, max_element, create_from_DFT, if_lcmp_graph, separate_onsite, new_sp,
19 | default_dtype_torch, nums: int = None, transform=None, pre_transform=None, pre_filter=None):
20 | """
21 | when interface == 'h5',
22 | raw_data_dir
23 | ├── 00
24 | │ ├──rh.h5 / rdm.h5
25 | │ ├──rc.h5
26 | │ ├──element.dat
27 | │ ├──orbital_types.dat
28 | │ ├──site_positions.dat
29 | │ ├──lat.dat
30 | │ └──info.json
31 | ├── 01
32 | │ ├──rh.h5 / rdm.h5
33 | │ ├──rc.h5
34 | │ ├──element.dat
35 | │ ├──orbital_types.dat
36 | │ ├──site_positions.dat
37 | │ ├──lat.dat
38 | │ └──info.json
39 | ├── 02
40 | │ ├──rh.h5 / rdm.h5
41 | │ ├──rc.h5
42 | │ ├──element.dat
43 | │ ├──orbital_types.dat
44 | │ ├──site_positions.dat
45 | │ ├──lat.dat
46 | │ └──info.json
47 | ├── ...
48 | """
49 | self.raw_data_dir = raw_data_dir
50 |         assert dataset_name.find('-') == -1, '"-" cannot be included in the dataset name'
51 | if create_from_DFT:
52 | way_create_graph = 'FromDFT'
53 | else:
54 | way_create_graph = f'{radius}r{max_num_nbr}mn'
55 | if if_lcmp_graph:
56 | lcmp_str = f'{num_l}l'
57 | else:
58 | lcmp_str = 'WithoutLCMP'
59 | if separate_onsite is True:
60 | onsite_str = '-SeparateOnsite'
61 | else:
62 | onsite_str = ''
63 | if new_sp:
64 | new_sp_str = '-NewSP'
65 | else:
66 | new_sp_str = ''
67 | if target == 'hamiltonian':
68 | title = 'HGraph'
69 | else:
70 | raise ValueError('Unknown prediction target: {}'.format(target))
71 | graph_file_name = f'{title}-{interface}-{dataset_name}-{lcmp_str}-{way_create_graph}{onsite_str}{new_sp_str}.pkl'
72 | self.data_file = os.path.join(graph_dir, graph_file_name)
73 | os.makedirs(graph_dir, exist_ok=True)
74 | self.data, self.slices = None, None
75 | self.interface = interface
76 | self.target = target
77 | self.dataset_name = dataset_name
78 | self.multiprocessing = multiprocessing
79 | self.radius = radius
80 | self.max_num_nbr = max_num_nbr
81 | self.num_l = num_l
82 | self.create_from_DFT = create_from_DFT
83 | self.if_lcmp_graph = if_lcmp_graph
84 | self.separate_onsite = separate_onsite
85 | self.new_sp = new_sp
86 | self.default_dtype_torch = default_dtype_torch
87 |
88 | self.nums = nums
89 | self.transform = transform
90 | self.pre_transform = pre_transform
91 | self.pre_filter = pre_filter
92 | self.__indices__ = None
93 | self.__data_list__ = None
94 | self._indices = None
95 | self._data_list = None
96 |
97 | print(f'Graph data file: {graph_file_name}')
98 | if os.path.exists(self.data_file):
99 | print('Use existing graph data file')
100 | else:
101 |             print('Processing new data file...')
102 | self.process()
103 | begin = time.time()
104 | try:
105 | loaded_data = torch.load(self.data_file)
106 | except AttributeError:
107 |             raise RuntimeError('Failed to load the graph data file; try deleting it and regenerating it with the current version of PyG')
108 | if len(loaded_data) == 2:
109 |             warnings.warn('You are using an old version of the graph data file')
110 | self.data, self.slices = loaded_data
111 | self.info = {
112 | "spinful": False,
113 | "index_to_Z": torch.arange(max_element + 1),
114 | "Z_to_index": torch.arange(max_element + 1),
115 | }
116 | elif len(loaded_data) == 3:
117 | self.data, self.slices, tmp = loaded_data
118 | if isinstance(tmp, dict):
119 | self.info = tmp
120 | print(f"Atomic types: {self.info['index_to_Z'].tolist()}")
121 | else:
122 | warnings.warn('You are using an old version of the graph data file')
123 | self.info = {
124 | "spinful": tmp,
125 | "index_to_Z": torch.arange(max_element + 1),
126 | "Z_to_index": torch.arange(max_element + 1),
127 | }
128 |         print(f'Finished loading {len(self)} processed structures (spinful: {self.info["spinful"]}, '
129 |               f'number of atomic types: {len(self.info["index_to_Z"])}) in {time.time() - begin:.0f} seconds')
130 |
131 | def process_worker(self, folder, **kwargs):
132 | stru_id = os.path.split(folder)[-1]
133 |
134 | structure = Structure(np.loadtxt(os.path.join(folder, 'lat.dat')).T,
135 | np.loadtxt(os.path.join(folder, 'element.dat')),
136 | np.loadtxt(os.path.join(folder, 'site_positions.dat')).T,
137 | coords_are_cartesian=True,
138 | to_unit_cell=False)
139 |
140 | cart_coords = torch.tensor(structure.cart_coords, dtype=self.default_dtype_torch)
141 | frac_coords = torch.tensor(structure.frac_coords, dtype=self.default_dtype_torch)
142 | numbers = torch.tensor(structure.atomic_numbers)
143 | structure.lattice.matrix.setflags(write=True)
144 | lattice = torch.tensor(structure.lattice.matrix, dtype=self.default_dtype_torch)
145 | if self.target == 'E_ij':
146 | huge_structure = True
147 | else:
148 | huge_structure = False
149 | return get_graph(cart_coords, frac_coords, numbers, stru_id, r=self.radius, max_num_nbr=self.max_num_nbr,
150 | numerical_tol=1e-8, lattice=lattice, default_dtype_torch=self.default_dtype_torch,
151 | tb_folder=folder, interface=self.interface, num_l=self.num_l,
152 | create_from_DFT=self.create_from_DFT, if_lcmp_graph=self.if_lcmp_graph,
153 | separate_onsite=self.separate_onsite,
154 | target=self.target, huge_structure=huge_structure, if_new_sp=self.new_sp, **kwargs)
155 |
156 | def process(self):
157 | begin = time.time()
158 | folder_list = []
159 | for root, dirs, files in os.walk(self.raw_data_dir):
160 | if (self.interface == 'h5' and 'rc.h5' in files) or (
161 | self.interface == 'npz' and 'rc.npz' in files):
162 | folder_list.append(root)
163 | folder_list = sorted(folder_list)
164 | folder_list = folder_list[: self.nums]
165 | if self.dataset_name == 'graphene_450':
166 | folder_list = folder_list[500:5000:10]
167 | if self.dataset_name == 'graphene_1500':
168 | folder_list = folder_list[500:5000:3]
169 | if self.dataset_name == 'bp_bilayer':
170 | folder_list = folder_list[:600]
171 |         assert len(folder_list) != 0, "Cannot find any structure"
172 |         print('Found %d structures in %d seconds' % (len(folder_list), time.time() - begin))
173 |
174 | if self.multiprocessing == 0:
175 | print(f'Use multiprocessing (nodes = num_processors x num_threads = 1 x {torch.get_num_threads()})')
176 | data_list = [self.process_worker(folder) for folder in tqdm.tqdm(folder_list)]
177 | else:
178 | pool_dict = {} if self.multiprocessing < 0 else {'nodes': self.multiprocessing}
179 |             # BS (2023.06.06):
180 |             # The keyword "num_threads" in kernel.py can be used to set the number of torch threads.
181 |             # Multiprocessing in "process_worker" conflicts with the num_threads setting used by torch.
182 |             # To avoid this conflict, limit the number of torch threads to one,
183 |             # and restore it once process_worker finishes.
184 | torch_num_threads = torch.get_num_threads()
185 | torch.set_num_threads(1)
186 |
187 | with Pool(**pool_dict) as pool:
188 | nodes = pool.nodes
189 | print(f'Use multiprocessing (nodes = num_processors x num_threads = {nodes} x {torch.get_num_threads()})')
190 | data_list = list(tqdm.tqdm(pool.imap(self.process_worker, folder_list), total=len(folder_list)))
191 | torch.set_num_threads(torch_num_threads)
192 |         print('Finished processing %d structures in %d seconds' % (len(data_list), time.time() - begin))
193 |
194 | if self.pre_filter is not None:
195 | data_list = [d for d in data_list if self.pre_filter(d)]
196 | if self.pre_transform is not None:
197 | data_list = [self.pre_transform(d) for d in data_list]
198 |
199 | index_to_Z, Z_to_index = self.element_statistics(data_list)
200 | spinful = data_list[0].spinful
201 | for d in data_list:
202 | assert spinful == d.spinful
203 |
204 | data, slices = self.collate(data_list)
205 | torch.save((data, slices, dict(spinful=spinful, index_to_Z=index_to_Z, Z_to_index=Z_to_index)), self.data_file)
206 |         print('Finished saving %d structures to %s in %d seconds' % (
207 | len(data_list), self.data_file, time.time() - begin))
208 |
209 | def element_statistics(self, data_list):
210 | index_to_Z, inverse_indices = torch.unique(data_list[0].x, sorted=True, return_inverse=True)
211 | Z_to_index = torch.full((100,), -1, dtype=torch.int64)
212 | Z_to_index[index_to_Z] = torch.arange(len(index_to_Z))
213 |
214 | for data in data_list:
215 | data.x = Z_to_index[data.x]
216 |
217 | return index_to_Z, Z_to_index
218 |
--------------------------------------------------------------------------------
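
For orientation, a hedged sketch of constructing HData for the documented 'h5' layout; the paths are placeholders and the argument values are illustrative defaults taken from deeph/default.ini, not a prescribed recipe:

    import torch
    from deeph.data import HData

    # expects raw/00, raw/01, ... each containing rh.h5/rdm.h5, rc.h5, element.dat,
    # orbital_types.dat, site_positions.dat, lat.dat and info.json (see docstring above)
    dataset = HData(
        raw_data_dir='raw',            # placeholder path
        graph_dir='graph',             # the processed .pkl graph file is cached here
        interface='h5',
        target='hamiltonian',          # the only target the code above accepts
        dataset_name='my_dataset',     # must not contain '-'
        multiprocessing=0,             # 0: serial processing
        radius=-1.0, max_num_nbr=0,    # ignored when create_from_DFT=True
        num_l=5, max_element=-1,
        create_from_DFT=True, if_lcmp_graph=True,
        separate_onsite=False, new_sp=False,
        default_dtype_torch=torch.float32,
    )
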
/deeph/default.ini:
--------------------------------------------------------------------------------
1 | [basic]
2 | graph_dir = /your/own/path
3 | save_dir = /your/own/path
4 | raw_dir = /your/own/path
5 | dataset_name = your_own_name
6 | only_get_graph = False
7 | ;choices = ['h5', 'npz']
8 | interface = h5
9 | target = hamiltonian
10 | disable_cuda = False
11 | device = cuda:0
12 | ;-1 for cpu_count(logical=False) // torch.cuda.device_count()
13 | num_threads = -1
14 | save_to_time_folder = True
15 | save_csv = False
16 | tb_writer = True
17 | seed = 42
18 | multiprocessing = 0
19 | orbital = [{"6 6": [0, 0]}, {"6 6": [0, 1]}, {"6 6": [0, 2]}, {"6 6": [0, 3]}, {"6 6": [0, 4]}, {"6 6": [0, 5]}, {"6 6": [0, 6]}, {"6 6": [0, 7]}, {"6 6": [0, 8]}, {"6 6": [0, 9]}, {"6 6": [0, 10]}, {"6 6": [0, 11]}, {"6 6": [0, 12]}, {"6 6": [1, 0]}, {"6 6": [1, 1]}, {"6 6": [1, 2]}, {"6 6": [1, 3]}, {"6 6": [1, 4]}, {"6 6": [1, 5]}, {"6 6": [1, 6]}, {"6 6": [1, 7]}, {"6 6": [1, 8]}, {"6 6": [1, 9]}, {"6 6": [1, 10]}, {"6 6": [1, 11]}, {"6 6": [1, 12]}, {"6 6": [2, 0]}, {"6 6": [2, 1]}, {"6 6": [2, 2]}, {"6 6": [2, 3]}, {"6 6": [2, 4]}, {"6 6": [2, 5]}, {"6 6": [2, 6]}, {"6 6": [2, 7]}, {"6 6": [2, 8]}, {"6 6": [2, 9]}, {"6 6": [2, 10]}, {"6 6": [2, 11]}, {"6 6": [2, 12]}, {"6 6": [3, 0]}, {"6 6": [3, 1]}, {"6 6": [3, 2]}, {"6 6": [3, 3]}, {"6 6": [3, 4]}, {"6 6": [3, 5]}, {"6 6": [3, 6]}, {"6 6": [3, 7]}, {"6 6": [3, 8]}, {"6 6": [3, 9]}, {"6 6": [3, 10]}, {"6 6": [3, 11]}, {"6 6": [3, 12]}, {"6 6": [4, 0]}, {"6 6": [4, 1]}, {"6 6": [4, 2]}, {"6 6": [4, 3]}, {"6 6": [4, 4]}, {"6 6": [4, 5]}, {"6 6": [4, 6]}, {"6 6": [4, 7]}, {"6 6": [4, 8]}, {"6 6": [4, 9]}, {"6 6": [4, 10]}, {"6 6": [4, 11]}, {"6 6": [4, 12]}, {"6 6": [5, 0]}, {"6 6": [5, 1]}, {"6 6": [5, 2]}, {"6 6": [5, 3]}, {"6 6": [5, 4]}, {"6 6": [5, 5]}, {"6 6": [5, 6]}, {"6 6": [5, 7]}, {"6 6": [5, 8]}, {"6 6": [5, 9]}, {"6 6": [5, 10]}, {"6 6": [5, 11]}, {"6 6": [5, 12]}, {"6 6": [6, 0]}, {"6 6": [6, 1]}, {"6 6": [6, 2]}, {"6 6": [6, 3]}, {"6 6": [6, 4]}, {"6 6": [6, 5]}, {"6 6": [6, 6]}, {"6 6": [6, 7]}, {"6 6": [6, 8]}, {"6 6": [6, 9]}, {"6 6": [6, 10]}, {"6 6": [6, 11]}, {"6 6": [6, 12]}, {"6 6": [7, 0]}, {"6 6": [7, 1]}, {"6 6": [7, 2]}, {"6 6": [7, 3]}, {"6 6": [7, 4]}, {"6 6": [7, 5]}, {"6 6": [7, 6]}, {"6 6": [7, 7]}, {"6 6": [7, 8]}, {"6 6": [7, 9]}, {"6 6": [7, 10]}, {"6 6": [7, 11]}, {"6 6": [7, 12]}, {"6 6": [8, 0]}, {"6 6": [8, 1]}, {"6 6": [8, 2]}, {"6 6": [8, 3]}, {"6 6": [8, 4]}, {"6 6": [8, 5]}, {"6 6": [8, 6]}, {"6 6": [8, 7]}, {"6 6": [8, 8]}, {"6 6": [8, 9]}, {"6 6": [8, 10]}, {"6 6": [8, 11]}, {"6 6": [8, 12]}, {"6 6": [9, 0]}, {"6 6": [9, 1]}, {"6 6": [9, 2]}, {"6 6": [9, 3]}, {"6 6": [9, 4]}, {"6 6": [9, 5]}, {"6 6": [9, 6]}, {"6 6": [9, 7]}, {"6 6": [9, 8]}, {"6 6": [9, 9]}, {"6 6": [9, 10]}, {"6 6": [9, 11]}, {"6 6": [9, 12]}, {"6 6": [10, 0]}, {"6 6": [10, 1]}, {"6 6": [10, 2]}, {"6 6": [10, 3]}, {"6 6": [10, 4]}, {"6 6": [10, 5]}, {"6 6": [10, 6]}, {"6 6": [10, 7]}, {"6 6": [10, 8]}, {"6 6": [10, 9]}, {"6 6": [10, 10]}, {"6 6": [10, 11]}, {"6 6": [10, 12]}, {"6 6": [11, 0]}, {"6 6": [11, 1]}, {"6 6": [11, 2]}, {"6 6": [11, 3]}, {"6 6": [11, 4]}, {"6 6": [11, 5]}, {"6 6": [11, 6]}, {"6 6": [11, 7]}, {"6 6": [11, 8]}, {"6 6": [11, 9]}, {"6 6": [11, 10]}, {"6 6": [11, 11]}, {"6 6": [11, 12]}, {"6 6": [12, 0]}, {"6 6": [12, 1]}, {"6 6": [12, 2]}, {"6 6": [12, 3]}, {"6 6": [12, 4]}, {"6 6": [12, 5]}, {"6 6": [12, 6]}, {"6 6": [12, 7]}, {"6 6": [12, 8]}, {"6 6": [12, 9]}, {"6 6": [12, 10]}, {"6 6": [12, 11]}, {"6 6": [12, 12]}]
20 | O_component = H
21 | energy_component = summation
22 | max_element = -1
23 | statistics = False
24 | normalizer = False
25 | boxcox = False
26 |
27 | [graph]
28 | radius = -1.0
29 | max_num_nbr = 0
30 | create_from_DFT = True
31 | if_lcmp_graph = True
32 | separate_onsite = False
33 | new_sp = False
34 |
35 | [train]
36 | epochs = 4000
37 | pretrained =
38 | resume =
39 | train_ratio = 0.6
40 | val_ratio = 0.2
41 | test_ratio = 0.2
42 | early_stopping_loss = 0.0
43 | early_stopping_loss_epoch = [0.000000, 500]
44 | revert_then_decay = True
45 | revert_threshold = 30
46 | revert_decay_epoch = [500, 2000, 3000]
47 | revert_decay_gamma = [0.4, 0.5, 0.5]
48 | clip_grad = True
49 | clip_grad_value = 4.2
50 | switch_sgd = False
51 | switch_sgd_lr = 1e-4
52 | switch_sgd_epoch = -1
53 |
54 | [hyperparameter]
55 | batch_size = 3
56 | dtype = float32
57 | ;choices = ['sgd', 'sgdm', 'adam', 'lbfgs']
58 | optimizer = adam
59 | ;initial learning rate
60 | learning_rate = 0.001
61 | ;choices = ['', 'MultiStepLR', 'ReduceLROnPlateau', 'CyclicLR']
62 | lr_scheduler =
63 | lr_milestones = []
64 | momentum = 0.9
65 | weight_decay = 0
66 | criterion = MaskMSELoss
67 | retain_edge_fea = True
68 | lambda_Eij = 0.0
69 | lambda_Ei = 0.1
70 | lambda_Etot = 0.0
71 |
72 | [network]
73 | atom_fea_len = 64
74 | edge_fea_len = 128
75 | gauss_stop = 6
76 | ;number of angular momentum quantum numbers (l values) used by the spherical harmonics
77 | num_l = 5
78 | aggr = add
79 | distance_expansion = GaussianBasis
80 | if_exp = True
81 | if_MultipleLinear = False
82 | if_edge_update = True
83 | if_lcmp = True
84 | normalization = LayerNorm
85 | ;choices = ['CGConv', 'GAT', 'PAINN']
86 | atom_update_net = CGConv
87 | trainable_gaussians = False
88 | type_affine = False
89 |
--------------------------------------------------------------------------------
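
The long `orbital` value above enumerates all 13 x 13 = 169 (row, column) entries of the carbon-carbon Hamiltonian block, keyed by the atomic-number pair "6 6". A hedged sketch of how such a value can be generated (the repository ships tools/get_all_orbital_str.py for this job; the snippet below is an illustration, not that script):

    import json

    norb = 13  # orbital basis functions per carbon atom in this basis
    orbital = [{"6 6": [i, j]} for i in range(norb) for j in range(norb)]
    print(json.dumps(orbital))  # reproduces the orbital = [...] value above
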
/deeph/from_HermNet/__init__.py:
--------------------------------------------------------------------------------
1 | from .rmnet import RBF, cosine_cutoff, ShiftedSoftplus, _eps
--------------------------------------------------------------------------------
/deeph/from_HermNet/license.txt:
--------------------------------------------------------------------------------
1 | The code in this folder was obtained from "https://github.com/sakuraiiiii/HermNet"
--------------------------------------------------------------------------------
/deeph/from_HermNet/rmnet.py:
--------------------------------------------------------------------------------
1 | import math
2 |
3 | import torch
4 | from torch import nn, Tensor
5 | import numpy as np
6 |
7 |
8 | _eps = 1e-3
9 |
10 | r"""Tricks: Introducing the parameter `_eps` is to avoid NaN.
11 | In HVNet and HTNet, a subgraph will be extracted to calculate angles.
12 | And with all the nodes still be included in the subgraph,
13 | each hidden state in such a subgraph will contain 0 value.
14 | In `painn`, the calculation w.r.t $r / \parallel r \parallel$ will be taken.
15 | If just alternate $r / \parallel r \parallel$ with $r / (\parallel r \parallel + _eps)$,
16 | NaN will still occur in during the training.
17 | Considering the following example,
18 | $$
19 | (\frac{x}{r+_eps})^\prime = \frac{r+b-\frac{x^2}{r}}{(r+b)^2}
20 | $$
21 | where $r = \sqrt{x^2+y^2+z^2}$. It is obvious that NaN will occur.
22 | Thus the solution is change the norm $r$ as $r^\prime = \sqrt(x^2+y^2+z^2+_eps)$.
23 | Since $r$ is rotational invariant, $r^2$ is rotational invariant.
24 | Obviously, $\sqrt(r^2 + _eps)$ is rotational invariant.
25 | """
26 | class RBF(nn.Module):
27 | r"""Radial basis function.
28 | A modified version of feature engineering in `DimeNet`,
29 | which is used in `PAINN`.
30 |
31 | Parameters
32 | ----------
33 | rc : float
34 | Cutoff radius
35 | l : int
36 | Parameter in feature engineering in DimeNet
37 | """
38 | def __init__(self, rc: float, l: int):
39 | super(RBF, self).__init__()
40 | self.rc = rc
41 | self.l = l
42 |
43 | def forward(self, x: Tensor):
44 | ls = torch.arange(1, self.l + 1).float().to(x.device)
45 | norm = torch.sqrt((x ** 2).sum(dim=-1) + _eps).unsqueeze(-1)
46 |         return torch.sin(math.pi / self.rc * norm @ ls.unsqueeze(0)) / norm
47 |
48 |
49 | class cosine_cutoff(nn.Module):
50 | r"""Cutoff function in https://aip.scitation.org/doi/pdf/10.1063/1.3553717.
51 |
52 | Parameters
53 | ----------
54 | rc : float
55 | Cutoff radius
56 | """
57 | def __init__(self, rc: float):
58 | super(cosine_cutoff, self).__init__()
59 | self.rc = rc
60 |
61 | def forward(self, x: Tensor):
62 | norm = torch.norm(x, dim=-1, keepdim=True) + _eps
63 | return 0.5 * (torch.cos(math.pi * norm / self.rc) + 1)
64 |
65 | class ShiftedSoftplus(nn.Module):
66 | r"""
67 |
68 | Description
69 | -----------
70 | Applies the element-wise function:
71 |
72 | .. math::
73 | \text{SSP}(x) = \frac{1}{\beta} * \log(1 + \exp(\beta * x)) - \log(\text{shift})
74 |
75 | Attributes
76 | ----------
77 | beta : int
78 | :math:`\beta` value for the mathematical formulation. Default to 1.
79 | shift : int
80 | :math:`\text{shift}` value for the mathematical formulation. Default to 2.
81 | """
82 | def __init__(self, beta=1, shift=2, threshold=20):
83 | super(ShiftedSoftplus, self).__init__()
84 |
85 | self.shift = shift
86 | self.softplus = nn.Softplus(beta=beta, threshold=threshold)
87 |
88 | def forward(self, inputs):
89 | """
90 |
91 | Description
92 | -----------
93 | Applies the activation function.
94 |
95 | Parameters
96 | ----------
97 | inputs : float32 tensor of shape (N, *)
98 | * denotes any number of additional dimensions.
99 |
100 | Returns
101 | -------
102 | float32 tensor of shape (N, *)
103 | Result of applying the activation function to the input.
104 | """
105 | return self.softplus(inputs) - np.log(float(self.shift))
106 |
--------------------------------------------------------------------------------
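
A short usage sketch of the three exports of this module; the shapes follow directly from the code above:

    import torch
    from deeph.from_HermNet import RBF, cosine_cutoff, ShiftedSoftplus

    rij = torch.randn(10, 3)           # ten displacement vectors
    rbf = RBF(rc=6.0, l=5)
    feats = rbf(rij)                   # (10, 5): sin(k*pi*|r|/rc) / |r| for k = 1..5
    fc = cosine_cutoff(rc=6.0)(rij)    # (10, 1): reaches 0 at |r| = rc
    act = ShiftedSoftplus()(feats)     # softplus(x) - log(2), element-wise
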
/deeph/from_PyG_future/__init__.py:
--------------------------------------------------------------------------------
1 | from .graph_norm import GraphNorm
2 | from .diff_group_norm import DiffGroupNorm
3 |
--------------------------------------------------------------------------------
/deeph/from_PyG_future/diff_group_norm.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from torch import Tensor
3 | from torch.nn import Linear, BatchNorm1d
4 |
5 |
6 | class DiffGroupNorm(torch.nn.Module):
7 | r"""The differentiable group normalization layer from the `"Towards Deeper
8 | Graph Neural Networks with Differentiable Group Normalization"
9 |     <https://arxiv.org/abs/2006.06972>`_ paper, which normalizes node features
10 | group-wise via a learnable soft cluster assignment
11 |
12 | .. math::
13 |
14 | \mathbf{S} = \text{softmax} (\mathbf{X} \mathbf{W})
15 |
16 | where :math:`\mathbf{W} \in \mathbb{R}^{F \times G}` denotes a trainable
17 | weight matrix mapping each node into one of :math:`G` clusters.
18 | Normalization is then performed group-wise via:
19 |
20 | .. math::
21 |
22 | \mathbf{X}^{\prime} = \mathbf{X} + \lambda \sum_{i = 1}^G
23 | \text{BatchNorm}(\mathbf{S}[:, i] \odot \mathbf{X})
24 |
25 | Args:
26 | in_channels (int): Size of each input sample :math:`F`.
27 | groups (int): The number of groups :math:`G`.
28 | lamda (float, optional): The balancing factor :math:`\lambda` between
29 | input embeddings and normalized embeddings. (default: :obj:`0.01`)
30 | eps (float, optional): A value added to the denominator for numerical
31 | stability. (default: :obj:`1e-5`)
32 | momentum (float, optional): The value used for the running mean and
33 | running variance computation. (default: :obj:`0.1`)
34 | affine (bool, optional): If set to :obj:`True`, this module has
35 | learnable affine parameters :math:`\gamma` and :math:`\beta`.
36 | (default: :obj:`True`)
37 | track_running_stats (bool, optional): If set to :obj:`True`, this
38 | module tracks the running mean and variance, and when set to
39 | :obj:`False`, this module does not track such statistics and always
40 | uses batch statistics in both training and eval modes.
41 | (default: :obj:`True`)
42 | """
43 | def __init__(self, in_channels, groups, lamda=0.01, eps=1e-5, momentum=0.1,
44 | affine=True, track_running_stats=True):
45 | super(DiffGroupNorm, self).__init__()
46 |
47 | self.in_channels = in_channels
48 | self.groups = groups
49 | self.lamda = lamda
50 |
51 | self.lin = Linear(in_channels, groups, bias=False)
52 | self.norm = BatchNorm1d(groups * in_channels, eps, momentum, affine,
53 | track_running_stats)
54 |
55 | self.reset_parameters()
56 |
57 | def reset_parameters(self):
58 | self.lin.reset_parameters()
59 | self.norm.reset_parameters()
60 |
61 | def forward(self, x: Tensor) -> Tensor:
62 | """"""
63 | F, G = self.in_channels, self.groups
64 |
65 | s = self.lin(x).softmax(dim=-1) # [N, G]
66 | out = s.unsqueeze(-1) * x.unsqueeze(-2) # [N, G, F]
67 | out = self.norm(out.view(-1, G * F)).view(-1, G, F).sum(-2) # [N, F]
68 |
69 | return x + self.lamda * out
70 |
71 | @staticmethod
72 | def group_distance_ratio(x: Tensor, y: Tensor, eps: float = 1e-5) -> float:
73 | r"""Measures the ratio of inter-group distance over intra-group
74 | distance
75 |
76 | .. math::
77 | R_{\text{Group}} = \frac{\frac{1}{(C-1)^2} \sum_{i!=j}
78 | \frac{1}{|\mathbf{X}_i||\mathbf{X}_j|} \sum_{\mathbf{x}_{iv}
79 | \in \mathbf{X}_i } \sum_{\mathbf{x}_{jv^{\prime}} \in \mathbf{X}_j}
80 | {\| \mathbf{x}_{iv} - \mathbf{x}_{jv^{\prime}} \|}_2 }{
81 | \frac{1}{C} \sum_{i} \frac{1}{{|\mathbf{X}_i|}^2}
82 | \sum_{\mathbf{x}_{iv}, \mathbf{x}_{iv^{\prime}} \in \mathbf{X}_i }
83 | {\| \mathbf{x}_{iv} - \mathbf{x}_{iv^{\prime}} \|}_2 }
84 |
85 | where :math:`\mathbf{X}_i` denotes the set of all nodes that belong to
86 | class :math:`i`, and :math:`C` denotes the total number of classes in
87 | :obj:`y`.
88 | """
89 | num_classes = int(y.max()) + 1
90 |
91 | numerator = 0.
92 | for i in range(num_classes):
93 | mask = y == i
94 | dist = torch.cdist(x[mask].unsqueeze(0), x[~mask].unsqueeze(0))
95 | numerator += (1 / dist.numel()) * float(dist.sum())
96 | numerator *= 1 / (num_classes - 1)**2
97 |
98 | denominator = 0.
99 | for i in range(num_classes):
100 | mask = y == i
101 | dist = torch.cdist(x[mask].unsqueeze(0), x[mask].unsqueeze(0))
102 | denominator += (1 / dist.numel()) * float(dist.sum())
103 | denominator *= 1 / num_classes
104 |
105 | return numerator / (denominator + eps)
106 |
107 | def __repr__(self):
108 | return '{}({}, groups={})'.format(self.__class__.__name__,
109 | self.in_channels, self.groups)
110 |
--------------------------------------------------------------------------------
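
A minimal usage sketch: node features are softly assigned to groups, batch-normalized per group, and added back to the input with weight lamda:

    import torch
    from deeph.from_PyG_future import DiffGroupNorm

    x = torch.randn(32, 64)                        # 32 nodes with 64 features each
    norm = DiffGroupNorm(in_channels=64, groups=4)
    out = norm(x)                                  # (32, 64)
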
/deeph/from_PyG_future/graph_norm.py:
--------------------------------------------------------------------------------
1 | from typing import Optional
2 |
3 | import torch
4 | from torch import Tensor
5 | from torch_scatter import scatter_mean
6 |
7 | from torch_geometric.nn.inits import zeros, ones
8 |
9 |
10 | class GraphNorm(torch.nn.Module):
11 | r"""Applies graph normalization over individual graphs as described in the
12 | `"GraphNorm: A Principled Approach to Accelerating Graph Neural Network
13 | Training" `_ paper
14 |
15 | .. math::
16 | \mathbf{x}^{\prime}_i = \frac{\mathbf{x} - \alpha \odot
17 | \textrm{E}[\mathbf{x}]}
18 | {\sqrt{\textrm{Var}[\mathbf{x} - \alpha \odot \textrm{E}[\mathbf{x}]]
19 | + \epsilon}} \odot \gamma + \beta
20 |
21 | where :math:`\alpha` denotes parameters that learn how much information
22 | to keep in the mean.
23 |
24 | Args:
25 | in_channels (int): Size of each input sample.
26 | eps (float, optional): A value added to the denominator for numerical
27 | stability. (default: :obj:`1e-5`)
28 | """
29 | def __init__(self, in_channels: int, eps: float = 1e-5):
30 | super(GraphNorm, self).__init__()
31 |
32 | self.in_channels = in_channels
33 | self.eps = eps
34 |
35 | self.weight = torch.nn.Parameter(torch.Tensor(in_channels))
36 | self.bias = torch.nn.Parameter(torch.Tensor(in_channels))
37 | self.mean_scale = torch.nn.Parameter(torch.Tensor(in_channels))
38 |
39 | self.reset_parameters()
40 |
41 | def reset_parameters(self):
42 | ones(self.weight)
43 | zeros(self.bias)
44 | ones(self.mean_scale)
45 |
46 | def forward(self, x: Tensor, batch: Optional[Tensor] = None) -> Tensor:
47 | """"""
48 | if batch is None:
49 | batch = x.new_zeros(x.size(0), dtype=torch.long)
50 |
51 | batch_size = int(batch.max()) + 1
52 |
53 | mean = scatter_mean(x, batch, dim=0, dim_size=batch_size)[batch]
54 | out = x - mean * self.mean_scale
55 | var = scatter_mean(out.pow(2), batch, dim=0, dim_size=batch_size)
56 | std = (var + self.eps).sqrt()[batch]
57 | return self.weight * out / std + self.bias
58 |
59 | def __repr__(self):
60 | return f'{self.__class__.__name__}({self.in_channels})'
61 |
--------------------------------------------------------------------------------
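
A minimal usage sketch with the explicit batch vector the forward pass above expects; nodes are normalized per graph:

    import torch
    from deeph.from_PyG_future import GraphNorm

    x = torch.randn(7, 16)                       # 7 nodes from a mini-batch of graphs
    batch = torch.tensor([0, 0, 0, 1, 1, 2, 2])  # graph membership of each node
    out = GraphNorm(16)(x, batch)                # (7, 16)
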
/deeph/from_PyG_future/license.txt:
--------------------------------------------------------------------------------
1 | The code in this folder was obtained from "https://github.com/rusty1s/pytorch_geometric", which has the following license:
2 |
3 |
4 | Copyright (c) 2020 Matthias Fey
5 |
6 | Permission is hereby granted, free of charge, to any person obtaining a copy
7 | of this software and associated documentation files (the "Software"), to deal
8 | in the Software without restriction, including without limitation the rights
9 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 | copies of the Software, and to permit persons to whom the Software is
11 | furnished to do so, subject to the following conditions:
12 |
13 | The above copyright notice and this permission notice shall be included in
14 | all copies or substantial portions of the Software.
15 |
16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 | THE SOFTWARE.
--------------------------------------------------------------------------------
/deeph/from_pymatgen/__init__.py:
--------------------------------------------------------------------------------
1 | from .lattice import find_neighbors, _one_to_three, _compute_cube_index, _three_to_one
2 |
--------------------------------------------------------------------------------
/deeph/from_pymatgen/lattice.py:
--------------------------------------------------------------------------------
1 | import itertools
2 | import numpy as np
3 |
4 |
5 | # The following internal methods are used in the get_points_in_sphere method.
6 | def _compute_cube_index(coords: np.ndarray, global_min: float, radius: float
7 | ) -> np.ndarray:
8 | """
9 | Compute the cube index from coordinates
10 | Args:
11 | coords: (nx3 array) atom coordinates
12 | global_min: (float) lower boundary of coordinates
13 | radius: (float) cutoff radius
14 |
15 | Returns: (nx3 array) int indices
16 |
17 | """
18 | return np.array(np.floor((coords - global_min) / radius), dtype=int)
19 |
20 | def _three_to_one(label3d: np.ndarray, ny: int, nz: int) -> np.ndarray:
21 | """
22 | The reverse of _one_to_three
23 | """
24 | return np.array(label3d[:, 0] * ny * nz +
25 | label3d[:, 1] * nz + label3d[:, 2]).reshape((-1, 1))
26 |
27 | def _one_to_three(label1d: np.ndarray, ny: int, nz: int) -> np.ndarray:
28 | """
29 | Convert a 1D index array to 3D index array
30 |
31 | Args:
32 | label1d: (array) 1D index array
33 | ny: (int) number of cells in y direction
34 | nz: (int) number of cells in z direction
35 |
36 | Returns: (nx3) int array of index
37 |
38 | """
39 | last = np.mod(label1d, nz)
40 | second = np.mod((label1d - last) / nz, ny)
41 | first = (label1d - last - second * nz) / (ny * nz)
42 | return np.concatenate([first, second, last], axis=1)
43 |
44 | def find_neighbors(label: np.ndarray, nx: int, ny: int, nz: int):
45 | """
46 | Given a cube index, find the neighbor cube indices
47 |
48 | Args:
49 |         label: (array) (n,) or (n x 3) index array
50 |         nx: (int) number of cells in x direction
51 | ny: (int) number of cells in y direction
52 | nz: (int) number of cells in z direction
53 |
54 | Returns: neighbor cell indices
55 |
56 | """
57 |
58 | array = [[-1, 0, 1]] * 3
59 | neighbor_vectors = np.array(list(itertools.product(*array)),
60 | dtype=int)
61 | if np.shape(label)[1] == 1:
62 | label3d = _one_to_three(label, ny, nz)
63 | else:
64 | label3d = label
65 | all_labels = label3d[:, None, :] - neighbor_vectors[None, :, :]
66 | filtered_labels = []
67 | # filter out out-of-bound labels i.e., label < 0
68 | for labels in all_labels:
69 | ind = (labels[:, 0] < nx) * (labels[:, 1] < ny) * (labels[:, 2] < nz) * np.all(labels > -1e-5, axis=1)
70 | filtered_labels.append(labels[ind])
71 | return filtered_labels
--------------------------------------------------------------------------------
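
A small sketch of how these helpers fit together; the cell counts and radius are illustrative:

    import numpy as np
    from deeph.from_pymatgen import (_compute_cube_index, _three_to_one,
                                     _one_to_three, find_neighbors)

    coords = np.array([[0.1, 0.2, 0.3], [5.0, 5.5, 6.0]])
    idx3 = _compute_cube_index(coords, global_min=0.0, radius=2.0)  # (n, 3) cube labels
    idx1 = _three_to_one(idx3, ny=4, nz=4)                          # (n, 1) flattened labels
    assert np.allclose(_one_to_three(idx1, ny=4, nz=4), idx3)       # round trip
    nbrs = find_neighbors(idx1, nx=4, ny=4, nz=4)                   # up to 27 neighbor cubes per point
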
/deeph/from_pymatgen/license.txt:
--------------------------------------------------------------------------------
1 | The code in this folder was obtained from "https://github.com/materialsproject/pymatgen", which has the following license:
2 |
3 |
4 | The MIT License (MIT)
5 | Copyright (c) 2011-2012 MIT & The Regents of the University of California, through Lawrence Berkeley National Laboratory
6 |
7 | Permission is hereby granted, free of charge, to any person obtaining a copy of
8 | this software and associated documentation files (the "Software"), to deal in
9 | the Software without restriction, including without limitation the rights to
10 | use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
11 | the Software, and to permit persons to whom the Software is furnished to do so,
12 | subject to the following conditions:
13 |
14 | The above copyright notice and this permission notice shall be included in all
15 | copies or substantial portions of the Software.
16 |
17 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
19 | FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
20 | COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
21 | IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
22 | CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
--------------------------------------------------------------------------------
/deeph/from_schnetpack/__init__.py:
--------------------------------------------------------------------------------
1 | from .acsf import GaussianBasis
2 |
--------------------------------------------------------------------------------
/deeph/from_schnetpack/acsf.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from torch import nn
3 |
4 |
5 | def gaussian_smearing(distances, offset, widths, centered=False):
6 | if not centered:
7 | # compute width of Gaussian functions (using an overlap of 1 STDDEV)
8 | coeff = -0.5 / torch.pow(widths, 2)
9 | # Use advanced indexing to compute the individual components
10 | diff = distances[..., None] - offset
11 | else:
12 | # if Gaussian functions are centered, use offsets to compute widths
13 | coeff = -0.5 / torch.pow(offset, 2)
14 | # if Gaussian functions are centered, no offset is subtracted
15 | diff = distances[..., None]
16 | # compute smear distance values
17 | gauss = torch.exp(coeff * torch.pow(diff, 2))
18 | return gauss
19 |
20 |
21 | class GaussianBasis(nn.Module):
22 | def __init__(
23 | self, start=0.0, stop=5.0, n_gaussians=50, centered=False, trainable=False
24 | ):
25 | super(GaussianBasis, self).__init__()
26 | # compute offset and width of Gaussian functions
27 | offset = torch.linspace(start, stop, n_gaussians)
28 | widths = torch.FloatTensor((offset[1] - offset[0]) * torch.ones_like(offset))
29 | if trainable:
30 | self.width = nn.Parameter(widths)
31 | self.offsets = nn.Parameter(offset)
32 | else:
33 | self.register_buffer("width", widths)
34 | self.register_buffer("offsets", offset)
35 | self.centered = centered
36 |
37 | def forward(self, distances):
38 | """Compute smeared-gaussian distance values.
39 |
40 | Args:
41 | distances (torch.Tensor): interatomic distance values of
42 | (N_b x N_at x N_nbh) shape.
43 |
44 | Returns:
45 | torch.Tensor: layer output of (N_b x N_at x N_nbh x N_g) shape.
46 |
47 | """
48 | return gaussian_smearing(
49 | distances, self.offsets, self.width, centered=self.centered
50 | )
--------------------------------------------------------------------------------
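
A minimal usage sketch; the stop value mirrors gauss_stop = 6 in deeph/default.ini and the distances are placeholder values:

    import torch
    from deeph.from_schnetpack import GaussianBasis

    expand = GaussianBasis(start=0.0, stop=6.0, n_gaussians=50)
    d = torch.tensor([1.42, 2.46])   # interatomic distances
    feats = expand(d)                # (2, 50): one Gaussian per grid point
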
/deeph/from_schnetpack/license.txt:
--------------------------------------------------------------------------------
1 | The code in this folder was obtained from "https://github.com/atomistic-machine-learning/schnetpack", which has the following license:
2 |
3 |
4 | COPYRIGHT
5 |
6 | Copyright (c) 2018 Kristof Schütt, Michael Gastegger, Pan Kessel, Kim Nicoli
7 |
8 | All other contributions:
9 | Copyright (c) 2018, the respective contributors.
10 | All rights reserved.
11 |
12 | Each contributor holds copyright over their respective contributions.
13 | The project versioning (Git) records all such contribution source information.
14 |
15 | LICENSE
16 |
17 | The MIT License
18 |
19 | Permission is hereby granted, free of charge, to any person obtaining a copy
20 | of this software and associated documentation files (the "Software"), to deal
21 | in the Software without restriction, including without limitation the rights
22 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
23 | copies of the Software, and to permit persons to whom the Software is
24 | furnished to do so, subject to the following conditions:
25 |
26 | The above copyright notice and this permission notice shall be included in all
27 | copies or substantial portions of the Software.
28 |
29 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
30 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
31 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
32 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
33 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
34 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
35 | SOFTWARE.
--------------------------------------------------------------------------------
/deeph/from_se3_transformer/__init__.py:
--------------------------------------------------------------------------------
1 | from .representations import SphericalHarmonics
--------------------------------------------------------------------------------
/deeph/from_se3_transformer/license.txt:
--------------------------------------------------------------------------------
1 | The code in this folder was obtained from "https://github.com/mariogeiger/se3cnn/", which has the following license:
2 |
3 |
4 | MIT License
5 |
6 | Copyright (c) 2019 Mario Geiger
7 |
8 | Permission is hereby granted, free of charge, to any person obtaining a copy
9 | of this software and associated documentation files (the "Software"), to deal
10 | in the Software without restriction, including without limitation the rights
11 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
12 | copies of the Software, and to permit persons to whom the Software is
13 | furnished to do so, subject to the following conditions:
14 |
15 | The above copyright notice and this permission notice shall be included in all
16 | copies or substantial portions of the Software.
17 |
18 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
21 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
22 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
23 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
24 | SOFTWARE.
--------------------------------------------------------------------------------
/deeph/from_se3_transformer/representations.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import numpy as np
3 |
4 |
5 | def semifactorial(x):
6 | """Compute the semifactorial function x!!.
7 |
8 | x!! = x * (x-2) * (x-4) *...
9 |
10 | Args:
11 | x: positive int
12 | Returns:
13 | float for x!!
14 | """
15 | y = 1.
16 | for n in range(x, 1, -2):
17 | y *= n
18 | return y
19 |
20 |
21 | def pochhammer(x, k):
22 | """Compute the pochhammer symbol (x)_k.
23 |
24 | (x)_k = x * (x+1) * (x+2) *...* (x+k-1)
25 |
26 | Args:
27 |         x, k: positive ints
28 | Returns:
29 | float for (x)_k
30 | """
31 | xf = float(x)
32 | for n in range(x+1, x+k):
33 | xf *= n
34 | return xf
35 |
36 | def lpmv(l, m, x):
37 | """Associated Legendre function including Condon-Shortley phase.
38 |
39 | Args:
40 | m: int order
41 | l: int degree
42 | x: float argument tensor
43 | Returns:
44 | tensor of x-shape
45 | """
46 | m_abs = abs(m)
47 | if m_abs > l:
48 | return torch.zeros_like(x)
49 |
50 | # Compute P_m^m
51 | yold = ((-1)**m_abs * semifactorial(2*m_abs-1)) * torch.pow(1-x*x, m_abs/2)
52 |
53 | # Compute P_{m+1}^m
54 | if m_abs != l:
55 | y = x * (2*m_abs+1) * yold
56 | else:
57 | y = yold
58 |
59 | # Compute P_{l}^m from recursion in P_{l-1}^m and P_{l-2}^m
60 | for i in range(m_abs+2, l+1):
61 | tmp = y
62 | # Inplace speedup
63 | y = ((2*i-1) / (i-m_abs)) * x * y
64 | y -= ((i+m_abs-1)/(i-m_abs)) * yold
65 | yold = tmp
66 |
67 | if m < 0:
68 | y *= ((-1)**m / pochhammer(l+m+1, -2*m))
69 |
70 | return y
71 |
72 | def tesseral_harmonics(l, m, theta=0., phi=0.):
73 | """Tesseral spherical harmonic with Condon-Shortley phase.
74 |
75 | The Tesseral spherical harmonics are also known as the real spherical
76 | harmonics.
77 |
78 | Args:
79 | l: int for degree
80 |         m: int for order, where -l <= m <= l
81 |         theta: colatitude or polar angle
82 | phi: longitude or azimuth
83 | Returns:
84 | tensor of shape theta
85 | """
86 | assert abs(m) <= l, "absolute value of order m must be <= degree l"
87 |
88 | N = np.sqrt((2*l+1) / (4*np.pi))
89 | leg = lpmv(l, abs(m), torch.cos(theta))
90 | if m == 0:
91 | return N*leg
92 | elif m > 0:
93 | Y = torch.cos(m*phi) * leg
94 | else:
95 | Y = torch.sin(abs(m)*phi) * leg
96 | N *= np.sqrt(2. / pochhammer(l-abs(m)+1, 2*abs(m)))
97 | Y *= N
98 | return Y
99 |
100 | class SphericalHarmonics(object):
101 | def __init__(self):
102 | self.leg = {}
103 |
104 | def clear(self):
105 | self.leg = {}
106 |
107 | def negative_lpmv(self, l, m, y):
108 | """Compute negative order coefficients"""
109 | if m < 0:
110 | y *= ((-1)**m / pochhammer(l+m+1, -2*m))
111 | return y
112 |
113 | def lpmv(self, l, m, x):
114 | """Associated Legendre function including Condon-Shortley phase.
115 |
116 | Args:
117 | m: int order
118 | l: int degree
119 | x: float argument tensor
120 | Returns:
121 | tensor of x-shape
122 | """
123 | # Check memoized versions
124 | m_abs = abs(m)
125 | if (l,m) in self.leg:
126 | return self.leg[(l,m)]
127 | elif m_abs > l:
128 | return None
129 | elif l == 0:
130 | self.leg[(l,m)] = torch.ones_like(x)
131 | return self.leg[(l,m)]
132 |
133 | # Check if on boundary else recurse solution down to boundary
134 | if m_abs == l:
135 | # Compute P_m^m
136 | y = (-1)**m_abs * semifactorial(2*m_abs-1)
137 | y *= torch.pow(1-x*x, m_abs/2)
138 | self.leg[(l,m)] = self.negative_lpmv(l, m, y)
139 | return self.leg[(l,m)]
140 | else:
141 | # Recursively precompute lower degree harmonics
142 | self.lpmv(l-1, m, x)
143 |
144 | # Compute P_{l}^m from recursion in P_{l-1}^m and P_{l-2}^m
145 | # Inplace speedup
146 | y = ((2*l-1) / (l-m_abs)) * x * self.lpmv(l-1, m_abs, x)
147 | if l - m_abs > 1:
148 | y -= ((l+m_abs-1)/(l-m_abs)) * self.leg[(l-2, m_abs)]
149 | #self.leg[(l, m_abs)] = y
150 |
151 | if m < 0:
152 | y = self.negative_lpmv(l, m, y)
153 | self.leg[(l,m)] = y
154 |
155 | return self.leg[(l,m)]
156 |
157 | def get_element(self, l, m, theta, phi):
158 | """Tesseral spherical harmonic with Condon-Shortley phase.
159 |
160 | The Tesseral spherical harmonics are also known as the real spherical
161 | harmonics.
162 |
163 | Args:
164 | l: int for degree
165 |             m: int for order, where -l <= m <= l
166 |             theta: colatitude or polar angle
167 | phi: longitude or azimuth
168 | Returns:
169 | tensor of shape theta
170 | """
171 | assert abs(m) <= l, "absolute value of order m must be <= degree l"
172 |
173 | N = np.sqrt((2*l+1) / (4*np.pi))
174 | leg = self.lpmv(l, abs(m), torch.cos(theta))
175 | if m == 0:
176 | return N*leg
177 | elif m > 0:
178 | Y = torch.cos(m*phi) * leg
179 | else:
180 | Y = torch.sin(abs(m)*phi) * leg
181 | N *= np.sqrt(2. / pochhammer(l-abs(m)+1, 2*abs(m)))
182 | Y *= N
183 | return Y
184 |
185 | def get(self, l, theta, phi, refresh=True):
186 | """Tesseral harmonic with Condon-Shortley phase.
187 |
188 | The Tesseral spherical harmonics are also known as the real spherical
189 | harmonics.
190 |
191 | Args:
192 | l: int for degree
193 |             theta: colatitude or polar angle
194 | phi: longitude or azimuth
195 | Returns:
196 | tensor of shape [*theta.shape, 2*l+1]
197 | """
198 | results = []
199 | if refresh:
200 | self.clear()
201 | for m in range(-l, l+1):
202 | results.append(self.get_element(l, m, theta, phi))
203 | return torch.stack(results, -1)
204 |
205 |
--------------------------------------------------------------------------------
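
A minimal usage sketch of the memoized interface defined above:

    import math
    import torch
    from deeph.from_se3_transformer import SphericalHarmonics

    sh = SphericalHarmonics()
    theta = torch.rand(8) * math.pi    # colatitude angles
    phi = torch.rand(8) * 2 * math.pi  # azimuth angles
    Y2 = sh.get(2, theta, phi)         # (8, 5): real spherical harmonics for l = 2, m = -2..2
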
/deeph/inference/__init__.py:
--------------------------------------------------------------------------------
1 | from .pred_ham import predict, predict_with_grad
--------------------------------------------------------------------------------
/deeph/inference/band_config.json:
--------------------------------------------------------------------------------
1 | {
2 | "calc_job": "band",
3 | "which_k": 0,
4 | "fermi_level": -3.82373,
5 | "max_iter": 300,
6 | "num_band": 50,
7 | "k_data": ["15 0 0 0 0.5 0.5 0 Γ M", "15 0.5 0.5 0 0.3333333333333333 0.6666666666666667 0 M K", "15 0.3333333333333333 0.6666666666666667 0 0 0 0 K Γ"]
8 | }
--------------------------------------------------------------------------------
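
Each k_data entry encodes one band-path segment: a point count, fractional start and end coordinates, and two high-symmetry labels, exactly as parsed by k_data2num_ks and k_data2kpath in dense_calc.jl below. The same parse, sketched in Python:

    # "<num_k> <kx1> <ky1> <kz1> <kx2> <ky2> <kz2> <label1> <label2>"
    entry = "15 0 0 0 0.5 0.5 0 Γ M"
    fields = entry.split()
    num_k = int(fields[0])                   # points sampled along this segment
    kpath = [float(v) for v in fields[1:7]]  # fractional start/end coordinates
    labels = fields[7:9]                     # high-symmetry point names
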
/deeph/inference/dense_calc.jl:
--------------------------------------------------------------------------------
1 | using DelimitedFiles, LinearAlgebra, JSON
2 | using HDF5
3 | using ArgParse
4 | using SparseArrays
5 | using Arpack
6 | using JLD
7 | # BLAS.set_num_threads(1)
8 |
9 | const ev2Hartree = 0.036749324533634074
10 | const Bohr2Ang = 0.529177249
11 | const default_dtype = Complex{Float64}
12 |
13 |
14 | function parse_commandline()
15 | s = ArgParseSettings()
16 | @add_arg_table! s begin
17 | "--input_dir", "-i"
18 | help = "path of rlat.dat, orbital_types.dat, site_positions.dat, hamiltonians_pred.h5, and overlaps.h5"
19 | arg_type = String
20 | default = "./"
21 | "--output_dir", "-o"
22 | help = "path of output openmx.Band"
23 | arg_type = String
24 | default = "./"
25 | "--config"
26 | help = "config file in the format of JSON"
27 | arg_type = String
28 | "--ill_project"
29 | help = "projects out the eigenvectors of the overlap matrix that correspond to eigenvalues smaller than ill_threshold"
30 | arg_type = Bool
31 | default = true
32 | "--ill_threshold"
33 | help = "threshold for ill_project"
34 | arg_type = Float64
35 | default = 5e-4
36 | end
37 | return parse_args(s)
38 | end
39 |
40 |
41 | function _create_dict_h5(filename::String)
42 | fid = h5open(filename, "r")
43 | T = eltype(fid[keys(fid)[1]])
44 | d_out = Dict{Array{Int64,1}, Array{T, 2}}()
45 | for key in keys(fid)
46 | data = read(fid[key])
47 | nk = map(x -> parse(Int64, convert(String, x)), split(key[2 : length(key) - 1], ','))
48 | d_out[nk] = permutedims(data)
49 | end
50 | close(fid)
51 | return d_out
52 | end
53 |
54 |
55 | function genlist(x)
56 | return collect(range(x[1], stop = x[2], length = Int64(x[3])))
57 | end
58 |
59 |
60 | function k_data2num_ks(kdata::AbstractString)
61 | return parse(Int64,split(kdata)[1])
62 | end
63 |
64 |
65 | function k_data2kpath(kdata::AbstractString)
66 | return map(x->parse(Float64,x), split(kdata)[2:7])
67 | end
68 |
69 |
70 | function std_out_array(a::AbstractArray)
71 | return string(map(x->string(x," "),a)...)
72 | end
73 |
74 |
75 | function main()
76 | parsed_args = parse_commandline()
77 |
78 | println(parsed_args["config"])
79 | config = JSON.parsefile(parsed_args["config"])
80 | calc_job = config["calc_job"]
81 | ill_project = parsed_args["ill_project"]
82 | ill_threshold = parsed_args["ill_threshold"]
83 |
84 | if isfile(joinpath(parsed_args["input_dir"],"info.json"))
85 | spinful = JSON.parsefile(joinpath(parsed_args["input_dir"],"info.json"))["isspinful"]
86 | else
87 | spinful = false
88 | end
89 |
90 | site_positions = readdlm(joinpath(parsed_args["input_dir"], "site_positions.dat"))
91 | nsites = size(site_positions, 2)
92 |
93 | orbital_types_f = open(joinpath(parsed_args["input_dir"], "orbital_types.dat"), "r")
94 | site_norbits = zeros(nsites)
95 | orbital_types = Vector{Vector{Int64}}()
96 | for index_site = 1:nsites
97 | orbital_type = parse.(Int64, split(readline(orbital_types_f)))
98 | push!(orbital_types, orbital_type)
99 | end
100 | site_norbits = (x->sum(x .* 2 .+ 1)).(orbital_types) * (1 + spinful)
101 | norbits = sum(site_norbits)
102 | site_norbits_cumsum = cumsum(site_norbits)
103 |
104 | rlat = readdlm(joinpath(parsed_args["input_dir"], "rlat.dat"))
105 |
106 |
107 | @info "read h5"
108 | begin_time = time()
109 | hamiltonians_pred = _create_dict_h5(joinpath(parsed_args["input_dir"], "hamiltonians_pred.h5"))
110 | overlaps = _create_dict_h5(joinpath(parsed_args["input_dir"], "overlaps.h5"))
111 | println("Time for reading h5: ", time() - begin_time, "s")
112 |
113 | H_R = Dict{Vector{Int64}, Matrix{default_dtype}}()
114 | S_R = Dict{Vector{Int64}, Matrix{default_dtype}}()
115 |
116 | @info "construct Hamiltonian and overlap matrix in the real space"
117 | begin_time = time()
118 | for key in collect(keys(hamiltonians_pred))
119 | hamiltonian_pred = hamiltonians_pred[key]
120 | if (key ∈ keys(overlaps))
121 | overlap = overlaps[key]
122 | else
123 | # continue
124 | overlap = zero(hamiltonian_pred)
125 | end
126 | if spinful
127 | overlap = vcat(hcat(overlap,zeros(size(overlap))),hcat(zeros(size(overlap)),overlap)) # the overlap matrix read from file only contains the spatial (upper-left) block # TODO maybe drop the zeros?
128 | end
129 | R = key[1:3]; atom_i=key[4]; atom_j=key[5]
130 |
131 | @assert (site_norbits[atom_i], site_norbits[atom_j]) == size(hamiltonian_pred)
132 | @assert (site_norbits[atom_i], site_norbits[atom_j]) == size(overlap)
133 | if !(R ∈ keys(H_R))
134 | H_R[R] = zeros(default_dtype, norbits, norbits)
135 | S_R[R] = zeros(default_dtype, norbits, norbits)
136 | end
137 | for block_matrix_i in 1:site_norbits[atom_i]
138 | for block_matrix_j in 1:site_norbits[atom_j]
139 | index_i = site_norbits_cumsum[atom_i] - site_norbits[atom_i] + block_matrix_i
140 | index_j = site_norbits_cumsum[atom_j] - site_norbits[atom_j] + block_matrix_j
141 | H_R[R][index_i, index_j] = hamiltonian_pred[block_matrix_i, block_matrix_j]
142 | S_R[R][index_i, index_j] = overlap[block_matrix_i, block_matrix_j]
143 | end
144 | end
145 | end
146 | println("Time for constructing Hamiltonian and overlap matrix in the real space: ", time() - begin_time, " s")
147 |
148 |
149 | if calc_job == "band"
150 | fermi_level = config["fermi_level"]
151 | k_data = config["k_data"]
152 |
153 | @info "calculate bands"
154 | num_ks = k_data2num_ks.(k_data)
155 | kpaths = k_data2kpath.(k_data)
156 |
157 | egvals = zeros(Float64, norbits, sum(num_ks)[1])
158 |
159 | begin_time = time()
160 | idx_k = 1
161 | for i = 1:size(kpaths, 1)
162 | kpath = kpaths[i]
163 | pnkpts = num_ks[i]
164 | kxs = LinRange(kpath[1], kpath[4], pnkpts)
165 | kys = LinRange(kpath[2], kpath[5], pnkpts)
166 | kzs = LinRange(kpath[3], kpath[6], pnkpts)
167 | for (kx, ky, kz) in zip(kxs, kys, kzs)
168 |
169 | H_k = zeros(default_dtype, norbits, norbits)
170 | S_k = zeros(default_dtype, norbits, norbits)
171 | for R in keys(H_R)
172 | H_k += H_R[R] * exp(im*2π*([kx, ky, kz]⋅R))
173 | S_k += S_R[R] * exp(im*2π*([kx, ky, kz]⋅R))
174 | end
175 | S_k = (S_k + S_k') / 2
176 | H_k = (H_k + H_k') / 2
177 | if ill_project
178 | (egval_S, egvec_S) = eigen(Hermitian(S_k))
179 | # egvec_S: shape (num_basis, num_bands)
180 | project_index = abs.(egval_S) .> ill_threshold
181 | if sum(project_index) != length(project_index)
182 | # egval_S = egval_S[project_index]
183 | egvec_S = egvec_S[:, project_index]
184 | @warn "ill-conditioned eigenvalues detected, projected out $(length(project_index) - sum(project_index)) eigenvalues"
185 | H_k = egvec_S' * H_k * egvec_S
186 | S_k = egvec_S' * S_k * egvec_S
187 | (egval, egvec) = eigen(Hermitian(H_k), Hermitian(S_k))
188 | egval = vcat(egval, fill(1e4, length(project_index) - sum(project_index)))
189 | egvec = egvec_S * egvec
190 | else
191 | (egval, egvec) = eigen(Hermitian(H_k), Hermitian(S_k))
192 | end
193 | else
194 | (egval, egvec) = eigen(Hermitian(H_k), Hermitian(S_k))
195 | end
196 | egvals[:, idx_k] = egval
197 | println("Time for solving No.$idx_k eigenvalues at k = ", [kx, ky, kz], ": ", time() - begin_time, " s")
198 | idx_k += 1
199 | end
200 | end
201 |
202 | # output in openmx band format
203 | f = open(joinpath(parsed_args["output_dir"], "openmx.Band"),"w")
204 | println(f, norbits, " ", 0, " ", ev2Hartree * fermi_level)
205 | openmx_rlat = reshape((rlat .* Bohr2Ang), 1, :)
206 | println(f, std_out_array(openmx_rlat))
207 | println(f, length(k_data))
208 | for line in k_data
209 | println(f,line)
210 | end
211 | idx_k = 1
212 | for i = 1:size(kpaths, 1)
213 | pnkpts = num_ks[i]
214 | kstart = kpaths[i][1:3]
215 | kend = kpaths[i][4:6]
216 | k_list = zeros(Float64,pnkpts,3)
217 | for alpha = 1:3
218 | k_list[:,alpha] = genlist([kstart[alpha],kend[alpha],pnkpts])
219 | end
220 | for j = 1:pnkpts
221 |
222 | kvec = k_list[j,:]
223 | println(f, norbits, " ", std_out_array(kvec))
224 | println(f, std_out_array(ev2Hartree * egvals[:, idx_k]))
225 | idx_k += 1
226 | end
227 | end
228 | close(f)
229 | end
230 | end
231 |
232 |
233 | main()
234 |
--------------------------------------------------------------------------------
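The --ill_project branch above stabilizes the generalized eigenproblem H_k ψ = ε S_k ψ when the overlap matrix S_k is nearly singular: eigenvectors of S_k whose eigenvalues fall below --ill_threshold are projected out before diagonalization, and the removed directions are padded with the sentinel value 1e4. A NumPy sketch of the same idea, assuming hypothetical dense inputs H and S:

import numpy as np
from scipy import linalg

def solve_projected(H, S, ill_threshold=5e-4):
    """Generalized eigenvalues of (H, S), with near-null directions of S projected out."""
    egval_S, egvec_S = np.linalg.eigh(S)
    keep = np.abs(egval_S) > ill_threshold     # well-conditioned directions of S
    V = egvec_S[:, keep]                       # basis of the retained subspace
    egval = linalg.eigvalsh(V.conj().T @ H @ V, V.conj().T @ S @ V)
    # pad the projected-out directions with a large sentinel, as dense_calc.jl does
    return np.concatenate([egval, np.full(np.count_nonzero(~keep), 1e4)])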
/deeph/inference/dense_calc.py:
--------------------------------------------------------------------------------
1 | import json
2 | import argparse
3 | import h5py
4 | import numpy as np
5 | import os
6 | from time import time
7 | from scipy import linalg
8 |
9 | def parse_commandline():
10 | parser = argparse.ArgumentParser()
11 | parser.add_argument(
12 | "--input_dir", "-i", type=str, default="./",
13 | help="path of rlat.dat, orbital_types.dat, site_positions.dat, hamiltonians_pred.h5, and overlaps.h5"
14 | )
15 | parser.add_argument(
16 | "--output_dir", "-o", type=str, default="./",
17 | help="path of output openmx.Band"
18 | )
19 | parser.add_argument(
20 | "--config", type=str,
21 | help="config file in the format of JSON"
22 | )
23 | return parser.parse_args()
24 |
25 | parsed_args = parse_commandline()
26 |
27 | def _create_dict_h5(filename):
28 | fid = h5py.File(filename, "r")
29 | d_out = {}
30 | for key in fid.keys():
31 | data = np.array(fid[key])
32 | nk = tuple(map(int, key[1:-1].split(',')))
33 | # BS:
34 | # the matrix does not need to be transposed in Python,
35 | # but the transpose is required in Julia (column-major order).
36 | d_out[nk] = data # np.transpose(data)
37 | fid.close()
38 | return d_out
39 |
40 |
41 | ev2Hartree = 0.036749324533634074
42 | Bohr2Ang = 0.529177249
43 |
44 |
45 | def genlist(x):
46 | return np.linspace(x[0], x[1], int(x[2]))
47 |
48 |
49 | def k_data2num_ks(kdata):
50 | return int(kdata.split()[0])
51 |
52 |
53 | def k_data2kpath(kdata):
54 | return [float(x) for x in kdata.split()[1:7]]
55 |
56 |
57 | def std_out_array(a):
58 | return ''.join([str(x) + ' ' for x in a])
59 |
60 |
61 | default_dtype = np.complex128
62 |
63 | print(parsed_args.config)
64 | with open(parsed_args.config) as f:
65 | config = json.load(f)
66 | calc_job = config["calc_job"]
67 |
68 | if os.path.isfile(os.path.join(parsed_args.input_dir, "info.json")):
69 | with open(os.path.join(parsed_args.input_dir, "info.json")) as f:
70 | spinful = json.load(f)["isspinful"]
71 | else:
72 | spinful = False
73 |
74 | site_positions = np.loadtxt(os.path.join(parsed_args.input_dir, "site_positions.dat"))
75 |
76 | if len(site_positions.shape) == 2:
77 | nsites = site_positions.shape[1]
78 | else:
79 | nsites = 1
80 | # in case of single atom
81 |
82 |
83 | with open(os.path.join(parsed_args.input_dir, "orbital_types.dat")) as f:
84 | site_norbits = np.zeros(nsites, dtype=int)
85 | orbital_types = []
86 | for index_site in range(nsites):
87 | orbital_type = list(map(int, f.readline().split()))
88 | orbital_types.append(orbital_type)
89 | site_norbits[index_site] = np.sum(np.array(orbital_type) * 2 + 1)
90 | norbits = np.sum(site_norbits)
91 | site_norbits_cumsum = np.cumsum(site_norbits)
92 |
93 | rlat = np.loadtxt(os.path.join(parsed_args.input_dir, "rlat.dat")).T
94 | # transposition is required when reading rlat.dat in Python
95 |
96 |
97 | print("read h5")
98 | begin_time = time()
99 | hamiltonians_pred = _create_dict_h5(os.path.join(parsed_args.input_dir, "hamiltonians_pred.h5"))
100 | overlaps = _create_dict_h5(os.path.join(parsed_args.input_dir, "overlaps.h5"))
101 | print("Time for reading h5: ", time() - begin_time, "s")
102 |
103 | H_R = {}
104 | S_R = {}
105 |
106 | print("construct Hamiltonian and overlap matrix in the real space")
107 | begin_time = time()
108 |
109 | # BS:
110 | # this is for debugging consistency between Python and Julia;
111 | # in julia, you can use 'sort(collect(keys(hamiltonians_pred)))'
112 | # for key in dict(sorted(hamiltonians_pred.items())).keys():
113 | for key in hamiltonians_pred.keys():
114 |
115 | hamiltonian_pred = hamiltonians_pred[key]
116 |
117 | if key in overlaps.keys():
118 | overlap = overlaps[key]
119 | else:
120 | overlap = np.zeros_like(hamiltonian_pred)
121 | if spinful:
122 | overlap = np.vstack((np.hstack((overlap, np.zeros_like(overlap))), np.hstack((np.zeros_like(overlap), overlap))))
123 | R = key[:3]
124 | atom_i = key[3] - 1
125 | atom_j = key[4] - 1
126 |
127 | assert (site_norbits[atom_i], site_norbits[atom_j]) == hamiltonian_pred.shape
128 | assert (site_norbits[atom_i], site_norbits[atom_j]) == overlap.shape
129 |
130 | if R not in H_R.keys():
131 | H_R[R] = np.zeros((norbits, norbits), dtype=default_dtype)
132 | S_R[R] = np.zeros((norbits, norbits), dtype=default_dtype)
133 |
134 | for block_matrix_i in range(1, site_norbits[atom_i]+1):
135 | for block_matrix_j in range(1, site_norbits[atom_j]+1):
136 | index_i = site_norbits_cumsum[atom_i] - site_norbits[atom_i] + block_matrix_i - 1
137 | index_j = site_norbits_cumsum[atom_j] - site_norbits[atom_j] + block_matrix_j - 1
138 | H_R[R][index_i, index_j] = hamiltonian_pred[block_matrix_i-1, block_matrix_j-1]
139 | S_R[R][index_i, index_j] = overlap[block_matrix_i-1, block_matrix_j-1]
140 |
141 |
142 | print("Time for constructing Hamiltonian and overlap matrix in the real space: ", time() - begin_time, " s")
143 |
144 | if calc_job == "band":
145 | fermi_level = config["fermi_level"]
146 | k_data = config["k_data"]
147 |
148 | print("calculate bands")
149 | num_ks = [k_data2num_ks(k) for k in k_data]
150 | kpaths = [k_data2kpath(k) for k in k_data]
151 |
152 | egvals = np.zeros((norbits, sum(num_ks)))
153 |
154 | begin_time = time()
155 | idx_k = 0
156 | for i in range(len(kpaths)):
157 | kpath = kpaths[i]
158 | pnkpts = num_ks[i]
159 | kxs = np.linspace(kpath[0], kpath[3], pnkpts)
160 | kys = np.linspace(kpath[1], kpath[4], pnkpts)
161 | kzs = np.linspace(kpath[2], kpath[5], pnkpts)
162 | for kx, ky, kz in zip(kxs, kys, kzs):
163 | H_k = np.zeros((norbits, norbits), dtype=default_dtype)
164 | S_k = np.zeros((norbits, norbits), dtype=default_dtype)
165 | for R in H_R.keys():
166 | H_k += H_R[R] * np.exp(1j*2*np.pi*np.dot([kx, ky, kz], R))
167 | S_k += S_R[R] * np.exp(1j*2*np.pi*np.dot([kx, ky, kz], R))
168 | #---------------------------------------------
169 | # BS: only eigenvalues are needed in this part,
170 | # so only the upper triangle of the matrices is referenced (lower=False)
171 | #
172 | # egval, egvec = linalg.eig(H_k, S_k)
173 | egval = linalg.eigvalsh(H_k, S_k, lower=False)
174 | egvals[:, idx_k] = egval
175 |
176 | print("Time for solving No.{} eigenvalues at k = {} : {} s".format(idx_k+1, [kx, ky, kz], time() - begin_time))
177 | idx_k += 1
178 |
179 | # output in openmx band format
180 | with open(os.path.join(parsed_args.output_dir, "openmx.Band"), "w") as f:
181 | f.write("{} {} {}\n".format(norbits, 0, ev2Hartree * fermi_level))
182 | openmx_rlat = np.reshape((rlat * Bohr2Ang), (1, -1))[0]
183 | f.write(std_out_array(openmx_rlat) + "\n")
184 | f.write(str(len(k_data)) + "\n")
185 | for line in k_data:
186 | f.write(line + "\n")
187 | idx_k = 0
188 | for i in range(len(kpaths)):
189 | pnkpts = num_ks[i]
190 | kstart = kpaths[i][:3]
191 | kend = kpaths[i][3:]
192 | k_list = np.zeros((pnkpts, 3))
193 | for alpha in range(3):
194 | k_list[:, alpha] = genlist([kstart[alpha], kend[alpha], pnkpts])
195 | for j in range(pnkpts):
196 | kvec = k_list[j, :]
197 | f.write("{} {}\n".format(norbits, std_out_array(kvec)))
198 | f.write(std_out_array(ev2Hartree * egvals[:, idx_k]) + "\n")
199 | idx_k += 1
200 |
--------------------------------------------------------------------------------
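Both implementations above rely on the same HDF5 layout: every dataset key is the string form of [Rx, Ry, Rz, atom_i, atom_j] (lattice vector of the hopping plus 1-based atom indices); the Julia reader additionally calls permutedims because Julia stores arrays column-major while h5py returns them row-major. A small round-trip sketch, assuming a hypothetical demo.h5:

import h5py
import numpy as np

key = str([0, 0, 1, 2, 3])                  # "[0, 0, 1, 2, 3]"
with h5py.File("demo.h5", "w") as fid:
    fid[key] = np.arange(6.0).reshape(2, 3)

with h5py.File("demo.h5", "r") as fid:
    for key_str in fid.keys():
        # same parsing as _create_dict_h5: strip the brackets, split on commas
        nk = tuple(map(int, key_str[1:-1].split(",")))
        print(nk, np.array(fid[key_str]).shape)  # (0, 0, 1, 2, 3) (2, 3)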
/deeph/inference/inference_default.ini:
--------------------------------------------------------------------------------
1 | [basic]
2 | work_dir = /your/own/path
3 | OLP_dir = /your/own/path
4 | interface = openmx
5 | trained_model_dir = ["/your/trained/model1", "/your/trained/model2"]
6 | task = [1, 2, 3, 4, 5]
7 | sparse_calc_config = /your/own/path
8 | eigen_solver = sparse_jl
9 | disable_cuda = True
10 | device = cuda:0
11 | huge_structure = True
12 | restore_blocks_py = True
13 | gen_rc_idx = False
14 | gen_rc_by_idx =
15 | with_grad = False
16 |
17 | [interpreter]
18 | julia_interpreter = julia
19 | python_interpreter = python
20 |
21 | [graph]
22 | radius = -1.0
23 | create_from_DFT = True
24 |
--------------------------------------------------------------------------------
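Note that trained_model_dir and task above hold JSON lists inside plain INI strings; deeph/scripts/inference.py below reads them with configparser and decodes them with json.loads. A minimal sketch of that convention (the file name my_inference.ini is hypothetical):

import json
from configparser import ConfigParser

config = ConfigParser()
config.read("my_inference.ini")
task = json.loads(config.get("basic", "task"))          # e.g. [1, 2, 3, 4, 5]
model_dirs = config.get("basic", "trained_model_dir")
if model_dirs[0] == "[" and model_dirs[-1] == "]":
    model_dirs = json.loads(model_dirs)                 # ensemble of trained models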
/deeph/inference/local_coordinate.jl:
--------------------------------------------------------------------------------
1 | using DelimitedFiles, LinearAlgebra
2 | using HDF5
3 | using ArgParse
4 | using StaticArrays
5 |
6 |
7 | function parse_commandline()
8 | s = ArgParseSettings()
9 | @add_arg_table! s begin
10 | "--input_dir", "-i"
11 | help = "path of site_positions.dat, lat.dat, element.dat, and R_list.dat (overlaps.h5)"
12 | arg_type = String
13 | default = "./"
14 | "--output_dir", "-o"
15 | help = "path of output rc.h5"
16 | arg_type = String
17 | default = "./"
18 | "--radius", "-r"
19 | help = "cutoff radius"
20 | arg_type = Float64
21 | default = 8.0
22 | "--create_from_DFT"
23 | help = "retain edges by DFT overlaps neighbour"
24 | arg_type = Bool
25 | default = true
26 | "--output_text"
27 | help = "an option without argument, i.e. a flag"
28 | action = :store_true
29 | "--Hop_dir"
30 | help = "path of Hop.jl"
31 | arg_type = String
32 | default = "/home/lihe/DeepH/process_ham/Hop.jl/"
33 | end
34 | return parse_args(s)
35 | end
36 | parsed_args = parse_commandline()
37 |
38 | using Pkg
39 | Pkg.activate(parsed_args["Hop_dir"])
40 | using Hop
41 |
42 |
43 | site_positions = readdlm(joinpath(parsed_args["input_dir"], "site_positions.dat"))
44 | lat = readdlm(joinpath(parsed_args["input_dir"], "lat.dat"))
45 | R_list_read = convert(Matrix{Int64}, readdlm(joinpath(parsed_args["input_dir"], "R_list.dat")))
46 | num_R = size(R_list_read, 1)
47 | R_list = Vector{SVector{3, Int64}}()
48 | for index_R in 1:num_R
49 | push!(R_list, SVector{3, Int64}(R_list_read[index_R, :]))
50 | end
51 |
52 | @info "get local coordinate"
53 | begin_time = time()
54 | rcoordinate = Hop.Deeph.rotate_system(site_positions, lat, R_list, parsed_args["radius"])
55 | println("time for calculating local coordinate is: ", time() - begin_time)
56 |
57 | if parsed_args["output_text"]
58 | @info "output txt"
59 | mkpath(joinpath(parsed_args["output_dir"], "rresult"))
60 | mkpath(joinpath(parsed_args["output_dir"], "rresult/rc"))
61 | for (R, coord) in rcoordinate
62 | open(joinpath(parsed_args["output_dir"], "rresult/rc/", R, "_real.dat"), "w") do f
63 | writedlm(f, coord)
64 | end
65 | end
66 | end
67 |
68 | @info "output h5"
69 | h5open(joinpath(parsed_args["input_dir"], "overlaps.h5"), "r") do fid_OLP
70 | graph_key = Set(keys(fid_OLP))
71 | h5open(joinpath(parsed_args["output_dir"], "rc.h5"), "w") do fid
72 | for (key, coord) in rcoordinate
73 | if (parsed_args["create_from_DFT"] == true) && (!(string(key) in graph_key))
74 | continue
75 | end
76 | write(fid, string(key), permutedims(coord))
77 | end
78 | end
79 | end
80 |
--------------------------------------------------------------------------------
/deeph/inference/restore_blocks.jl:
--------------------------------------------------------------------------------
1 | using JSON
2 | using LinearAlgebra
3 | using DelimitedFiles
4 | using HDF5
5 | using ArgParse
6 |
7 |
8 | function parse_commandline()
9 | s = ArgParseSettings()
10 | @add_arg_table! s begin
11 | "--input_dir", "-i"
12 | help = "path of block_without_restoration, element.dat, site_positions.dat, orbital_types.dat, and info.json"
13 | arg_type = String
14 | default = "./"
15 | "--output_dir", "-o"
16 | help = "path of output rh_pred.h5"
17 | arg_type = String
18 | default = "./"
19 | end
20 | return parse_args(s)
21 | end
22 | parsed_args = parse_commandline()
23 |
24 |
25 | function _create_dict_h5(filename::String)
26 | fid = h5open(filename, "r")
27 | T = eltype(fid[keys(fid)[1]])
28 | d_out = Dict{Array{Int64,1}, Array{T, 2}}()
29 | for key in keys(fid)
30 | data = read(fid[key])
31 | nk = map(x -> parse(Int64, convert(String, x)), split(key[2 : length(key) - 1], ','))
32 | d_out[nk] = permutedims(data)
33 | end
34 | close(fid)
35 | return d_out
36 | end
37 |
38 |
39 | if isfile(joinpath(parsed_args["input_dir"],"info.json"))
40 | spinful = JSON.parsefile(joinpath(parsed_args["input_dir"],"info.json"))["isspinful"]
41 | else
42 | spinful = false
43 | end
44 |
45 |
46 | numbers = readdlm(joinpath(parsed_args["input_dir"], "element.dat"), Int64)
47 | lattice = readdlm(joinpath(parsed_args["input_dir"], "lat.dat"))
48 | inv_lattice = inv(lattice)
49 | site_positions = readdlm(joinpath(parsed_args["input_dir"], "site_positions.dat"))
50 | nsites = size(site_positions, 2)
51 | orbital_types_f = open(joinpath(parsed_args["input_dir"], "orbital_types.dat"), "r")
52 | site_norbits = zeros(nsites)
53 | orbital_types = Vector{Vector{Int64}}()
54 | for index_site = 1:nsites
55 | orbital_type = parse.(Int64, split(readline(orbital_types_f)))
56 | push!(orbital_types, orbital_type)
57 | end
58 | site_norbits = (x->sum(x .* 2 .+ 1)).(orbital_types) * (1 + spinful)
59 | atom_num_orbital = (x->sum(x .* 2 .+ 1)).(orbital_types)
60 |
61 | fid = h5open(joinpath(parsed_args["input_dir"], "block_without_restoration", "block_without_restoration.h5"), "r")
62 | num_model = read(fid["num_model"])
63 | T_pytorch = eltype(fid["output_0"])
64 | if spinful
65 | T_Hamiltonian = Complex{T_pytorch}
66 | else
67 | T_Hamiltonian = T_pytorch
68 | end
69 | hoppings_pred = Dict{Array{Int64,1}, Array{T_Hamiltonian, 2}}()
70 | println("Found $num_model models, spinful:$spinful")
71 | edge_attr = read(fid["edge_attr"])
72 | edge_index = read(fid["edge_index"])
73 | for index_model in 0:(num_model-1)
74 | output = read(fid["output_$index_model"])
75 | orbital = JSON.parsefile(joinpath(parsed_args["input_dir"], "block_without_restoration", "orbital_$index_model.json"))
76 | orbital = convert(Vector{Dict{String, Vector{Int}}}, orbital)
77 | for index in 1:size(edge_index, 1)
78 | R = Int.(round.(inv_lattice * edge_attr[5:7, index] - inv_lattice * edge_attr[8:10, index]))
79 | i = edge_index[index, 1] + 1
80 | j = edge_index[index, 2] + 1
81 | key_term = cat(R, i, j, dims=1)
82 | for (index_orbital, orbital_dict) in enumerate(orbital)
83 | atomic_number_pair = "$(numbers[i]) $(numbers[j])"
84 | if !(atomic_number_pair ∈ keys(orbital_dict))
85 | continue
86 | end
87 | orbital_i, orbital_j = orbital_dict[atomic_number_pair]
88 | orbital_i += 1
89 | orbital_j += 1
90 |
91 | if !(key_term ∈ keys(hoppings_pred))
92 | if spinful
93 | hoppings_pred[key_term] = fill(NaN + NaN * im, 2 * atom_num_orbital[i], 2 * atom_num_orbital[j])
94 | else
95 | hoppings_pred[key_term] = fill(NaN, atom_num_orbital[i], atom_num_orbital[j])
96 | end
97 | end
98 | if spinful
99 | hoppings_pred[key_term][orbital_i, orbital_j] = output[index_orbital * 8 - 7, index] + output[index_orbital * 8 - 6, index] * im
100 | hoppings_pred[key_term][atom_num_orbital[i] + orbital_i, atom_num_orbital[j] + orbital_j] = output[index_orbital * 8 - 5, index] + output[index_orbital * 8 - 4, index] * im
101 | hoppings_pred[key_term][orbital_i, atom_num_orbital[j] + orbital_j] = output[index_orbital * 8 - 3, index] + output[index_orbital * 8 - 2, index] * im
102 | hoppings_pred[key_term][atom_num_orbital[i] + orbital_i, orbital_j] = output[index_orbital * 8 - 1, index] + output[index_orbital * 8, index] * im
103 | else
104 | hoppings_pred[key_term][orbital_i, orbital_j] = output[index_orbital, index]
105 | end
106 | end
107 | end
108 | end
109 | close(fid)
110 |
111 | h5open(joinpath(parsed_args["output_dir"], "rh_pred.h5"), "w") do fid
112 | for (key, rh_pred) in hoppings_pred
113 | write(fid, string(key), permutedims(rh_pred))
114 | end
115 | end
116 |
--------------------------------------------------------------------------------
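In the spinful branch above, each orbital pair carries eight real network outputs: the real and imaginary parts of the four 2x2 spin blocks, written in the order up-up, down-down, up-down, down-up (the left_up/right_down/right_up/left_down naming used in deeph/scripts/evaluate.py). A 0-based Python sketch of the same unpacking (all names are illustrative):

import numpy as np

def unpack_spinful(out8, ni, nj, oi, oj, block):
    """Place the 8 outputs of one orbital pair into a (2*ni, 2*nj) complex block.

    ni, nj: numbers of spatial orbitals on atoms i and j
    oi, oj: 0-based orbital indices within the block
    """
    block[oi,      oj     ] = out8[0] + 1j * out8[1]   # up-up      (left_up)
    block[ni + oi, nj + oj] = out8[2] + 1j * out8[3]   # down-down  (right_down)
    block[oi,      nj + oj] = out8[4] + 1j * out8[5]   # up-down    (right_up)
    block[ni + oi, oj     ] = out8[6] + 1j * out8[7]   # down-up    (left_down)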
/deeph/preprocess/__init__.py:
--------------------------------------------------------------------------------
1 | from .openmx_parse import OijLoad, GetEEiEij, openmx_parse_overlap
2 | from .get_rc import get_rc
3 | from .abacus_get_data import abacus_parse
4 | from .siesta_get_data import siesta_parse
5 |
--------------------------------------------------------------------------------
/deeph/preprocess/get_rc.py:
--------------------------------------------------------------------------------
1 | import os
2 | import json
3 |
4 | import h5py
5 | import numpy as np
6 | import torch
7 |
8 |
9 | class Neighbours:
10 | def __init__(self):
11 | self.Rs = []
12 | self.dists = []
13 | self.eijs = []
14 | self.indices = []
15 |
16 | def __str__(self):
17 | return 'Rs: {}\ndists: {}\neijs: {}\nindices: {}'.format(
18 | self.Rs, self.dists, self.eijs, self.indices)
19 |
20 |
21 | def _get_local_coordinate(eij, neighbours_i, gen_rc_idx=False, atom_j=None, atom_j_R=None, r2_rand=False):
22 | if gen_rc_idx:
23 | rc_idx = np.full(8, np.nan, dtype=np.int32)
24 | assert r2_rand is False
25 | assert atom_j is not None, 'atom_j must be specified when gen_rc_idx is True'
26 | assert atom_j_R is not None, 'atom_j_R must be specified when gen_rc_idx is True'
27 | else:
28 | rc_idx = None
29 | if r2_rand:
30 | r2_list = []
31 |
32 | if not np.allclose(eij.detach(), torch.zeros_like(eij)):
33 | r1 = eij
34 | if gen_rc_idx:
35 | rc_idx[0] = atom_j
36 | rc_idx[1:4] = atom_j_R
37 | else:
38 | r1 = neighbours_i.eijs[1]
39 | if gen_rc_idx:
40 | rc_idx[0] = neighbours_i.indices[1]
41 | rc_idx[1:4] = neighbours_i.Rs[1]
42 | r2_flag = None
43 | for r2, r2_index, r2_R in zip(neighbours_i.eijs[1:], neighbours_i.indices[1:], neighbours_i.Rs[1:]):
44 | if torch.norm(torch.cross(r1, r2)) > 1e-6:
45 | if gen_rc_idx:
46 | rc_idx[4] = r2_index
47 | rc_idx[5:8] = r2_R
48 | r2_flag = True
49 | if r2_rand:
50 | if (len(r2_list) == 0) or (torch.norm(r2_list[0]) + 0.5 > torch.norm(r2)):
51 | r2_list.append(r2)
52 | else:
53 | break
54 | else:
55 | break
56 | assert r2_flag is not None, "No linearly independent chemical bond found within the Rcut range; this may be caused by a too small Rcut or by a 1D structure"
57 | if r2_rand:
58 | # print(f"r2 is randomly chosen from {len(r2_list)} candidates")
59 | r2 = r2_list[np.random.randint(len(r2_list))]
60 | local_coordinate_1 = r1 / torch.norm(r1)
61 | local_coordinate_2 = torch.cross(r1, r2) / torch.norm(torch.cross(r1, r2))
62 | local_coordinate_3 = torch.cross(local_coordinate_1, local_coordinate_2)
63 | return torch.stack([local_coordinate_1, local_coordinate_2, local_coordinate_3], dim=-1), rc_idx
64 |
65 |
66 | def get_rc(input_dir, output_dir, radius, r2_rand=False, gen_rc_idx=False, gen_rc_by_idx="", create_from_DFT=True, neighbour_file='overlaps.h5', if_require_grad=False, cart_coords=None):
67 | if not if_require_grad:
68 | assert os.path.exists(os.path.join(input_dir, 'site_positions.dat')), 'No site_positions.dat found in {}'.format(input_dir)
69 | cart_coords = torch.tensor(np.loadtxt(os.path.join(input_dir, 'site_positions.dat')).T)
70 | else:
71 | assert cart_coords is not None, 'cart_coords must be provided if "if_require_grad" is True'
72 | assert os.path.exists(os.path.join(input_dir, 'lat.dat')), 'No lat.dat found in {}'.format(input_dir)
73 | lattice = torch.tensor(np.loadtxt(os.path.join(input_dir, 'lat.dat')).T, dtype=cart_coords.dtype)
74 |
75 | rc_dict = {}
76 | if gen_rc_idx:
77 | assert r2_rand is False, 'r2_rand must be False when gen_rc_idx is True'
78 | assert gen_rc_by_idx == "", 'gen_rc_by_idx must be "" when gen_rc_idx is True'
79 | rc_idx_dict = {}
80 | neighbours_dict = {}
81 | if gen_rc_by_idx != "":
82 | # print(f'get local coordinate using {os.path.join(gen_rc_by_idx, "rc_idx.h5")} from: {input_dir}')
83 | assert os.path.exists(os.path.join(gen_rc_by_idx, "rc_idx.h5")), 'rc_idx.h5 (atomic indices for constructing local coordinates) not found in {}'.format(gen_rc_by_idx)
84 | fid_rc_idx = h5py.File(os.path.join(gen_rc_by_idx, "rc_idx.h5"), 'r')
85 | for key_str, rc_idx in fid_rc_idx.items():
86 | key = json.loads(key_str)
87 | # R = torch.tensor([key[0], key[1], key[2]])
88 | atom_i = key[3] - 1
89 | cart_coords_i = cart_coords[atom_i]
90 |
91 | r1 = cart_coords[rc_idx[0]] + torch.tensor(rc_idx[1:4]).type(cart_coords.dtype) @ lattice - cart_coords_i
92 | r2 = cart_coords[rc_idx[4]] + torch.tensor(rc_idx[5:8]).type(cart_coords.dtype) @ lattice - cart_coords_i
93 | local_coordinate_1 = r1 / torch.norm(r1)
94 | local_coordinate_2 = torch.cross(r1, r2) / torch.norm(torch.cross(r1, r2))
95 | local_coordinate_3 = torch.cross(local_coordinate_1, local_coordinate_2)
96 |
97 | rc_dict[key_str] = torch.stack([local_coordinate_1, local_coordinate_2, local_coordinate_3], dim=-1)
98 | fid_rc_idx.close()
99 | else:
100 | # print("get local coordinate from:", input_dir)
101 | if create_from_DFT:
102 | assert os.path.exists(os.path.join(input_dir, neighbour_file)), 'No {} found in {}'.format(neighbour_file, input_dir)
103 | fid_OLP = h5py.File(os.path.join(input_dir, neighbour_file), 'r')
104 | for key_str in fid_OLP.keys():
105 | key = json.loads(key_str)
106 | R = torch.tensor([key[0], key[1], key[2]])
107 | atom_i = key[3] - 1
108 | atom_j = key[4] - 1
109 | cart_coords_i = cart_coords[atom_i]
110 | cart_coords_j = cart_coords[atom_j] + R.type(cart_coords.dtype) @ lattice
111 | eij = cart_coords_j - cart_coords_i
112 | dist = torch.norm(eij)
113 | if radius > 0 and dist > radius:
114 | continue
115 | if atom_i not in neighbours_dict:
116 | neighbours_dict[atom_i] = Neighbours()
117 | neighbours_dict[atom_i].Rs.append(R)
118 | neighbours_dict[atom_i].dists.append(dist)
119 | neighbours_dict[atom_i].eijs.append(eij)
120 | neighbours_dict[atom_i].indices.append(atom_j)
121 |
122 | for atom_i, neighbours_i in neighbours_dict.items():
123 | neighbours_i.Rs = torch.stack(neighbours_i.Rs)
124 | neighbours_i.dists = torch.tensor(neighbours_i.dists, dtype=cart_coords.dtype)
125 | neighbours_i.eijs = torch.stack(neighbours_i.eijs)
126 | neighbours_i.indices = torch.tensor(neighbours_i.indices)
127 |
128 | neighbours_i.dists, sorted_index = torch.sort(neighbours_i.dists)
129 | neighbours_i.Rs = neighbours_i.Rs[sorted_index]
130 | neighbours_i.eijs = neighbours_i.eijs[sorted_index]
131 | neighbours_i.indices = neighbours_i.indices[sorted_index]
132 |
133 | assert np.allclose(neighbours_i.eijs[0].detach(), torch.zeros_like(neighbours_i.eijs[0])), 'eijs[0] should be zero'
134 |
135 | for R, eij, atom_j, atom_j_R in zip(neighbours_i.Rs, neighbours_i.eijs, neighbours_i.indices, neighbours_i.Rs):
136 | key_str = str(list([*R.tolist(), atom_i + 1, atom_j.item() + 1]))
137 | if gen_rc_idx:
138 | rc_dict[key_str], rc_idx_dict[key_str] = _get_local_coordinate(eij, neighbours_i, gen_rc_idx, atom_j, atom_j_R)
139 | else:
140 | rc_dict[key_str] = _get_local_coordinate(eij, neighbours_i, r2_rand=r2_rand)[0]
141 | else:
142 | raise NotImplementedError
143 |
144 | if create_from_DFT:
145 | fid_OLP.close()
146 |
147 | if if_require_grad:
148 | return rc_dict
149 | else:
150 | if os.path.exists(os.path.join(output_dir, 'rc_julia.h5')):
151 | rc_old_flag = True
152 | fid_rc_old = h5py.File(os.path.join(output_dir, 'rc_julia.h5'), 'r')
153 | else:
154 | rc_old_flag = False
155 | fid_rc = h5py.File(os.path.join(output_dir, 'rc.h5'), 'w')
156 | for k, v in rc_dict.items():
157 | if rc_old_flag:
158 | assert np.allclose(v, fid_rc_old[k][...], atol=1e-4), f"{k}, {v}, {fid_rc_old[k][...]}"
159 | fid_rc[k] = v
160 | fid_rc.close()
161 | if gen_rc_idx:
162 | fid_rc_idx = h5py.File(os.path.join(output_dir, 'rc_idx.h5'), 'w')
163 | for k, v in rc_idx_dict.items():
164 | fid_rc_idx[k] = v
165 | fid_rc_idx.close()
166 |
--------------------------------------------------------------------------------
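_get_local_coordinate above builds an orthonormal frame per atom pair: the first axis along the bond r1, the second along r1 × r2, where r2 is the nearest neighbour bond not collinear with r1, and the third as their cross product. A standalone NumPy sketch of the same construction:

import numpy as np

def local_frame(r1, r2):
    """Orthonormal local frame from two non-collinear bond vectors."""
    e1 = r1 / np.linalg.norm(r1)
    n = np.cross(r1, r2)
    assert np.linalg.norm(n) > 1e-6, "r1 and r2 must not be collinear"
    e2 = n / np.linalg.norm(n)
    e3 = np.cross(e1, e2)
    return np.stack([e1, e2, e3], axis=-1)   # columns are the local axes

R = local_frame(np.array([1.0, 0.0, 0.0]), np.array([0.0, 1.0, 0.0]))
assert np.allclose(R.T @ R, np.eye(3))       # the frame is orthonormal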
/deeph/preprocess/preprocess_default.ini:
--------------------------------------------------------------------------------
1 | [basic]
2 | raw_dir = /your/own/path
3 | processed_dir = /your/own/path
4 | target = hamiltonian
5 | interface = openmx
6 | multiprocessing = 0
7 | local_coordinate = True
8 | get_S = False
9 |
10 | [interpreter]
11 | julia_interpreter = julia
12 |
13 | [graph]
14 | radius = -1.0
15 | create_from_DFT = True
16 | r2_rand = False
17 |
18 | [magnetic_moment]
19 | parse_magnetic_moment = False
20 | magnetic_element = ["Cr", "Mn", "Fe", "Co", "Ni"]
21 |
--------------------------------------------------------------------------------
/deeph/rotate.py:
--------------------------------------------------------------------------------
1 | import json
2 | import os.path
3 | import warnings
4 |
5 | import numpy as np
6 | import h5py
7 | import torch
8 | from e3nn.o3 import Irrep, Irreps, matrix_to_angles
9 |
10 | from deeph import load_orbital_types
11 |
12 | dtype_dict = {
13 | np.float32: (torch.float32, torch.float32, torch.complex64),
14 | np.float64: (torch.float64, torch.float64, torch.complex128),
15 | np.complex64: (torch.complex64, torch.float32, torch.complex64),
16 | np.complex128: (torch.complex128, torch.float64, torch.complex128),
17 | torch.float32: (torch.float32, torch.float32, torch.complex64),
18 | torch.float64: (torch.float64, torch.float64, torch.complex128),
19 | torch.complex64: (torch.complex64, torch.float32, torch.complex64),
20 | torch.complex128: (torch.complex128, torch.float64, torch.complex128),
21 | }
22 |
23 |
24 | class Rotate:
25 | def __init__(self, torch_dtype, torch_dtype_real=torch.float64, torch_dtype_complex=torch.cdouble,
26 | device=torch.device('cpu'), spinful=False):
27 | self.dtype = torch_dtype
28 | self.torch_dtype_real = torch_dtype_real
29 | self.device = device
30 | self.spinful = spinful
31 | sqrt_2 = 1.4142135623730951
32 | self.Us_openmx = {
33 | 0: torch.tensor([1], dtype=torch_dtype_complex, device=device),
34 | 1: torch.tensor([[-1 / sqrt_2, 1j / sqrt_2, 0], [0, 0, 1], [1 / sqrt_2, 1j / sqrt_2, 0]],
35 | dtype=torch_dtype_complex, device=device),
36 | 2: torch.tensor([[0, 1 / sqrt_2, -1j / sqrt_2, 0, 0],
37 | [0, 0, 0, -1 / sqrt_2, 1j / sqrt_2],
38 | [1, 0, 0, 0, 0],
39 | [0, 0, 0, 1 / sqrt_2, 1j / sqrt_2],
40 | [0, 1 / sqrt_2, 1j / sqrt_2, 0, 0]], dtype=torch_dtype_complex, device=device),
41 | 3: torch.tensor([[0, 0, 0, 0, 0, -1 / sqrt_2, 1j / sqrt_2],
42 | [0, 0, 0, 1 / sqrt_2, -1j / sqrt_2, 0, 0],
43 | [0, -1 / sqrt_2, 1j / sqrt_2, 0, 0, 0, 0],
44 | [1, 0, 0, 0, 0, 0, 0],
45 | [0, 1 / sqrt_2, 1j / sqrt_2, 0, 0, 0, 0],
46 | [0, 0, 0, 1 / sqrt_2, 1j / sqrt_2, 0, 0],
47 | [0, 0, 0, 0, 0, 1 / sqrt_2, 1j / sqrt_2]], dtype=torch_dtype_complex, device=device),
48 | }
49 | self.Us_openmx2wiki = {
50 | 0: torch.eye(1, dtype=torch_dtype).to(device=device),
51 | 1: torch.eye(3, dtype=torch_dtype)[[1, 2, 0]].to(device=device),
52 | 2: torch.eye(5, dtype=torch_dtype)[[2, 4, 0, 3, 1]].to(device=device),
53 | 3: torch.eye(7, dtype=torch_dtype)[[6, 4, 2, 0, 1, 3, 5]].to(device=device)
54 | }
55 | self.Us_wiki2openmx = {k: v.T for k, v in self.Us_openmx2wiki.items()}
56 |
57 | def rotate_e3nn_v(self, v, R, l, order_xyz=True):
58 | if self.spinful:
59 | raise NotImplementedError
60 | assert len(R.shape) == 2
61 | if order_xyz:
62 | R_e3nn = self.rotate_matrix_convert(R)
63 | else:
64 | R_e3nn = R
65 | return v @ Irrep(l, 1).D_from_matrix(R_e3nn)
66 |
67 | def rotate_openmx_H_old(self, H, R, l_lefts, l_rights, order_xyz=True):
68 | assert len(R.shape) == 2
69 | if order_xyz:
70 | R_e3nn = self.rotate_matrix_convert(R)
71 | else:
72 | R_e3nn = R
73 |
74 | block_lefts = []
75 | for l_left in l_lefts:
76 | block_lefts.append(
77 | self.Us_openmx2wiki[l_left].T @ Irrep(l_left, 1).D_from_matrix(R_e3nn) @ self.Us_openmx2wiki[l_left])
78 | rotation_left = torch.block_diag(*block_lefts)
79 |
80 | block_rights = []
81 | for l_right in l_rights:
82 | block_rights.append(
83 | self.Us_openmx2wiki[l_right].T @ Irrep(l_right, 1).D_from_matrix(R_e3nn) @ self.Us_openmx2wiki[l_right])
84 | rotation_right = torch.block_diag(*block_rights)
85 |
86 | return torch.einsum("cd,ca,db->ab", H, rotation_left, rotation_right)
87 |
88 | def rotate_openmx_H(self, H, R, l_lefts, l_rights, order_xyz=True):
89 | # spin-1/2 part written by gongxx
90 | assert len(R.shape) == 2
91 | if order_xyz:
92 | R_e3nn = self.rotate_matrix_convert(R)
93 | else:
94 | R_e3nn = R
95 | irreps_left = Irreps([(1, (l, 1)) for l in l_lefts])
96 | irreps_right = Irreps([(1, (l, 1)) for l in l_rights])
97 | U_left = irreps_left.D_from_matrix(R_e3nn)
98 | U_right = irreps_right.D_from_matrix(R_e3nn)
99 | openmx2wiki_left = torch.block_diag(*[self.Us_openmx2wiki[l] for l in l_lefts])
100 | openmx2wiki_right = torch.block_diag(*[self.Us_openmx2wiki[l] for l in l_rights])
101 | if self.spinful:
102 | U_left = torch.kron(self.D_one_half(R_e3nn), U_left)
103 | U_right = torch.kron(self.D_one_half(R_e3nn), U_right)
104 | openmx2wiki_left = torch.block_diag(openmx2wiki_left, openmx2wiki_left)
105 | openmx2wiki_right = torch.block_diag(openmx2wiki_right, openmx2wiki_right)
106 | return openmx2wiki_left.T @ U_left.transpose(-1, -2).conj() @ openmx2wiki_left @ H \
107 | @ openmx2wiki_right.T @ U_right @ openmx2wiki_right
108 |
109 | def rotate_openmx_phiVdphi(self, phiVdphi, R, l_lefts, l_rights, order_xyz=True):
110 | if self.spinful:
111 | raise NotImplementedError
112 | assert phiVdphi.shape[-1] == 3
113 | assert len(R.shape) == 2
114 | if order_xyz:
115 | R_e3nn = self.rotate_matrix_convert(R)
116 | else:
117 | R_e3nn = R
118 | block_lefts = []
119 | for l_left in l_lefts:
120 | block_lefts.append(
121 | self.Us_openmx2wiki[l_left].T @ Irrep(l_left, 1).D_from_matrix(R_e3nn) @ self.Us_openmx2wiki[l_left])
122 | rotation_left = torch.block_diag(*block_lefts)
123 |
124 | block_rights = []
125 | for l_right in l_rights:
126 | block_rights.append(
127 | self.Us_openmx2wiki[l_right].T @ Irrep(l_right, 1).D_from_matrix(R_e3nn) @ self.Us_openmx2wiki[l_right])
128 | rotation_right = torch.block_diag(*block_rights)
129 |
130 | rotation_x = self.Us_openmx2wiki[1].T @ Irrep(1, 1).D_from_matrix(R_e3nn) @ self.Us_openmx2wiki[1]
131 |
132 | return torch.einsum("def,da,eb,fc->abc", phiVdphi, rotation_left, rotation_right, rotation_x)
133 |
134 | def wiki2openmx_H(self, H, l_left, l_right):
135 | if self.spinful:
136 | raise NotImplementedError
137 | return self.Us_openmx2wiki[l_left].T @ H @ self.Us_openmx2wiki[l_right]
138 |
139 | def openmx2wiki_H(self, H, l_left, l_right):
140 | if self.spinful:
141 | raise NotImplementedError
142 | return self.Us_openmx2wiki[l_left] @ H @ self.Us_openmx2wiki[l_right].T
143 |
144 | def rotate_matrix_convert(self, R):
145 | return R.index_select(0, R.new_tensor([1, 2, 0]).int()).index_select(1, R.new_tensor([1, 2, 0]).int())
146 |
147 | def D_one_half(self, R):
148 | # written by gongxx
149 | assert self.spinful
150 | d = torch.det(R).sign()
151 | R = d[..., None, None] * R
152 | k = (1 - d) / 2 # parity index
153 | alpha, beta, gamma = matrix_to_angles(R)
154 | J = torch.tensor([[1, 1], [1j, -1j]], dtype=self.dtype) / 1.4142135623730951 # <1/2 mz|1/2 my>
155 | Uz1 = self._sp_z_rot(alpha)
156 | Uy = J @ self._sp_z_rot(beta) @ J.T.conj()
157 | Uz2 = self._sp_z_rot(gamma)
158 | return Uz1 @ Uy @ Uz2
159 |
160 | def _sp_z_rot(self, angle):
161 | # written by gongxx
162 | assert self.spinful
163 | M = torch.zeros([*angle.shape, 2, 2], dtype=self.dtype)
164 | inds = torch.tensor([0, 1])
165 | freqs = torch.tensor([0.5, -0.5], dtype=self.dtype)
166 | M[..., inds, inds] = torch.exp(- freqs * (1j) * angle[..., None])
167 | return M
168 |
169 |
170 | def get_rh(input_dir, output_dir, target='hamiltonian'):
171 | torch_device = torch.device('cpu')
172 | assert target in ['hamiltonian', 'phiVdphi']
173 | file_name = {
174 | 'hamiltonian': 'hamiltonians.h5',
175 | 'phiVdphi': 'phiVdphi.h5',
176 | }[target]
177 | prime_file_name = {
178 | 'hamiltonian': 'rh.h5',
179 | 'phiVdphi': 'rphiVdphi.h5',
180 | }[target]
181 | assert os.path.exists(os.path.join(input_dir, file_name))
182 | assert os.path.exists(os.path.join(input_dir, 'rc.h5'))
183 | assert os.path.exists(os.path.join(input_dir, 'orbital_types.dat'))
184 | assert os.path.exists(os.path.join(input_dir, 'info.json'))
185 |
186 | atom_num_orbital, orbital_types = load_orbital_types(os.path.join(input_dir, 'orbital_types.dat'),
187 | return_orbital_types=True)
188 | nsite = len(atom_num_orbital)
189 | with open(os.path.join(input_dir, 'info.json'), 'r') as info_f:
190 | info_dict = json.load(info_f)
191 | spinful = info_dict["isspinful"]
192 | fid_H = h5py.File(os.path.join(input_dir, file_name), 'r')
193 | fid_rc = h5py.File(os.path.join(input_dir, 'rc.h5'), 'r')
194 | fid_rh = h5py.File(os.path.join(output_dir, prime_file_name), 'w')
195 | assert '[0, 0, 0, 1, 1]' in fid_H.keys()
196 | h5_dtype = fid_H['[0, 0, 0, 1, 1]'].dtype
197 | torch_dtype, torch_dtype_real, torch_dtype_complex = dtype_dict[h5_dtype.type]
198 | rotate_kernel = Rotate(torch_dtype, torch_dtype_real=torch_dtype_real, torch_dtype_complex=torch_dtype_complex,
199 | device=torch_device, spinful=spinful)
200 |
201 | for key_str, hamiltonian in fid_H.items():
202 | if key_str not in fid_rc:
203 | warnings.warn(f'Hamiltonian matrix block ({key_str}) does not have a local coordinate')
204 | continue
205 | rotation_matrix = torch.tensor(fid_rc[key_str], dtype=torch_dtype_real, device=torch_device)
206 | key = json.loads(key_str)
207 | atom_i = key[3] - 1
208 | atom_j = key[4] - 1
209 | assert atom_i >= 0
210 | assert atom_i < nsite
211 | assert atom_j >= 0
212 | assert atom_j < nsite
213 | if target == 'hamiltonian':
214 | rotated_hamiltonian = rotate_kernel.rotate_openmx_H(torch.tensor(hamiltonian), rotation_matrix,
215 | orbital_types[atom_i], orbital_types[atom_j])
216 | elif target == 'phiVdphi':
217 | rotated_hamiltonian = rotate_kernel.rotate_openmx_phiVdphi(torch.tensor(hamiltonian), rotation_matrix,
218 | orbital_types[atom_i], orbital_types[atom_j])
219 | fid_rh[key_str] = rotated_hamiltonian.numpy()
220 |
221 | fid_H.close()
222 | fid_rc.close()
223 | fid_rh.close()
224 |
225 |
226 | def rotate_back(input_dir, output_dir, target='hamiltonian'):
227 | torch_device = torch.device('cpu')
228 | assert target in ['hamiltonian', 'phiVdphi']
229 | file_name = {
230 | 'hamiltonian': 'hamiltonians_pred.h5',
231 | 'phiVdphi': 'phiVdphi_pred.h5',
232 | }[target]
233 | prime_file_name = {
234 | 'hamiltonian': 'rh_pred.h5',
235 | 'phiVdphi': 'rphiVdphi_pred.h5',
236 | }[target]
237 | assert os.path.exists(os.path.join(input_dir, prime_file_name))
238 | assert os.path.exists(os.path.join(input_dir, 'rc.h5'))
239 | assert os.path.exists(os.path.join(input_dir, 'orbital_types.dat'))
240 | assert os.path.exists(os.path.join(input_dir, 'info.json'))
241 |
242 | atom_num_orbital, orbital_types = load_orbital_types(os.path.join(input_dir, 'orbital_types.dat'),
243 | return_orbital_types=True)
244 | nsite = len(atom_num_orbital)
245 | with open(os.path.join(input_dir, 'info.json'), 'r') as info_f:
246 | info_dict = json.load(info_f)
247 | spinful = info_dict["isspinful"]
248 | fid_rc = h5py.File(os.path.join(input_dir, 'rc.h5'), 'r')
249 | fid_rh = h5py.File(os.path.join(input_dir, prime_file_name), 'r')
250 | fid_H = h5py.File(os.path.join(output_dir, file_name), 'w')
251 | assert '[0, 0, 0, 1, 1]' in fid_rh.keys()
252 | h5_dtype = fid_rh['[0, 0, 0, 1, 1]'].dtype
253 | torch_dtype, torch_dtype_real, torch_dtype_complex = dtype_dict[h5_dtype.type]
254 | rotate_kernel = Rotate(torch_dtype, torch_dtype_real=torch_dtype_real, torch_dtype_complex=torch_dtype_complex,
255 | device=torch_device, spinful=spinful)
256 |
257 | for key_str, rotated_hamiltonian in fid_rh.items():
258 | assert key_str in fid_rc
259 | rotation_matrix = torch.tensor(fid_rc[key_str], dtype=torch_dtype_real, device=torch_device).T
260 | key = json.loads(key_str)
261 | atom_i = key[3] - 1
262 | atom_j = key[4] - 1
263 | assert atom_i >= 0
264 | assert atom_i < nsite
265 | assert atom_j >= 0
266 | assert atom_j < nsite
267 | if target == 'hamiltonian':
268 | hamiltonian = rotate_kernel.rotate_openmx_H(torch.tensor(rotated_hamiltonian), rotation_matrix,
269 | orbital_types[atom_i], orbital_types[atom_j])
270 | elif target == 'phiVdphi':
271 | hamiltonian = rotate_kernel.rotate_openmx_phiVdphi(torch.tensor(rotated_hamiltonian), rotation_matrix,
272 | orbital_types[atom_i], orbital_types[atom_j])
273 | fid_H[key_str] = hamiltonian.numpy()
274 |
275 | fid_H.close()
276 | fid_rc.close()
277 | fid_rh.close()
278 |
--------------------------------------------------------------------------------
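rotate_openmx_H above conjugates each Hamiltonian block with Wigner-D matrices obtained from e3nn, after permuting the OpenMX orbital order into the Wikipedia/e3nn convention with Us_openmx2wiki. A minimal sketch checking the representation property D(R1·R2) = D(R1)·D(R2) for l = 1 with the same e3nn API:

import torch
from e3nn.o3 import Irrep, rand_matrix

irrep = Irrep(1, 1)                     # l = 1 with even parity, as used above
R1, R2 = rand_matrix(), rand_matrix()   # random proper rotations
D12 = irrep.D_from_matrix(R1 @ R2)
assert torch.allclose(D12, irrep.D_from_matrix(R1) @ irrep.D_from_matrix(R2), atol=1e-5)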
/deeph/scripts/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/deepmodeling/DeepH-pack/8720d94ab11a1c7f42c633cb6b59f7e8eb3fa5c7/deeph/scripts/__init__.py
--------------------------------------------------------------------------------
/deeph/scripts/evaluate.py:
--------------------------------------------------------------------------------
1 | import csv
2 | import os
3 | import argparse
4 | import time
5 | import warnings
6 | from configparser import ConfigParser
7 |
8 | import numpy as np
9 | import torch
10 | from pymatgen.core.structure import Structure
11 |
12 | from deeph import get_graph, DeepHKernel, collate_fn
13 |
14 |
15 | def main():
16 | parser = argparse.ArgumentParser(description='Predict Hamiltonian')
17 | parser.add_argument('--trained_model_dir', type=str,
18 | help='path of trained model')
19 | parser.add_argument('--input_dir', type=str,
20 | help='path of the processed structure (lat.dat, element.dat, site_positions.dat, ...)')
21 | parser.add_argument('--output_dir', type=str,
22 | help='path to save the evaluation results')
23 | parser.add_argument('--disable_cuda', action='store_true', help='Disable CUDA')
24 | parser.add_argument('--save_csv', action='store_true', help='Save the result for each edge in csv format')
25 | parser.add_argument(
26 | '--interface',
27 | type=str,
28 | default='h5',
29 | choices=['h5', 'npz'])
30 | parser.add_argument('--huge_structure', action='store_true', help='use the memory-saving code path for huge structures')
31 | args = parser.parse_args()
32 |
33 | old_version = False
34 | assert os.path.exists(os.path.join(args.trained_model_dir, 'config.ini'))
35 | if os.path.exists(os.path.join(args.trained_model_dir, 'best_model.pt')) is False:
36 | old_version = True
37 | assert os.path.exists(os.path.join(args.trained_model_dir, 'best_model.pkl'))
38 | assert os.path.exists(os.path.join(args.trained_model_dir, 'src'))
39 |
40 | os.makedirs(args.output_dir, exist_ok=True)
41 |
42 | config = ConfigParser()
43 | config.read(os.path.join(os.path.dirname(os.path.dirname(__file__)), 'default.ini'))
44 | config.read(os.path.join(args.trained_model_dir, 'config.ini'))
45 | config.set('basic', 'save_dir', os.path.join(args.output_dir))
46 | config.set('basic', 'disable_cuda', str(args.disable_cuda))
47 | config.set('basic', 'save_to_time_folder', 'False')
48 | config.set('basic', 'tb_writer', 'False')
49 | config.set('train', 'pretrained', '')
50 | config.set('train', 'resume', '')
51 | kernel = DeepHKernel(config)
52 | if old_version is False:
53 | checkpoint = kernel.build_model(args.trained_model_dir, old_version)
54 | else:
55 | warnings.warn('You are using a trained model saved by an old version of DeepH-pack')
56 | checkpoint = torch.load(
57 | os.path.join(args.trained_model_dir, 'best_model.pkl'),
58 | map_location=kernel.device
59 | )
60 | for key in ['index_to_Z', 'Z_to_index', 'spinful']:
61 | if key in checkpoint:
62 | setattr(kernel, key, checkpoint[key])
63 | if hasattr(kernel, 'index_to_Z') is False:
64 | kernel.index_to_Z = torch.arange(config.getint('basic', 'max_element') + 1)
65 | if hasattr(kernel, 'Z_to_index') is False:
66 | kernel.Z_to_index = torch.arange(config.getint('basic', 'max_element') + 1)
67 | if hasattr(kernel, 'spinful') is False:
68 | kernel.spinful = False
69 | kernel.num_species = len(kernel.index_to_Z)
70 | print("=> load best checkpoint (epoch {})".format(checkpoint['epoch']))
71 | print(f"=> Atomic types: {kernel.index_to_Z.tolist()}, "
72 | f"spinful: {kernel.spinful}, the number of atomic types: {len(kernel.index_to_Z)}.")
73 | kernel.build_model(args.trained_model_dir, old_version)
74 | kernel.model.load_state_dict(checkpoint['state_dict'])
75 |
76 | with torch.no_grad():
77 | input_dir = args.input_dir
78 | structure = Structure(np.loadtxt(os.path.join(args.input_dir, 'lat.dat')).T,
79 | np.loadtxt(os.path.join(args.input_dir, 'element.dat')),
80 | np.loadtxt(os.path.join(args.input_dir, 'site_positions.dat')).T,
81 | coords_are_cartesian=True,
82 | to_unit_cell=False)
83 | cart_coords = torch.tensor(structure.cart_coords, dtype=torch.get_default_dtype())
84 | frac_coords = torch.tensor(structure.frac_coords, dtype=torch.get_default_dtype())
85 | numbers = kernel.Z_to_index[torch.tensor(structure.atomic_numbers)]
86 | structure.lattice.matrix.setflags(write=True)
87 | lattice = torch.tensor(structure.lattice.matrix, dtype=torch.get_default_dtype())
88 | inv_lattice = torch.inverse(lattice)
89 |
90 | if os.path.exists(os.path.join(input_dir, 'graph.pkl')):
91 | data = torch.load(os.path.join(input_dir, 'graph.pkl'))
92 | print(f"Load processed graph from {os.path.join(input_dir, 'graph.pkl')}")
93 | else:
94 | begin = time.time()
95 | data = get_graph(cart_coords, frac_coords, numbers, 0,
96 | r=kernel.config.getfloat('graph', 'radius'),
97 | max_num_nbr=kernel.config.getint('graph', 'max_num_nbr'),
98 | numerical_tol=1e-8, lattice=lattice, default_dtype_torch=torch.get_default_dtype(),
99 | tb_folder=args.input_dir, interface=args.interface,
100 | num_l=kernel.config.getint('network', 'num_l'),
101 | create_from_DFT=kernel.config.getboolean('graph', 'create_from_DFT', fallback=True),
102 | if_lcmp_graph=kernel.config.getboolean('graph', 'if_lcmp_graph', fallback=True),
103 | separate_onsite=kernel.separate_onsite,
104 | target=kernel.config.get('basic', 'target'), huge_structure=args.huge_structure)
105 | torch.save(data, os.path.join(input_dir, 'graph.pkl'))
106 | print(f"Save processed graph to {os.path.join(input_dir, 'graph.pkl')}, cost {time.time() - begin} seconds")
107 |
108 | dataset_mask = kernel.make_mask([data])
109 | batch, subgraph = collate_fn(dataset_mask)
110 | sub_atom_idx, sub_edge_idx, sub_edge_ang, sub_index = subgraph
111 |
112 | output = kernel.model(batch.x.to(kernel.device), batch.edge_index.to(kernel.device),
113 | batch.edge_attr.to(kernel.device),
114 | batch.batch.to(kernel.device),
115 | sub_atom_idx.to(kernel.device), sub_edge_idx.to(kernel.device),
116 | sub_edge_ang.to(kernel.device), sub_index.to(kernel.device),
117 | huge_structure=args.huge_structure)
118 |
119 | label = batch.label
120 | mask = batch.mask
121 | output = output.cpu().reshape(label.shape)
122 |
123 | assert label.shape == output.shape == mask.shape
124 | mse = torch.pow(label - output, 2)
125 | mae = torch.abs(label - output)
126 |
127 | print()
128 | for index_orb, orbital_single in enumerate(kernel.orbital):
129 | if index_orb != 0:
130 | print('================================================================')
131 | print('orbital:', orbital_single)
132 | if kernel.spinful == False:
133 | print(f'mse: {torch.masked_select(mse[:, index_orb], mask[:, index_orb]).mean().item()}, '
134 | f'mae: {torch.masked_select(mae[:, index_orb], mask[:, index_orb]).mean().item()}')
135 | else:
136 | for index_soc, str_soc in enumerate([
137 | 'left_up_real', 'left_up_imag', 'right_down_real', 'right_down_imag',
138 | 'right_up_real', 'right_up_imag', 'left_down_real', 'left_down_imag',
139 | ]):
140 | if index_soc != 0:
141 | print('----------------------------------------------------------------')
142 | print(str_soc, ':')
143 | index_out = index_orb * 8 + index_soc
144 | print(f'mse: {torch.masked_select(mse[:, index_out], mask[:, index_out]).mean().item()}, '
145 | f'mae: {torch.masked_select(mae[:, index_out], mask[:, index_out]).mean().item()}')
146 |
147 | if args.save_csv:
148 | edge_stru_index = torch.squeeze(batch.batch[batch.edge_index[0]]).numpy()
149 | edge_slices = torch.tensor(batch.__slices__['x'])[edge_stru_index].view(-1, 1)
150 | atom_ids = torch.squeeze(batch.edge_index.T - edge_slices).tolist()
151 | atomic_numbers = torch.squeeze(kernel.index_to_Z[batch.x[batch.edge_index.T]]).tolist()
152 | edge_infos = torch.squeeze(batch.edge_attr[:, :7].detach().cpu()).tolist()
153 |
154 | with open(os.path.join(kernel.config.get('basic', 'save_dir'), 'error_distance.csv'), 'w', newline='') as f:
155 | writer = csv.writer(f)
156 | writer.writerow(['index', 'atom_id', 'atomic_number', 'dist', 'atom1_x', 'atom1_y', 'atom1_z',
157 | 'atom2_x', 'atom2_y', 'atom2_z']
158 | + ['target'] * kernel.out_fea_len + ['pred'] * kernel.out_fea_len + [
159 | 'mask'] * kernel.out_fea_len)
160 | for index_edge in range(batch.edge_attr.shape[0]):
161 | writer.writerow([
162 | index_edge,
163 | atom_ids[index_edge],
164 | atomic_numbers[index_edge],
165 | *(edge_infos[index_edge]),
166 | *(label[index_edge].tolist()),
167 | *(output[index_edge].tolist()),
168 | *(mask[index_edge].tolist()),
169 | ])
170 |
171 |
172 | if __name__ == '__main__':
173 | main()
174 |
--------------------------------------------------------------------------------
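The per-orbital errors above are averaged over masked entries only; mask marks which output channels actually exist for a given atom pair. A tiny sketch of the same masked reduction:

import torch

label = torch.tensor([[1.0, 2.0], [3.0, 4.0]])
output = torch.tensor([[1.5, 2.0], [3.0, 0.0]])
mask = torch.tensor([[True, True], [True, False]])   # last channel does not exist

mae = torch.abs(label - output)
print(torch.masked_select(mae[:, 0], mask[:, 0]).mean().item())  # 0.25
print(torch.masked_select(mae[:, 1], mask[:, 1]).mean().item())  # 0.0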
/deeph/scripts/inference.py:
--------------------------------------------------------------------------------
1 | import os
2 | import time
3 | import subprocess as sp
4 | import json
5 |
6 | import argparse
7 |
8 | from deeph import get_inference_config, rotate_back, abacus_parse
9 | from deeph.preprocess import openmx_parse_overlap, get_rc
10 | from deeph.inference import predict, predict_with_grad
11 |
12 |
13 | def main():
14 | parser = argparse.ArgumentParser(description='Deep Hamiltonian')
15 | parser.add_argument('--config', default=[], nargs='+', type=str, metavar='N')
16 | args = parser.parse_args()
17 |
18 | print(f'User config name: {args.config}')
19 | config = get_inference_config(args.config)
20 |
21 | work_dir = os.path.abspath(config.get('basic', 'work_dir'))
22 | OLP_dir = os.path.abspath(config.get('basic', 'OLP_dir'))
23 | interface = config.get('basic', 'interface')
24 | abacus_suffix = str(config.get('basic', 'abacus_suffix', fallback='ABACUS'))
25 | task = json.loads(config.get('basic', 'task'))
26 | assert isinstance(task, list)
27 | eigen_solver = config.get('basic', 'eigen_solver')
28 | disable_cuda = config.getboolean('basic', 'disable_cuda')
29 | device = config.get('basic', 'device')
30 | huge_structure = config.getboolean('basic', 'huge_structure')
31 | restore_blocks_py = config.getboolean('basic', 'restore_blocks_py')
32 | gen_rc_idx = config.getboolean('basic', 'gen_rc_idx')
33 | gen_rc_by_idx = config.get('basic', 'gen_rc_by_idx')
34 | with_grad = config.getboolean('basic', 'with_grad')
35 | julia_interpreter = config.get('interpreter', 'julia_interpreter', fallback='')
36 | python_interpreter = config.get('interpreter', 'python_interpreter', fallback='')
37 | radius = config.getfloat('graph', 'radius')
38 |
39 | if 5 in task:
40 | if eigen_solver in ['sparse_jl', 'dense_jl']:
41 | assert julia_interpreter, "Please specify julia_interpreter to use Julia code to calculate eigenpairs"
42 | elif eigen_solver in ['dense_py']:
43 | assert python_interpreter, "Please specify python_interpreter to use Python code to calculate eigenpairs"
44 | else:
45 | raise ValueError(f"Unknown eigen_solver: {eigen_solver}")
46 | if 3 in task and not restore_blocks_py:
47 | assert julia_interpreter, "Please specify julia_interpreter to use Julia code to rearrange matrix blocks"
48 |
49 | if with_grad:
50 | assert restore_blocks_py is True
51 | assert 4 not in task
52 | assert 5 not in task
53 |
54 | os.makedirs(work_dir, exist_ok=True)
56 | with open(os.path.join(work_dir, 'config.ini'), 'w') as f: config.write(f)
56 |
57 |
58 | if not restore_blocks_py:
59 | cmd3_post = f"{julia_interpreter} " \
60 | f"{os.path.join(os.path.dirname(os.path.dirname(__file__)), 'inference', 'restore_blocks.jl')} " \
61 | f"--input_dir {work_dir} --output_dir {work_dir}"
62 |
63 | if eigen_solver == 'sparse_jl':
64 | cmd5 = f"{julia_interpreter} " \
65 | f"{os.path.join(os.path.dirname(os.path.dirname(__file__)), 'inference', 'sparse_calc.jl')} " \
66 | f"--input_dir {work_dir} --output_dir {work_dir} --config {config.get('basic', 'sparse_calc_config')}"
67 | elif eigen_solver == 'dense_jl':
68 | cmd5 = f"{julia_interpreter} " \
69 | f"{os.path.join(os.path.dirname(os.path.dirname(__file__)), 'inference', 'dense_calc.jl')} " \
70 | f"--input_dir {work_dir} --output_dir {work_dir} --config {config.get('basic', 'sparse_calc_config')}"
71 | elif eigen_solver == 'dense_py':
72 | cmd5 = f"{python_interpreter} " \
73 | f"{os.path.join(os.path.dirname(os.path.dirname(__file__)), 'inference', 'dense_calc.py')} " \
74 | f"--input_dir {work_dir} --output_dir {work_dir} --config {config.get('basic', 'sparse_calc_config')}"
75 | else:
76 | raise ValueError(f"Unknown eigen_solver: {eigen_solver}")
77 |
78 | print(f"\n~~~~~~~ 1.parse_Overlap\n")
79 | print(f"\n~~~~~~~ 2.get_local_coordinate\n")
80 | print(f"\n~~~~~~~ 3.get_pred_Hamiltonian\n")
81 | if not restore_blocks_py:
82 | print(f"\n~~~~~~~ 3_post.restore_blocks, command: \n{cmd3_post}\n")
83 | print(f"\n~~~~~~~ 4.rotate_back\n")
84 | print(f"\n~~~~~~~ 5.sparse_calc, command: \n{cmd5}\n")
85 |
86 | if 1 in task:
87 | begin = time.time()
88 | print(f"\n####### Begin 1.parse_Overlap")
89 | if interface == 'openmx':
90 | assert os.path.exists(os.path.join(OLP_dir, 'openmx.out')), "Necessary files could not be found in OLP_dir"
91 | assert os.path.exists(os.path.join(OLP_dir, 'output')), "Necessary files could not be found in OLP_dir"
92 | openmx_parse_overlap(OLP_dir, work_dir)
93 | elif interface == 'abacus':
94 | print("Output subdirectories:", "OUT." + abacus_suffix)
95 | assert os.path.exists(os.path.join(OLP_dir, 'SR.csr')), "Necessary files could not be found in OLP_dir"
96 | assert os.path.exists(os.path.join(OLP_dir, f'OUT.{abacus_suffix}')), "Necessary files could not be found in OLP_dir"
97 | abacus_parse(OLP_dir, work_dir, data_name=f'OUT.{abacus_suffix}', only_S=True)
98 | assert os.path.exists(os.path.join(work_dir, "overlaps.h5"))
99 | assert os.path.exists(os.path.join(work_dir, "lat.dat"))
100 | assert os.path.exists(os.path.join(work_dir, "rlat.dat"))
101 | assert os.path.exists(os.path.join(work_dir, "site_positions.dat"))
102 | assert os.path.exists(os.path.join(work_dir, "orbital_types.dat"))
103 | assert os.path.exists(os.path.join(work_dir, "element.dat"))
104 | print('\n******* Finish 1.parse_Overlap, cost %d seconds\n' % (time.time() - begin))
105 |
106 | if not with_grad and 2 in task:
107 | begin = time.time()
108 | print(f"\n####### Begin 2.get_local_coordinate")
109 | get_rc(work_dir, work_dir, radius=radius, gen_rc_idx=gen_rc_idx, gen_rc_by_idx=gen_rc_by_idx,
110 | create_from_DFT=config.getboolean('graph', 'create_from_DFT'))
111 | assert os.path.exists(os.path.join(work_dir, "rc.h5"))
112 | print('\n******* Finish 2.get_local_coordinate, cost %d seconds\n' % (time.time() - begin))
113 |
114 | if 3 in task:
115 | begin = time.time()
116 | print(f"\n####### Begin 3.get_pred_Hamiltonian")
117 | trained_model_dir = config.get('basic', 'trained_model_dir')
118 | if trained_model_dir[0] == '[' and trained_model_dir[-1] == ']':
119 | trained_model_dir = json.loads(trained_model_dir)
120 | if with_grad:
121 | predict_with_grad(input_dir=work_dir, output_dir=work_dir, disable_cuda=disable_cuda, device=device,
122 | huge_structure=huge_structure, trained_model_dirs=trained_model_dir)
123 | else:
124 | predict(input_dir=work_dir, output_dir=work_dir, disable_cuda=disable_cuda, device=device,
125 | huge_structure=huge_structure, restore_blocks_py=restore_blocks_py,
126 | trained_model_dirs=trained_model_dir)
127 | if restore_blocks_py:
128 | if with_grad:
129 | assert os.path.exists(os.path.join(work_dir, "hamiltonians_grad_pred.h5"))
130 | assert os.path.exists(os.path.join(work_dir, "hamiltonians_pred.h5"))
131 | else:
132 | assert os.path.exists(os.path.join(work_dir, "rh_pred.h5"))
133 | else:
134 | capture_output = sp.run(cmd3_post, shell=True, capture_output=False, encoding="utf-8")
135 | assert capture_output.returncode == 0
136 | assert os.path.exists(os.path.join(work_dir, "rh_pred.h5"))
137 | print('\n******* Finish 3.get_pred_Hamiltonian, cost %d seconds\n' % (time.time() - begin))
138 |
139 | if 4 in task:
140 | begin = time.time()
141 | print(f"\n####### Begin 4.rotate_back")
142 | rotate_back(input_dir=work_dir, output_dir=work_dir)
143 | assert os.path.exists(os.path.join(work_dir, "hamiltonians_pred.h5"))
144 | print('\n******* Finish 4.rotate_back, cost %d seconds\n' % (time.time() - begin))
145 |
146 | if 5 in task:
147 | begin = time.time()
148 | print(f"\n####### Begin 5.sparse_calc")
149 | capture_output = sp.run(cmd5, shell=True, capture_output=False, encoding="utf-8")
150 | assert capture_output.returncode == 0
151 | if eigen_solver in ['sparse_jl']:
152 | assert os.path.exists(os.path.join(work_dir, "sparse_matrix.jld"))
153 | print('\n******* Finish 5.sparse_calc, cost %d seconds\n' % (time.time() - begin))
154 |
155 |
156 | if __name__ == '__main__':
157 | main()
158 |
--------------------------------------------------------------------------------
/deeph/scripts/preprocess.py:
--------------------------------------------------------------------------------
1 | import os
2 | import subprocess as sp
3 | import time
4 |
5 | import numpy as np
6 | import argparse
7 | from pathos.multiprocessing import ProcessingPool as Pool
8 |
9 | from deeph import get_preprocess_config, get_rc, get_rh, abacus_parse, siesta_parse
10 |
11 |
12 | def collect_magmom(input_dir, output_dir, num_atom, mag_element):
13 | magmom_data = np.zeros((num_atom, 4))
14 |
15 | cmd = f'grep --text -A {num_atom + 3} "Total spin moment" {os.path.join(input_dir, "openmx.scfout")}'
16 | magmom_str = os.popen(cmd).read().splitlines()
17 | # print("Total local magnetic moment:", magmom_str[0].split()[4])
18 |
19 | for index in range(num_atom):
20 | line = magmom_str[3 + index].split()
21 | assert line[0] == str(index + 1)
22 | element_str = line[1]
23 | magmom_r = line[5]
24 | magmom_theta = line[6]
25 | magmom_phi = line[7]
26 | magmom_data[index] = int(element_str in mag_element), magmom_r, magmom_theta, magmom_phi
27 |
28 | np.savetxt(os.path.join(output_dir, "magmom.txt"), magmom_data)
29 |
30 | def main():
31 | parser = argparse.ArgumentParser(description='Deep Hamiltonian')
32 | parser.add_argument('--config', default=[], nargs='+', type=str, metavar='N')
33 | args = parser.parse_args()
34 |
35 | print(f'User config name: {args.config}')
36 | config = get_preprocess_config(args.config)
37 |
38 | raw_dir = os.path.abspath(config.get('basic', 'raw_dir'))
39 | processed_dir = os.path.abspath(config.get('basic', 'processed_dir'))
40 | abacus_suffix = str(config.get('basic', 'abacus_suffix', fallback='ABACUS'))
41 | target = config.get('basic', 'target')
42 | interface = config.get('basic', 'interface')
43 | local_coordinate = config.getboolean('basic', 'local_coordinate')
44 | multiprocessing = config.getint('basic', 'multiprocessing')
45 | get_S = config.getboolean('basic', 'get_S')
46 |
47 | julia_interpreter = config.get('interpreter', 'julia_interpreter')
48 |
49 | def make_cmd(input_dir, output_dir, target, interface, get_S):
50 | if interface == 'openmx':
51 | if target == 'hamiltonian':
52 | cmd = f"{julia_interpreter} " \
53 | f"{os.path.join(os.path.dirname(os.path.dirname(__file__)), 'preprocess', 'openmx_get_data.jl')} " \
54 | f"--input_dir {input_dir} --output_dir {output_dir} --save_overlap {str(get_S).lower()}"
55 | elif target == 'density_matrix':
56 | cmd = f"{julia_interpreter} " \
57 | f"{os.path.join(os.path.dirname(os.path.dirname(__file__)), 'preprocess', 'openmx_get_data.jl')} " \
58 | f"--input_dir {input_dir} --output_dir {output_dir} --save_overlap {str(get_S).lower()} --if_DM true"
59 | else:
60 | raise ValueError('Unknown target: {}'.format(target))
61 | elif interface == 'siesta' or interface == 'abacus':
62 | cmd = ''
63 | elif interface == 'aims':
64 | cmd = f"{julia_interpreter} " \
65 | f"{os.path.join(os.path.dirname(os.path.dirname(__file__)), 'preprocess', 'aims_get_data.jl')} " \
66 | f"--input_dir {input_dir} --output_dir {output_dir} --save_overlap {str(get_S).lower()}"
67 | else:
68 | raise ValueError('Unknown interface: {}'.format(interface))
69 | return cmd
70 |
71 | os.chdir(raw_dir)
72 | relpath_list = []
73 | abspath_list = []
74 | for root, dirs, files in os.walk('./'):
75 | if (interface == 'openmx' and 'openmx.scfout' in files) or (
76 | interface == 'abacus' and 'OUT.' + abacus_suffix in dirs) or (
77 | interface == 'siesta' and any(['.HSX' in ifile for ifile in files])) or (
78 | interface == 'aims' and 'NoTB.dat' in files):
79 | relpath_list.append(root)
80 | abspath_list.append(os.path.abspath(root))
81 |
82 | os.makedirs(processed_dir, exist_ok=True)
83 | os.chdir(processed_dir)
84 | print(f"Found {len(abspath_list)} directories to preprocess")
85 |
86 | def worker(index):
87 | time_cost = time.time() - begin_time
88 | current_block = index // nodes
89 | if current_block < 1:
90 | time_estimate = '?'
91 | else:
92 | num_blocks = (len(abspath_list) + nodes - 1) // nodes
 93 |             time_estimate = time.gmtime(time_cost / current_block * (num_blocks - current_block))  # gmtime: this is a duration in seconds, not an epoch timestamp
 94 |             time_estimate = time.strftime("%H:%M:%S", time_estimate)
 95 |         print(f'\rPreprocessing No. {index + 1}/{len(abspath_list)} '
 96 |               f'[{time.strftime("%H:%M:%S", time.gmtime(time_cost))}<{time_estimate}]...', end='')
97 | abspath = abspath_list[index]
98 | relpath = relpath_list[index]
99 | os.makedirs(relpath, exist_ok=True)
100 | cmd = make_cmd(
101 | abspath,
102 | os.path.abspath(relpath),
103 | target=target,
104 | interface=interface,
105 | get_S=get_S,
106 | )
107 | capture_output = sp.run(cmd, shell=True, capture_output=True, encoding="utf-8")
108 | if capture_output.returncode != 0:
109 | with open(os.path.join(os.path.abspath(relpath), 'error.log'), 'w') as f:
110 | f.write(f'[stdout of cmd "{cmd}"]:\n\n{capture_output.stdout}\n\n\n'
111 | f'[stderr of cmd "{cmd}"]:\n\n{capture_output.stderr}')
112 | print(f'\nFailed to preprocess: {abspath}, '
113 | f'log file was saved to {os.path.join(os.path.abspath(relpath), "error.log")}')
114 | return
115 |
116 | if interface == 'abacus':
117 | print("Output subdirectories:", "OUT." + abacus_suffix)
118 | abacus_parse(abspath, os.path.abspath(relpath), 'OUT.' + abacus_suffix)
119 | elif interface == 'siesta':
120 | siesta_parse(abspath, os.path.abspath(relpath))
121 | if local_coordinate:
122 | get_rc(os.path.abspath(relpath), os.path.abspath(relpath), radius=config.getfloat('graph', 'radius'),
123 | r2_rand=config.getboolean('graph', 'r2_rand'),
124 | create_from_DFT=config.getboolean('graph', 'create_from_DFT'), neighbour_file='hamiltonians.h5')
125 | get_rh(os.path.abspath(relpath), os.path.abspath(relpath), target)
126 | if config.getboolean('magnetic_moment', 'parse_magnetic_moment'):
127 | assert interface == 'openmx', 'Magnetic moment can only be parsed from OpenMX output for now'
128 | num_atom = np.loadtxt(os.path.join(os.path.abspath(relpath), 'element.dat')).shape[0]
129 | collect_magmom(
130 | abspath, os.path.abspath(relpath),
131 |                 num_atom, eval(config.get('magnetic_moment', 'magnetic_element'))  # parsed as a Python list literal of element symbols, e.g. ['Fe']
132 | )
133 |
134 | begin_time = time.time()
135 | if multiprocessing != 0:
136 | if multiprocessing > 0:
137 | pool_dict = {'nodes': multiprocessing}
138 | else:
139 | pool_dict = {}
140 | with Pool(**pool_dict) as pool:
141 | nodes = pool.nodes
142 | print(f'Use multiprocessing (nodes = {nodes})')
143 | pool.map(worker, range(len(abspath_list)))
144 | else:
145 | nodes = 1
146 | for index in range(len(abspath_list)):
147 | worker(index)
148 | print(f'\nPreprocess finished in {time.time() - begin_time:.2f} seconds')
149 |
150 | if __name__ == '__main__':
151 | main()
152 |
--------------------------------------------------------------------------------
/deeph/scripts/train.py:
--------------------------------------------------------------------------------
1 | import argparse
2 |
3 | from deeph import DeepHKernel, get_config
4 |
5 |
6 | def main():
7 | parser = argparse.ArgumentParser(description='Deep Hamiltonian')
8 | parser.add_argument('--config', default=[], nargs='+', type=str, metavar='N')
9 | args = parser.parse_args()
10 |
11 | print(f'User config name: {args.config}')
12 | config = get_config(args.config)
13 | only_get_graph = config.getboolean('basic', 'only_get_graph')
14 | kernel = DeepHKernel(config)
15 | train_loader, val_loader, test_loader, transform = kernel.get_dataset(only_get_graph)
16 | if only_get_graph:
17 | return
18 | kernel.build_model()
19 | kernel.set_train()
20 | kernel.train(train_loader, val_loader, test_loader)
21 |
22 | if __name__ == '__main__':
23 | main()
24 |
--------------------------------------------------------------------------------
/deeph/utils.py:
--------------------------------------------------------------------------------
1 | import os
2 | import shutil
3 | import sys
4 | from configparser import ConfigParser
5 | from inspect import signature
6 |
7 | import numpy as np
8 | import scipy
9 | import torch
10 | from torch import nn, package
11 | import h5py
12 |
13 |
14 | def print_args(args):
15 | for k, v in args._get_kwargs():
16 | print('{} = {}'.format(k, v))
17 | print('')
18 |
19 |
20 | class Logger(object):
21 | def __init__(self, filename):
22 | self.terminal = sys.stdout
23 | self.log = open(filename, "a", buffering=1)
24 |
25 | def write(self, message):
26 | self.terminal.write(message)
27 | self.log.write(message)
28 |
29 | def flush(self):
30 | pass
31 |
32 |
33 | class MaskMSELoss(nn.Module):
34 | def __init__(self) -> None:
35 | super(MaskMSELoss, self).__init__()
36 |
37 | def forward(self, input: torch.Tensor, target: torch.Tensor, mask: torch.Tensor) -> torch.Tensor:
38 | assert input.shape == target.shape == mask.shape
39 | mse = torch.pow(input - target, 2)
40 | mse = torch.masked_select(mse, mask).mean()
41 |
42 | return mse
43 |
44 |
45 | class MaskMAELoss(nn.Module):
46 | def __init__(self) -> None:
47 | super(MaskMAELoss, self).__init__()
48 |
49 | def forward(self, input: torch.Tensor, target: torch.Tensor, mask: torch.Tensor) -> torch.Tensor:
50 | assert input.shape == target.shape == mask.shape
51 | mae = torch.abs(input - target)
52 | mae = torch.masked_select(mae, mask).mean()
53 |
54 | return mae
55 |
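# Illustrative usage of the masked losses above (a minimal sketch with
# made-up shapes, not taken from the training pipeline): the boolean mask
# selects which matrix elements contribute to the loss, so undefined
# orbital pairs are ignored.
#
#     criterion = MaskMSELoss()
#     pred = torch.zeros(4, 9)
#     label = torch.ones(4, 9)
#     mask = torch.ones(4, 9, dtype=torch.bool)
#     loss = criterion(pred, label, mask)  # tensor(1.)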
56 |
57 | class LossRecord:
58 | def __init__(self):
59 | self.reset()
60 |
61 | def reset(self):
62 | self.last_val = 0
63 | self.avg = 0
64 | self.sum = 0
65 | self.count = 0
66 |
67 | def update(self, val, num=1):
68 | self.last_val = val
69 | self.sum += val * num
70 | self.count += num
71 | self.avg = self.sum / self.count
72 |
73 |
74 | def if_integer(string):
75 | try:
76 | int(string)
77 | return True
78 | except ValueError:
79 | return False
80 |
81 |
82 | class Transform:
83 | def __init__(self, tensor=None, mask=None, normalizer=False, boxcox=False):
84 | self.normalizer = normalizer
85 | self.boxcox = boxcox
86 | if normalizer:
87 | raise NotImplementedError
88 | self.mean = abs(tensor).sum(dim=0) / mask.sum(dim=0)
89 | self.std = None
90 | print(f'[normalizer] mean: {self.mean}, std: {self.std}')
91 | if boxcox:
92 | raise NotImplementedError
93 | _, self.opt_lambda = scipy.stats.boxcox(tensor.double())
94 | print('[boxcox] optimal lambda value:', self.opt_lambda)
95 |
96 | def tran(self, tensor):
97 | if self.boxcox:
98 | tensor = scipy.special.boxcox(tensor, self.opt_lambda)
99 | if self.normalizer:
100 | tensor = (tensor - self.mean) / self.std
101 | return tensor
102 |
103 | def inv_tran(self, tensor):
104 | if self.normalizer:
105 | tensor = tensor * self.std + self.mean
106 | if self.boxcox:
107 | tensor = scipy.special.inv_boxcox(tensor, self.opt_lambda)
108 | return tensor
109 |
110 | def state_dict(self):
111 | result = {'normalizer': self.normalizer,
112 | 'boxcox': self.boxcox}
113 | if self.normalizer:
114 | result['mean'] = self.mean
115 | result['std'] = self.std
116 | if self.boxcox:
117 | result['opt_lambda'] = self.opt_lambda
118 | return result
119 |
120 | def load_state_dict(self, state_dict):
121 | self.normalizer = state_dict['normalizer']
122 | self.boxcox = state_dict['boxcox']
123 | if self.normalizer:
124 | self.mean = state_dict['mean']
125 | self.std = state_dict['std']
126 | print(f'Load state dict, mean: {self.mean}, std: {self.std}')
127 | if self.boxcox:
128 | self.opt_lambda = state_dict['opt_lambda']
129 | print('Load state dict, optimal lambda value:', self.opt_lambda)
130 |
131 |
132 | def save_model(state, model_dict, model_state_dict, path, is_best):
133 | model_dir = os.path.join(path, 'model.pt')
134 | package_dict = {}
135 | if 'verbose' in list(signature(package.PackageExporter.__init__).parameters.keys()):
136 | package_dict['verbose'] = False
137 | with package.PackageExporter(model_dir, **package_dict) as exp:
138 | exp.intern('deeph.**')
139 | exp.extern([
140 | 'scipy.**', 'numpy.**', 'torch_geometric.**', 'sklearn.**',
141 |             'torch_scatter.**', 'torch_sparse.**', 'torch_cluster.**', 'torch_spline_conv.**',
142 | 'pyparsing', 'jinja2', 'sys', 'mkl', 'io', 'setuptools.**', 'rdkit.Chem', 'tqdm',
143 | '__future__', '_operator', '_ctypes', 'six.moves.urllib', 'ase', 'matplotlib.pyplot', 'sympy', 'networkx',
144 | ])
145 | exp.save_pickle('checkpoint', 'model.pkl', state | model_dict)
146 | torch.save(state | model_state_dict, os.path.join(path, 'state_dict.pkl'))
147 | if is_best:
148 | shutil.copyfile(os.path.join(path, 'model.pt'), os.path.join(path, 'best_model.pt'))
149 | shutil.copyfile(os.path.join(path, 'state_dict.pkl'), os.path.join(path, 'best_state_dict.pkl'))
150 |
151 |
152 | def write_ham_h5(hoppings_dict, path):
153 | fid = h5py.File(path, "w")
154 | for k, v in hoppings_dict.items():
155 | fid[k] = v
156 | fid.close()
157 |
158 |
159 | def write_ham_npz(hoppings_dict, path):
160 | np.savez(path, **hoppings_dict)
161 |
162 |
163 | def write_ham(hoppings_dict, path):
164 | os.makedirs(path, exist_ok=True)
165 | for key_term, matrix in hoppings_dict.items():
166 | np.savetxt(os.path.join(path, f'{key_term}_real.dat'), matrix)
167 |
168 |
169 | def get_config(args):
170 | config = ConfigParser()
171 | config.read(os.path.join(os.path.dirname(__file__), 'default.ini'))
172 | for config_file in args:
173 | assert os.path.exists(config_file)
174 | config.read(config_file)
175 | if config['basic']['target'] == 'O_ij':
176 | assert config['basic']['O_component'] in ['H_minimum', 'H_minimum_withNA', 'H', 'Rho']
177 |     elif config['basic']['target'] == 'E_ij':
178 | assert config['basic']['energy_component'] in ['xc', 'delta_ee', 'both', 'summation', 'E_ij']
179 | else:
180 | assert config['hyperparameter']['criterion'] in ['MaskMSELoss']
181 | assert config['basic']['target'] in ['hamiltonian']
182 | assert config['basic']['interface'] in ['h5', 'h5_rc_only', 'h5_Eij', 'npz', 'npz_rc_only']
183 | assert config['network']['aggr'] in ['add', 'mean', 'max']
184 | assert config['network']['distance_expansion'] in ['GaussianBasis', 'BesselBasis', 'ExpBernsteinBasis']
185 | assert config['network']['normalization'] in ['BatchNorm', 'LayerNorm', 'PairNorm', 'InstanceNorm', 'GraphNorm',
186 | 'DiffGroupNorm', 'None']
187 | assert config['network']['atom_update_net'] in ['CGConv', 'GAT', 'PAINN']
188 | assert config['hyperparameter']['optimizer'] in ['sgd', 'sgdm', 'adam', 'adamW', 'adagrad', 'RMSprop', 'lbfgs']
189 | assert config['hyperparameter']['lr_scheduler'] in ['', 'MultiStepLR', 'ReduceLROnPlateau', 'CyclicLR']
190 |
191 | return config
192 |
193 |
194 | def get_inference_config(*args):
195 | config = ConfigParser()
196 | config.read(os.path.join(os.path.dirname(__file__), 'inference', 'inference_default.ini'))
197 | for config_file in args:
198 | config.read(config_file)
199 | assert config['basic']['interface'] in ['openmx', 'abacus']
200 |
201 | return config
202 |
203 |
204 | def get_preprocess_config(*args):
205 | config = ConfigParser()
206 | config.read(os.path.join(os.path.dirname(__file__), 'preprocess', 'preprocess_default.ini'))
207 | for config_file in args:
208 | config.read(config_file)
209 | assert config['basic']['target'] in ['hamiltonian', 'density_matrix', 'phiVdphi']
210 | assert config['basic']['interface'] in ['openmx', 'abacus', 'aims', 'siesta']
211 | assert if_integer(config['basic']['multiprocessing']), "value of multiprocessing must be an integer"
212 |
213 | return config
214 |
--------------------------------------------------------------------------------
/docs/Makefile:
--------------------------------------------------------------------------------
1 | # Minimal makefile for Sphinx documentation
2 | #
3 |
4 | # You can set these variables from the command line, and also
5 | # from the environment for the first two.
6 | SPHINXOPTS ?=
7 | SPHINXBUILD ?= sphinx-build
8 | SOURCEDIR = source
9 | BUILDDIR = build
10 |
11 | # Put it first so that "make" without argument is like "make help".
12 | help:
13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
14 |
15 | .PHONY: help Makefile
16 |
17 | # Catch-all target: route all unknown targets to Sphinx using the new
18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
19 | %: Makefile
20 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
21 |
--------------------------------------------------------------------------------
/docs/make.bat:
--------------------------------------------------------------------------------
1 | @ECHO OFF
2 |
3 | pushd %~dp0
4 |
5 | REM Command file for Sphinx documentation
6 |
7 | if "%SPHINXBUILD%" == "" (
8 | set SPHINXBUILD=sphinx-build
9 | )
10 | set SOURCEDIR=source
11 | set BUILDDIR=build
12 |
13 | %SPHINXBUILD% >NUL 2>NUL
14 | if errorlevel 9009 (
15 | echo.
16 | echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
17 | echo.installed, then set the SPHINXBUILD environment variable to point
18 | echo.to the full path of the 'sphinx-build' executable. Alternatively you
19 | echo.may add the Sphinx directory to PATH.
20 | echo.
21 | echo.If you don't have Sphinx installed, grab it from
22 | echo.https://www.sphinx-doc.org/
23 | exit /b 1
24 | )
25 |
26 | if "%1" == "" goto help
27 |
28 | %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
29 | goto end
30 |
31 | :help
32 | %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
33 |
34 | :end
35 | popd
36 |
--------------------------------------------------------------------------------
/docs/requirements.txt:
--------------------------------------------------------------------------------
1 | sphinx==4.5.0
2 | sphinx_rtd_theme==1.0.0
3 | myst-parser==0.17.2
--------------------------------------------------------------------------------
/docs/source/_static/logo-white.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/deepmodeling/DeepH-pack/8720d94ab11a1c7f42c633cb6b59f7e8eb3fa5c7/docs/source/_static/logo-white.png
--------------------------------------------------------------------------------
/docs/source/_static/logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/deepmodeling/DeepH-pack/8720d94ab11a1c7f42c633cb6b59f7e8eb3fa5c7/docs/source/_static/logo.png
--------------------------------------------------------------------------------
/docs/source/conf.py:
--------------------------------------------------------------------------------
1 | # Configuration file for the Sphinx documentation builder.
2 | #
3 | # This file only contains a selection of the most common options. For a full
4 | # list see the documentation:
5 | # https://www.sphinx-doc.org/en/master/usage/configuration.html
6 |
7 | # -- Path setup --------------------------------------------------------------
8 |
9 | # If extensions (or modules to document with autodoc) are in another directory,
10 | # add these directories to sys.path here. If the directory is relative to the
11 | # documentation root, use os.path.abspath to make it absolute, like shown here.
12 | #
13 | # import os
14 | # import sys
15 | # sys.path.insert(0, os.path.abspath('.'))
16 |
17 |
18 | # -- Project information -----------------------------------------------------
19 |
20 | project = 'DeepH-pack'
21 | copyright = '2022, condensed matter theory group at Tsinghua University'
22 | author = 'mzjb'
23 |
24 | # The full version, including alpha/beta/rc tags
25 | release = '0.0.1'
26 |
27 |
28 | # -- General configuration ---------------------------------------------------
29 |
30 | # Add any Sphinx extension module names here, as strings. They can be
31 | # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
32 | # ones.
33 | extensions = [
34 | 'myst_parser',
35 | ]
36 |
37 | # Add any paths that contain templates here, relative to this directory.
38 | templates_path = ['_templates']
39 |
40 | # List of patterns, relative to source directory, that match files and
41 | # directories to ignore when looking for source files.
42 | # This pattern also affects html_static_path and html_extra_path.
43 | exclude_patterns = []
44 |
45 |
46 | # -- Options for HTML output -------------------------------------------------
47 |
48 | # The theme to use for HTML and HTML Help pages. See the documentation for
49 | # a list of builtin themes.
50 | #
51 | html_theme = 'sphinx_rtd_theme'
52 |
53 | # Add any paths that contain custom static files (such as style sheets) here,
54 | # relative to this directory. They are copied after the builtin static files,
55 | # so a file named "default.css" will overwrite the builtin "default.css".
56 | html_static_path = ['_static']
57 |
58 | html_logo = './_static/logo-white.png'
59 |
60 | source_suffix = {
61 | '.rst': 'restructuredtext',
62 | '.md': 'markdown',
63 | }
--------------------------------------------------------------------------------
/docs/source/dataset/dataset.rst:
--------------------------------------------------------------------------------
1 | Prepare the dataset
2 | ==============================
3 |
4 | To perform efficient *ab initio* electronic structure calculations with the
5 | DeepH method for a class of large-scale material systems, one needs to design
6 | an appropriate dataset of small structures whose chemical bonding environments
7 | are close to those of the target large-scale material systems. Therefore, the
8 | first step of a DeepH study is to perform DFT calculations on this dataset to
9 | obtain the DFT Hamiltonian matrices in the localized basis. DeepH-pack supports
10 | DFT results computed by ABACUS, OpenMX, FHI-aims or SIESTA and will support
11 | HONPAS soon.
12 |
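For example, a dataset prepared with the OpenMX interface could be organized
like this (folder names are placeholders; each structure gets its own folder)::

    dataset/
    ├── structure_001/
    │   └── openmx.scfout
    ├── structure_002/
    │   └── openmx.scfout
    └── ...
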
13 | Using ABACUS
14 | ^^^^^^^^^^^^^^^^^^^^^^^^
15 |
16 | One needs to perform the DFT calculation with ABACUS
17 | to get the Kohn-Sham Hamiltonian output file in the CSR
18 | format. This output file should be placed in a separate
19 | folder for each structure in the dataset. In order to get
20 | this CSR file, the input file of ABACUS should include
21 | keywords like this::
22 |
23 | out_mat_hs2 1
24 |
25 | Using OpenMX
26 | ^^^^^^^^^^^^^^^^^^^^^^^^
27 |
28 | One needs to perform the DFT calculation with OpenMX
29 | to get the Kohn-Sham Hamiltonian output file in a binary
30 | form. This binary file should be placed in a separate
31 | folder for each structure in the dataset and should be
32 | named as ``openmx.scfout``. In order to get this binary file,
33 | the input file of OpenMX should include keywords like this::
34 |
35 | System.Name openmx
36 | HS.fileout On
37 |
38 | Besides, it is required to attach the text output of
39 | ``openmx.out`` to the end of ``openmx.scfout``, which
40 | means to run::
41 |
42 | cat openmx.out >> openmx.scfout
43 |
44 | Using FHI-aims
45 | ^^^^^^^^^^^^^^^^^^^^^^^^
46 |
47 | One needs to perform the DFT calculation with modified FHI-aims
48 | to get the Kohn-Sham Hamiltonian output file in text
49 | format. This output file should be placed in a separate
50 | folder for each structure in the dataset.
51 |
52 | Using SIESTA
53 | ^^^^^^^^^^^^^^^^^^^^^^^^
54 |
55 | One needs to perform the DFT calculation with SIESTA to get the Hamiltonian in a
56 | binary file named ``${System_name}.HSX``. To activate this feature, include the
57 | following keyword in the ``${System_name}.fdf`` file::
58 |
59 | SaveHS true
60 |
61 | It is also recommended to specify a stricter convergence criterion for the SCF
62 | calculation. We found the following setting in the ``${System_name}.fdf`` file sufficient::
63 |
64 |     DM.Tolerance 1.d-9
65 |
--------------------------------------------------------------------------------
/docs/source/demo/demo1.md:
--------------------------------------------------------------------------------
1 | # Demo: DeepH study on twisted bilayer bismuthene
2 | When the directory structure of the code folder is not modified, the scripts in it can be used to generate a dataset of non-twisted structures, train a DeepH model, make predictions on the DFT Hamiltonian matrix of twisted structure, and perform sparse diagonalization to compute the band structure for the example study of bismuthene.
3 |
4 | Firstly, generate example input files according to your environment path by running the following command:
5 | ```bash
6 | cd DeepH-pack
7 | python gen_example.py ${openmx_path} ${openmx_overlap_path} ${pot_path} ${python_interpreter} ${julia_interpreter}
8 | ```
9 | with `${openmx_path}`, `${openmx_overlap_path}`, `${pot_path}`, `${python_interpreter}`, and `${julia_interpreter}` replaced by the paths of the original OpenMX executable, the modified 'overlap only' OpenMX executable, the directory containing the VPS and PAO data of OpenMX, the Python interpreter, and the Julia interpreter, respectively. For example,
10 | ```bash
11 | cd DeepH-pack
12 | python gen_example.py /home/user/openmx/source/openmx /home/user/openmx_overlap/source/openmx /home/user/openmx/DFT_DATA19 python /home/user/julia-1.5.4/bin/julia
13 | ```
14 |
15 | Secondly, enter the generated `example/` folder and run `run.sh` in each folder one by one, from 1 to 5. Please note that `run.sh` should be run in the directory where it is located.
16 | ```bash
17 | cd example/1_DFT_calculation
18 | bash run.sh
19 | cd ../2_preprocess
20 | bash run.sh
21 | cd ../3_train
22 | bash run.sh
23 | cd ../4_compute_overlap
24 | bash run.sh
25 | cd ../5_inference
26 | bash run.sh
27 | ```
28 | The third step, the neural network training process, is recommended to be carried out on a GPU. In addition, to obtain the band structure faster, it is recommended to calculate the eigenvalues of different k points in parallel in the fifth step via the *which_k* interface.
29 |
30 | After completing the calculation, you can find the band structure data (in the OpenMX Band format) of twisted bilayer bismuthene with 244 atoms per supercell, computed from the predicted DFT Hamiltonian, in the file below:
31 | ```
32 | example/work_dir/inference/5_4/openmx.Band
33 | ```
34 | The plotted band structure will be consistent with the right panel of Figure 6c in our paper.
35 |
--------------------------------------------------------------------------------
/docs/source/demo/demo2.md:
--------------------------------------------------------------------------------
1 | # Demo: Reproduce the experimental results of the paper
2 | You can train DeepH models using the existing [dataset](https://zenodo.org/record/6555484) to reproduce the results of this [paper](https://www.nature.com/articles/s43588-022-00265-6).
3 |
4 | Firstly, download the processed dataset for graphene (*graphene_dataset.zip*), MoS2 (*MoS2_dataset.zip*), twisted bilayer graphene (*TBG_dataset.zip*) or twisted bilayer bismuthene (*TBB_dataset.zip*). Uncompress the ZIP file.
5 |
6 | Secondly, edit the corresponding config files in `DeepH-pack/ini/`. *raw_dir* should be set to the path of the downloaded dataset. *graph_dir* and *save_dir* should be set to the paths where your graph file and results will be saved during training. For graphene, twisted bilayer graphene and twisted bilayer bismuthene, a single MPNN model is used for each dataset. For MoS2, four MPNN models are used. Run
7 | ```bash
8 | deeph-train --config ${config_path}
9 | ```
10 | with `${config_path}` replaced by the path of config file for training.
11 |
12 | After completing the training, you can find the trained model in *save_dir*; it can be used to make predictions on new structures by running
13 | ```bash
14 | deeph-inference --config ${inference_config_path}
15 | ```
16 | with `${inference_config_path}` replaced by the path of config file for inference.
17 | Please note that the DFT results in this dataset were calculated using OpenMX.
18 | This means that if you want to use a model trained on this dataset to calculate properties, you need to use the overlap calculated using OpenMX.
19 | The orbital information required for overlap calculations can be found in the [paper](https://www.nature.com/articles/s43588-022-00265-6).
--------------------------------------------------------------------------------
/docs/source/demo/demo3.md:
--------------------------------------------------------------------------------
1 | # Demo: Train the DeepH model using the ABACUS interface
2 | Train the DeepH model on random graphene supercells
3 | and predict the Hamiltonian of a carbon nanotube using
4 | the ABACUS interface. See README.md in
5 | [this file](https://github.com/deepmodeling/DeepH-pack/files/9526304/demo_abacus.zip)
6 | for details.
7 |
8 |
--------------------------------------------------------------------------------
/docs/source/index.rst:
--------------------------------------------------------------------------------
1 | .. image:: ./_static/logo.png
2 | :scale: 3 %
3 | :alt: DeepH-pack
4 | :align: center
5 |
6 | DeepH-pack's documentation
7 | ======================================
8 |
9 | `DeepH-pack <https://github.com/mzjb/DeepH-pack>`_ is a package for the application of deep neural
10 | networks to the prediction of density functional theory (DFT)
11 | Hamiltonian matrices based on local coordinates and basis
12 | transformation [#deeph]_. DeepH-pack supports DFT results computed
13 | by `ABACUS <https://abacus.ustc.edu.cn>`_,
14 | `OpenMX <http://www.openmx-square.org>`_,
15 | `FHI-aims <https://fhi-aims.org>`_ or
16 | `SIESTA <https://gitlab.com/siesta-project/siesta>`_,
17 | and will support HONPAS soon.
18 |
19 | .. toctree::
20 | :glob:
21 | :caption: Getting Started
22 | :maxdepth: 2
23 |
24 | installation/installation
25 | dataset/dataset
26 | preprocess/preprocess
27 | train/train
28 | inference/inference
29 |
30 | .. toctree::
31 | :glob:
32 | :caption: Demo
33 | :maxdepth: 1
34 |
35 | demo/demo1
36 | demo/demo2
37 | demo/demo3
38 |
39 | .. toctree::
40 | :glob:
41 | :caption: Input Keywords
42 | :maxdepth: 2
43 |
44 | keyword/keyword
45 |
46 | References
47 | ^^^^^^^^^^^^^^^^^
48 | .. [#deeph] H. Li, Z. Wang, N. Zou, M. Ye, R. Xu, X. Gong, W. Duan, Y. Xu.
49 |    `Deep-learning density functional theory Hamiltonian for efficient ab initio electronic-structure calculation <https://www.nature.com/articles/s43588-022-00265-6>`_.
50 | *Nat. Comput. Sci.* **2**, 367–377 (2022).
51 |
52 |
--------------------------------------------------------------------------------
/docs/source/inference/inference.md:
--------------------------------------------------------------------------------
1 | # Inference with your model
2 |
3 | `Inference` is a part of DeepH-pack, which is used to predict the
4 | DFT Hamiltonian for large-scale material structures and perform
5 | sparse calculation of physical properties.
6 |
7 | Firstly, one should prepare the structure file of the large-scale material
8 | and calculate the overlap matrix. Overlap matrix calculation does not
9 | require `SCF`. Even if the material system is large, only a small calculation
10 | time and memory consumption are required. Following are the steps to
11 | calculate the overlap matrix using different supported DFT packages:
12 | 1. **ABACUS**: Set the following parameters in the input file of ABACUS `INPUT`:
13 | ```
14 | calculation get_S
15 | ```
16 | and run ABACUS like a normal `SCF` calculation.
17 | [ABACUS version >= 2.3.2](https://github.com/deepmodeling/abacus-develop/releases/tag/v2.3.2) is required.
18 | 2. **OpenMX**: See this [repository](https://github.com/mzjb/overlap-only-OpenMX#usage).
19 |
20 | For the overlap matrix calculation, you need to use the same basis set and DFT
21 | software as were used when preparing the dataset.
22 |
23 | Then, prepare a configuration file in the *ini* format, referring to the
24 | default `DeepH-pack/deeph/inference/inference_default.ini`.
25 | The meaning of the keywords can be found in the
26 | [INPUT KEYWORDS section](https://deeph-pack.readthedocs.io/en/latest/keyword/inference.html).
27 | For a quick start, you must set up *OLP_dir*, *work_dir*, *interface*,
28 | *trained_model_dir* and *sparse_calc_config*, as well as a `JSON`
29 | configuration file located at *sparse_calc_config* for sparse calculation.
30 |
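For illustration, a minimal configuration might look like the sketch below
(all paths are placeholder values; any keyword left unset falls back to the
default in `inference_default.ini`):
```ini
[basic]
OLP_dir = /path/to/overlap/calculation
work_dir = /path/to/work_dir
interface = openmx
trained_model_dir = /your/trained/model
sparse_calc_config = /path/to/band_config.json

[interpreter]
julia_interpreter = /usr/local/bin/julia
```
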
31 | With the configuration files prepared, run
32 | ```bash
33 | deeph-inference --config ${config_path}
34 | ```
35 | with `${config_path}` replaced by the path of your configuration file.
36 |
--------------------------------------------------------------------------------
/docs/source/installation/abacus.md:
--------------------------------------------------------------------------------
1 | # ABACUS
2 | Install [ABACUS package](https://abacus.ustc.edu.cn) for density functional theory Hamiltonian matrix calculation to construct datasets.
3 |
4 | DeepH-pack requires [ABACUS version >= 2.3.2](https://github.com/deepmodeling/abacus-develop/releases/tag/v2.3.2).
--------------------------------------------------------------------------------
/docs/source/installation/aims.md:
--------------------------------------------------------------------------------
1 | # FHI-aims
2 | Install the modified FHI-aims, which can output Hamiltonian matrices, overlap matrices and position matrices.
3 |
--------------------------------------------------------------------------------
/docs/source/installation/installation.rst:
--------------------------------------------------------------------------------
1 | Installation
2 | ============
3 |
4 | Requirements
5 | ------------
6 |
7 | To use DeepH-pack, the following environments and packages are required:
8 |
9 | Python packages
10 | ^^^^^^^^^^^^^^^^^^^^^^^^
11 |
12 | Prepare the Python 3.9 interpreter. Install the following required Python packages:
13 |
14 | * NumPy
15 | * SciPy
16 | * PyTorch = 1.9.1
17 | * PyTorch Geometric = 1.7.2
18 | * e3nn = 0.3.5
19 | * pymatgen
20 | * h5py
21 | * TensorBoard
22 | * pathos
23 | * psutil
24 |
25 | On Linux, you can quickly satisfy the requirements by running::
26 |
27 | # install miniconda with python 3.9
28 | wget https://repo.anaconda.com/miniconda/Miniconda3-py39_4.10.3-Linux-x86_64.sh
29 | bash Miniconda3-py39_4.10.3-Linux-x86_64.sh
30 |
31 | # install packages by conda
32 | conda install numpy
33 | conda install scipy
34 | conda install pytorch==1.9.1 ${pytorch_config}
35 | conda install pytorch-geometric=1.7.2 -c rusty1s -c conda-forge
36 | conda install pymatgen -c conda-forge
37 |
38 | # install packages by pip
39 | pip install e3nn==0.3.5
40 | pip install h5py
41 | pip install tensorboard
42 | pip install pathos
43 | pip install psutil
44 |
45 | with ``${pytorch_config}`` replaced by your own configuration.
46 | You can find how to set it on `the official website of PyTorch <https://pytorch.org>`_.
47 |
48 | Julia packages
49 | ^^^^^^^^^^^^^^^^^^^^^^^^
50 |
51 | Prepare the Julia interpreter (version 1.5.4 or later; version 1.6.6 is used in the example below). Install the following required Julia packages with Julia's built-in package manager:
52 |
53 | * Arpack.jl
54 | * HDF5.jl
55 | * ArgParse.jl
56 | * JLD.jl
57 | * JSON.jl
58 | * IterativeSolvers.jl
59 | * DelimitedFiles.jl
60 | * StaticArrays.jl
61 | * LinearMaps.jl
62 | * Pardiso.jl
63 |
64 | On Linux, you can quickly satisfy the requirements by first running::
65 |
66 | # install julia 1.6.6
67 | wget https://julialang-s3.julialang.org/bin/linux/x64/1.6/julia-1.6.6-linux-x86_64.tar.gz
68 | tar xzvf julia-1.6.6-linux-x86_64.tar.gz
69 |
70 | # open the julia REPL
71 | julia
72 |
73 | Then enter the pkg REPL by pressing ``]`` from the Julia REPL. In the pkg REPL run::
74 |
75 | (@v1.6) pkg> add Arpack
76 | (@v1.6) pkg> add HDF5
77 | (@v1.6) pkg> add ArgParse
78 | (@v1.6) pkg> add JLD
79 | (@v1.6) pkg> add JSON
80 | (@v1.6) pkg> add IterativeSolvers
81 | (@v1.6) pkg> add DelimitedFiles
82 | (@v1.6) pkg> add StaticArrays
83 | (@v1.6) pkg> add LinearMaps
84 |
85 | Follow `these instructions <https://github.com/JuliaSparse/Pardiso.jl>`_ to install Pardiso.jl.
86 |
87 | Install DeepH-pack
88 | ------------------------
89 |
90 | Run the following commands to download and install DeepH-pack::
91 |
92 | git clone https://github.com/mzjb/DeepH-pack.git
93 | cd DeepH-pack
94 | pip install .
95 |
96 |
97 | Install one of the supported DFT packages
98 | ------------------------------------------------
99 |
100 | One of the supported DFT packages is required to obtain the dataset.
101 | DeepH-pack supports DFT results computed by ABACUS, OpenMX, FHI-aims or SIESTA,
102 | and will support HONPAS soon.
103 |
104 | ABACUS
105 | ^^^^^^^^^^^^^^^^^^^^^^^^
106 |
107 | .. toctree::
108 | :maxdepth: 1
109 |
110 | abacus
111 |
112 | OpenMX
113 | ^^^^^^^^^^^^^^^^^^^^^^^^
114 |
115 | .. toctree::
116 | :maxdepth: 1
117 |
118 | openmx
119 |
120 | FHI-aims
121 | ^^^^^^^^^^^^^^^^^^^^^^^^
122 |
123 | .. toctree::
124 | :maxdepth: 1
125 |
126 | aims
127 |
128 | SIESTA
129 | ^^^^^^^^^^^^^^^^^^^^^^^^
130 |
131 | .. toctree::
132 | :maxdepth: 1
133 |
134 | siesta
135 |
136 |
--------------------------------------------------------------------------------
/docs/source/installation/openmx.md:
--------------------------------------------------------------------------------
1 | # OpenMX
2 | Install [OpenMX package version 3.9](http://www.openmx-square.org/download.html) for density functional theory Hamiltonian matrix calculation to construct datasets.
3 |
4 | If you are using Intel MKL and Intel MPI environments, you can use the following variable definitions in the makefile
5 | ```
6 | CC = mpiicc -O3 -xHOST -ip -no-prec-div -qopenmp -I${MKLROOT}/include/fftw -I${MKLROOT}/include
7 | FC = mpiifort -O3 -xHOST -ip -no-prec-div -qopenmp -I${MKLROOT}/include
8 | LIB = ${CMPLR_ROOT}/linux/compiler/lib/intel64_lin/libiomp5.a ${MKLROOT}/lib/intel64/libmkl_blas95_lp64.a ${MKLROOT}/lib/intel64/libmkl_lapack95_lp64.a ${MKLROOT}/lib/intel64/libmkl_scalapack_lp64.a -Wl,--start-group ${MKLROOT}/lib/intel64/libmkl_intel_lp64.a ${MKLROOT}/lib/intel64/libmkl_intel_thread.a ${MKLROOT}/lib/intel64/libmkl_core.a ${MKLROOT}/lib/intel64/libmkl_blacs_intelmpi_lp64.a -Wl,--end-group ${CMPLR_ROOT}/linux/compiler/lib/intel64_lin/libifcoremt.a -lpthread -lm -ldl
9 | ```
10 | Or edit the makefile yourself according to your environment to install OpenMX version 3.9.
11 |
12 | # 'overlap only' OpenMX
13 | A modified OpenMX package is also used to compute only the overlap matrices for large-scale material structures. Install the 'overlap only' OpenMX according to the *readme* documentation in this [repository](https://github.com/mzjb/overlap-only-OpenMX).
--------------------------------------------------------------------------------
/docs/source/installation/siesta.md:
--------------------------------------------------------------------------------
1 | # SIESTA
2 | Install [SIESTA package](https://gitlab.com/siesta-project/siesta) for density functional theory Hamiltonian matrix calculation to construct datasets.
3 |
--------------------------------------------------------------------------------
/docs/source/keyword/inference.md:
--------------------------------------------------------------------------------
1 | # Inference
2 |
3 | The default value can be found in `DeepH-pack/deeph/inference/inference_default.ini`. The following arguments can be set in the configuration files for `Inference`:
4 |
5 | ## basic
6 |
7 | - *OLP_dir* : The output directory of the overlap-matrix calculation (e.g. the 'overlap only' OpenMX calculation).
8 |
9 | + *work_dir* : The directory to run the workflow and save the results.
10 |
11 | - *interface* : Which DFT package is used to get the overlap matrix. Support `abacus` and `openmx`.
12 |
13 | + *trained_model_dir* : The directory to the trained model. If only one model is used for the current material system, fill in the string of the directory to the trained model, e.g. `/your/trained/model`. If multiple models are used for the current material system, fill in the JSON format string containing all the directories of models, e.g. `["/your/trained/model1", "/your/trained/model2"]`.
14 |
15 | - *task* : Set it `[1, 2, 3, 4, 5]` to run all the tasks for inference. `1` in list means to parse the overlap, `2` means to get the local coordinate, `3` means to predict the Hamiltonian, `4` means to rotate the Hamiltonian back, and `5` means to perform the sparse calculation.
16 |
17 | + *sparse_calc_config* : The path to the *JSON* configuration file.
18 |
19 | - *eigen_solver* : Which algorithm to use for diagonalization. Support `sparse_jl` for sparse matrix using Julia code (default), `dense_jl` for dense matrix using Julia code, and `dense_py` for dense matrix using Python code.
20 |
21 | + *huge_structure* : Whether to save memory at the cost of more computation time during inference.
22 |
23 | - *restore_blocks_py* : Whether to use Python code to rearrange matrix blocks. You can set it to `False` to use Julia code instead to improve efficiency.
24 |
25 | ## interpreter
26 |
27 | - *julia_interpreter* : The path to the Julia interpreter (required if you want to use Julia code to perform the band structure calculation or to rearrange matrix blocks).
28 |
29 | + *python_interpreter* : The path to the Python interpreter (required if you want to use Python code to perform the band structure calculation).
30 |
31 | ## graph
32 |
33 | - *radius* : The additional cut-off radius for the crystal graph, based on the truncation adopted in the overlap matrices. `-1.0` means using the same truncation as the overlap matrices.
34 |
35 | ## *JSON* configuration file
36 |
37 | - *calc_job* : Which quantity to calculate after the Hamiltonian is obtained. Can only be `band` for now.
38 |
39 | + *fermi_level* : Fermi level.
40 |
41 | - *k_data* : The k-path to calculate, formatted like `["number_of_points x1 y1 z1 x2 y2 z2 name_of_begin_point name_of_end_point", ...]`.
42 |
43 | + *which_k* : Defines which point in the k-path to calculate, counting from 1. You can set it to `0` for all k points, or `-1` for no point. It is recommended to calculate the eigenvalues of different k points in parallel through it. (Invalid for dense matrix calculation)
44 |
45 | - *num_band* : The number of eigenvalues and eigenvectors desired. (Invalid for dense matrix calculation)
46 |
47 | + *max_iter* : Maximum number of iterations. (Invalid for dense matrix calculation)
48 |
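Putting these together, a band-structure configuration could look like the
sketch below (all values are illustrative placeholders; the k-path string
follows the *k_data* format described above):
```json
{
    "calc_job": "band",
    "fermi_level": 0.0,
    "k_data": ["20 0.0 0.0 0.0 0.5 0.0 0.0 G X"],
    "which_k": 0,
    "num_band": 50,
    "max_iter": 300
}
```
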
--------------------------------------------------------------------------------
/docs/source/keyword/keyword.rst:
--------------------------------------------------------------------------------
1 | Input Keywords
2 | =====================
3 |
4 | .. toctree::
5 |
6 | preprocess
7 | train
8 | inference
--------------------------------------------------------------------------------
/docs/source/keyword/preprocess.md:
--------------------------------------------------------------------------------
1 | # Preprocess
2 |
3 | The default value can be found in `DeepH-pack/deeph/preprocess/preprocess_default.ini`. The following arguments can be set in the configuration file for `Preprocess`:
4 |
5 | ## basic
6 |
7 | - *raw_dir* : The path to the root directory of your dataset. A subdirectory inside will be treated as a piece of data if it contains the Hamiltonian file `openmx.scfout` (or the corresponding output file of another supported interface).
8 |
9 | + *processed_dir* : The path to the root directory to save the preprocessed files. A subdirectory will be created to save the corresponding files for each piece of data. This argument can have the same value as *raw_dir*, in which case the preprocessed files will be created in the same directory as the corresponding `openmx.scfout`.
10 |
11 | - *interface* : Which DFT package is used to get the Hamiltonian. Support `abacus`, `openmx`, `siesta` and `aims`.
12 |
13 | + *multiprocessing* : Number of processes to use to perform `Preprocess`. Set to `-1` to use all available CPUs. Set to `0` to disable multiprocessing.
14 |
15 | - *get_S* : Whether to get the overlap matrix.
16 |
17 | ## interpreter
18 |
19 | - *julia_interpreter* : The path to the julia interpreter.
20 |
21 | ## graph
22 |
23 | - *radius* : The additional cut-off radius for the crystal graph, based on the truncation adopted in the Hamiltonian matrices. `-1.0` means using the same truncation as the Hamiltonian matrices.
24 |
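Putting these together, a minimal configuration might look like the sketch
below (paths are placeholders; keywords left unset fall back to
`preprocess_default.ini`):
```ini
[basic]
raw_dir = /path/to/raw/dataset
processed_dir = /path/to/processed/dataset
interface = openmx
multiprocessing = 0
get_S = False

[interpreter]
julia_interpreter = /usr/local/bin/julia

[graph]
radius = -1.0
```
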
--------------------------------------------------------------------------------
/docs/source/keyword/train.md:
--------------------------------------------------------------------------------
1 | # Train
2 |
3 | The default value can be found in `DeepH-pack/deeph/default.ini`. The following arguments can be set in the configuration file for `Train`:
4 |
5 | ## basic
6 |
7 | - *graph_dir* : The directory to save the graph for the dataset.
8 |
9 | + *save_dir* : The root directory to save the training result.
10 |
11 | - *raw_dir* : The root directory of the preprocessed dataset.
12 |
13 | + *dataset_name* : The name of your dataset.
14 |
15 | - *disable_cuda* : Whether to disable cuda during training.
16 | 
17 | + *device* : The device used for training (`cpu` or `cuda:x`, where `x` is the index of the cuda device). If cuda is disabled by *disable_cuda* or is not available in your environment, you need not set this option, as it will automatically be set to `cpu`.
18 |
19 | - *num_threads* : The number of threads used for PyTorch on CPU.
20 |
21 | + *save_to_time_folder* : Whether to create a subfolder named with the current time in *save_dir*.
22 |
23 | - *save_csv* : Whether to output labels and predictions for all the structures in the format of csv.
24 |
25 | + *tb_writer* : Whether to track and visualize the training process by TensorBoard.
26 |
27 | - *seed* : The seed for generating random numbers.
28 |
29 | + *multiprocessing* : Number of processes used to generate crystal graphs with multiprocessing. Set to `-1` to use all available CPUs. Set to `0` (default) to disable multiprocessing. WARNING: the keyword *num_threads* is incompatible with *multiprocessing*; if you use *multiprocessing* with a value of 1 or higher, the crystal graph generation process will ignore the *num_threads* keyword. Because generating crystal graphs can consume a large amount of memory, we recommend setting `multiprocessing = 0` and adjusting *num_threads* to control the number of threads for optimal performance and memory usage.
30 |
31 | - *orbital* : A JSON format string that defines the matrix elements to be predicted (see the example below). Let *H* denote the DFT Hamiltonian matrix element between orbital *a1* of an atom with atomic number *N1* and orbital *a2* of an atom with atomic number *N2*; each dictionary in the JSON list defines one output of the model, and each key-value pair `"N1 N2": [a1, a2]` inside a dictionary selects one such matrix element. For example, an input of `[{"N1 N2": [a1, a2], "N3 N4": [a3, a4], "N5 N6": [a5, a6]}, {"N7 N8": [a7, a8]}]` can be set for the *orbital* option if you want to predict two matrix elements `H1` and `H2` for the edge feature of an atom pair, where `H1` is built from the elements selected by the first dictionary and `H2` from the element selected by the second. Alternatively, a Python script at `DeepH-pack/tools/get_all_orbital_str.py` can be used to generate a default configuration to predict all orbitals with one model.
55 |
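For instance, the MoS2 configs shipped in `DeepH-pack/ini/` use entries of
the following shape, where `42` is the atomic number of Mo and each
dictionary selects one matrix element by a pair of orbital indices:
```json
[{"42 42": [0, 0]}, {"42 42": [0, 1]}, {"42 42": [0, 2]}]
```
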
56 | ## graph
57 |
58 | - *create_from_DFT* : Whether to use the DFT Hamiltonian matrices to create the graph instead of setting the cut-off radius by hand. It is recommended to set *create_from_DFT* to `True` and not to set *radius* for training.
59 |
60 | + *radius* : The cut-off radius to create graph. Keyword *radius* has no effect if *create_from_DFT* is set to `True`.
61 |
62 | ## train
63 |
64 | - *epochs* : The number of passes over the entire training dataset that the learning algorithm will perform.
65 |
66 | + *pretrained* : The path to the pretrained model, e.g. `/your/pretrained/model/best_state_dict.pkl`.
67 |
68 | - *resume* : The path to the half-trained model, e.g. `/your/half_trained/model/best_state_dict.pkl`.
69 |
70 | + *train_ratio* : The ratio of training data.
71 |
72 | - *val_ratio* : The ratio of validation data.
73 |
74 | + *test_ratio* : The ratio of test data.
75 |
76 | ## hyperparameter
77 |
78 | - *batch_size* : The size of mini-batch.
79 |
80 | + *learning_rate* : Initial learning rate.
81 |
82 | ## network
83 |
84 | - *atom_fea_len* : The number of atom features in MPNN layers.
85 |
86 | + *edge_fea_len* : The number of edge features in MPNN layers.
87 |
88 | - *gauss_stop* : The stopping radius of basis functions used to represent interatomic distances.
89 |
90 | + *num_l* : The number of angular quantum numbers used for the spherical harmonic functions.
91 |
92 | - *distance_expansion* : Which basis functions are used to represent interatomic distances. `choices = ['GaussianBasis', 'BesselBasis', 'ExpBernsteinBasis']`
93 |
94 | + *normalization* : Which form of normalization layers are used. `choices = ['BatchNorm', 'LayerNorm', 'PairNorm', 'InstanceNorm', 'GraphNorm', 'DiffGroupNorm', 'None']`
95 |
96 | - *atom_update_net* : Which form of convolutional layers is used to update atom features. `choices = ['CGConv', 'GAT', 'PAINN']`
97 |
98 |
--------------------------------------------------------------------------------
/docs/source/preprocess/preprocess.md:
--------------------------------------------------------------------------------
1 | # Preprocess the dataset
2 |
3 | `Preprocess` is a part of DeepH-pack. Through `Preprocess`,
4 | DeepH-pack will convert the units of physical quantities, store
5 | the data files in text and *HDF5* formats for each structure
6 | in a separate folder, generate local coordinates, and perform the
7 | basis transformation for DFT Hamiltonian matrices. We use the
8 | following convention of units:
9 |
10 | Quantity | Unit
11 | ---|---
12 | Length | Å
13 | Energy | eV
14 |
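After `Preprocess` finishes, the folder of each structure will contain files
like the listing below (names taken from the preprocessing scripts; the exact
set depends on the chosen *interface*, *target* and *get_S*):
```
element.dat  lat.dat  rlat.dat  site_positions.dat  orbital_types.dat
rc.h5  rh.h5  hamiltonians.h5  overlaps.h5
```
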
15 | You need to edit a configuration file in the *ini* format, referring
16 | to the default file
17 | `DeepH-pack/deeph/preprocess/preprocess_default.ini`. The meaning
18 | of the keywords can be found in the [INPUT KEYWORDS section](https://deeph-pack.readthedocs.io/en/latest/keyword/preprocess.html).
19 | For a quick start, you must set up *raw_dir*, *processed_dir* and *interface*.
20 |
21 | With the configuration file prepared, run
22 | ```bash
23 | deeph-preprocess --config ${config_path}
24 | ```
25 | with `${config_path}` replaced by the path of your configuration file.
26 |
--------------------------------------------------------------------------------
/docs/source/train/train.md:
--------------------------------------------------------------------------------
1 | # Train your model
2 |
3 | `Train` is a part of DeepH-pack, which is used to train a deep
4 | learning model using the processed dataset.
5 |
6 | Prepare a configuration file in the *ini* format, referring to the
7 | default `DeepH-pack/deeph/default.ini`.
8 | The meaning of the keywords can be found in the [INPUT KEYWORDS section](https://deeph-pack.readthedocs.io/en/latest/keyword/train.html).
9 | For a quick start, you must set up *graph_dir*, *save_dir*,
10 | *raw_dir* and *orbital*, other keywords can stay default and
11 | be adjusted later.
12 |
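For illustration, a minimal configuration might look like the sketch below
(paths are placeholders; `[{"6 6": [0, 0]}]` is a toy *orbital* value that
would predict a single matrix element between orbital 0 of two carbon atoms):
```ini
[basic]
graph_dir = /path/to/graph
save_dir = /path/to/result
raw_dir = /path/to/processed/dataset
orbital = [{"6 6": [0, 0]}]
```
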
13 | With the configuration file prepared, run
14 | ```bash
15 | deeph-train --config ${config_path}
16 | ```
17 | with `${config_path}` replaced by the path of your configuration file.
18 |
19 | Tips:
20 | - **Name your dataset**. Use *dataset_name* to name your dataset;
21 | datasets with the same name may overwrite each other.
22 |
23 | - **Hyperparameters of the neural network**. The neural network here contains
24 | some hyperparameters. For a specific problem, you should try adjusting
25 | the hyperparameters to obtain better results.
26 |
27 | - **The keyword *orbital***. The keyword *orbital* states which orbitals or matrix elements are predicted. It is a little complicated to understand its data structure. To figure it out, you can refer to the [INPUT KEYWORDS section](https://deeph-pack.readthedocs.io/en/latest/keyword/train.html#:~:text=generate%20crystal%20graphs.-,orbital,-%3A%20A%20JSON%20format) or the method [make_mask](https://github.com/mzjb/DeepH-pack/blob/main/deeph/kernel.py#:~:text=def%20make_mask(self%2C%20dataset)%3A) in class `DeepHKernel` defined in `DeepH-pack/deeph/kernel.py`.
28 |
29 | Alternatively, a Python script at `DeepH-pack/tools/get_all_orbital_str.py` can be used to generate a default configuration to predict all orbitals with one model.
30 |
31 | - **Use TensorBoard for visualizations**. You can track and visualize the training process through TensorBoard by running
32 | ```bash
33 | tensorboard --logdir=./tensorboard
34 | ```
35 | in the output directory (*save_dir*).
--------------------------------------------------------------------------------
/ini/MoS2_1.ini:
--------------------------------------------------------------------------------
1 | [basic]
2 | graph_dir = /home/user/DeepH/graph_data/
3 | save_dir = /home/user/DeepH/result/
4 | raw_dir = /path/to/downloaded/dataset
5 | dataset_name = MoS2
6 | interface = h5
7 | save_to_time_folder = True
8 | save_csv = False
9 | orbital = [{"42 42": [0, 0]}, {"42 42": [0, 1]}, {"42 42": [0, 2]}, {"42 42": [0, 3]}, {"42 42": [0, 4]}, {"42 42": [0, 5]}, {"42 42": [0, 6]}, {"42 42": [0, 7]}, {"42 42": [0, 8]}, {"42 42": [0, 9]}, {"42 42": [0, 10]}, {"42 42": [0, 11]}, {"42 42": [0, 12]}, {"42 42": [0, 13]}, {"42 42": [0, 14]}, {"42 42": [0, 15]}, {"42 42": [0, 16]}, {"42 42": [0, 17]}, {"42 42": [0, 18]}, {"42 42": [1, 0]}, {"42 42": [1, 1]}, {"42 42": [1, 2]}, {"42 42": [1, 3]}, {"42 42": [1, 4]}, {"42 42": [1, 5]}, {"42 42": [1, 6]}, {"42 42": [1, 7]}, {"42 42": [1, 8]}, {"42 42": [1, 9]}, {"42 42": [1, 10]}, {"42 42": [1, 11]}, {"42 42": [1, 12]}, {"42 42": [1, 13]}, {"42 42": [1, 14]}, {"42 42": [1, 15]}, {"42 42": [1, 16]}, {"42 42": [1, 17]}, {"42 42": [1, 18]}, {"42 42": [2, 0]}, {"42 42": [2, 1]}, {"42 42": [2, 2]}, {"42 42": [2, 3]}, {"42 42": [2, 4]}, {"42 42": [2, 5]}, {"42 42": [2, 6]}, {"42 42": [2, 7]}, {"42 42": [2, 8]}, {"42 42": [2, 9]}, {"42 42": [2, 10]}, {"42 42": [2, 11]}, {"42 42": [2, 12]}, {"42 42": [2, 13]}, {"42 42": [2, 14]}, {"42 42": [2, 15]}, {"42 42": [2, 16]}, {"42 42": [2, 17]}, {"42 42": [2, 18]}, {"42 42": [3, 0]}, {"42 42": [3, 1]}, {"42 42": [3, 2]}, {"42 42": [3, 3]}, {"42 42": [3, 4]}, {"42 42": [3, 5]}, {"42 42": [3, 6]}, {"42 42": [3, 7]}, {"42 42": [3, 8]}, {"42 42": [3, 9]}, {"42 42": [3, 10]}, {"42 42": [3, 11]}, {"42 42": [3, 12]}, {"42 42": [3, 13]}, {"42 42": [3, 14]}, {"42 42": [3, 15]}, {"42 42": [3, 16]}, {"42 42": [3, 17]}, {"42 42": [3, 18]}, {"42 42": [4, 0]}, {"42 42": [4, 1]}, {"42 42": [4, 2]}, {"42 42": [4, 3]}, {"42 42": [4, 4]}, {"42 42": [4, 5]}, {"42 42": [4, 6]}, {"42 42": [4, 7]}, {"42 42": [4, 8]}, {"42 42": [4, 9]}, {"42 42": [4, 10]}, {"42 42": [4, 11]}, {"42 42": [4, 12]}, {"42 42": [4, 13]}, {"42 42": [4, 14]}, {"42 42": [4, 15]}, {"42 42": [4, 16]}, {"42 42": [4, 17]}, {"42 42": [4, 18]}, {"42 42": [5, 0]}, {"42 42": [5, 1]}, {"42 42": [5, 2]}, {"42 42": [5, 3]}, {"42 42": [5, 4]}, {"42 42": [5, 5]}, {"42 42": [5, 6]}, {"42 42": [5, 7]}, {"42 42": [5, 8]}, {"42 42": [5, 9]}, {"42 42": [5, 10]}, {"42 42": [5, 11]}, {"42 42": [5, 12]}, {"42 42": [5, 13]}, {"42 42": [5, 14]}, {"42 42": [5, 15]}, {"42 42": [5, 16]}, {"42 42": [5, 17]}, {"42 42": [5, 18]}, {"42 42": [6, 0]}, {"42 42": [6, 1]}, {"42 42": [6, 2]}, {"42 42": [6, 3]}, {"42 42": [6, 4]}, {"42 42": [6, 5]}, {"42 42": [6, 6]}, {"42 42": [6, 7]}, {"42 42": [6, 8]}, {"42 42": [6, 9]}, {"42 42": [6, 10]}, {"42 42": [6, 11]}, {"42 42": [6, 12]}, {"42 42": [6, 13]}, {"42 42": [6, 14]}, {"42 42": [6, 15]}, {"42 42": [6, 16]}, {"42 42": [6, 17]}, {"42 42": [6, 18]}, {"42 42": [7, 0]}, {"42 42": [7, 1]}, {"42 42": [7, 2]}, {"42 42": [7, 3]}, {"42 42": [7, 4]}, {"42 42": [7, 5]}, {"42 42": [7, 6]}, {"42 42": [7, 7]}, {"42 42": [7, 8]}, {"42 42": [7, 9]}, {"42 42": [7, 10]}, {"42 42": [7, 11]}, {"42 42": [7, 12]}, {"42 42": [7, 13]}, {"42 42": [7, 14]}, {"42 42": [7, 15]}, {"42 42": [7, 16]}, {"42 42": [7, 17]}, {"42 42": [7, 18]}, {"42 42": [8, 0]}, {"42 42": [8, 1]}, {"42 42": [8, 2]}, {"42 42": [8, 3]}, {"42 42": [8, 4]}, {"42 42": [8, 5]}, {"42 42": [8, 6]}, {"42 42": [8, 7]}, {"42 42": [8, 8]}, {"42 42": [8, 9]}, {"42 42": [8, 10]}, {"42 42": [8, 11]}, {"42 42": [8, 12]}, {"42 42": [8, 13]}, {"42 42": [8, 14]}, {"42 42": [8, 15]}, {"42 42": [8, 16]}, {"42 42": [8, 17]}, {"42 42": [8, 18]}, {"42 42": [9, 0]}, {"42 42": [9, 1]}, {"42 42": [9, 2]}, {"42 42": [9, 3]}, {"42 42": [9, 4]}, {"42 42": [9, 5]}, {"42 42": [9, 6]}, {"42 42": [9, 7]}, {"42 42": [9, 8]}, {"42 42": [9, 9]}, {"42 42": [9, 10]}, 
{"42 42": [9, 11]}, {"42 42": [9, 12]}, {"42 42": [9, 13]}, {"42 42": [9, 14]}, {"42 42": [9, 15]}, {"42 42": [9, 16]}, {"42 42": [9, 17]}, {"42 42": [9, 18]}, {"42 42": [10, 0]}, {"42 42": [10, 1]}, {"42 42": [10, 2]}, {"42 42": [10, 3]}, {"42 42": [10, 4]}, {"42 42": [10, 5]}, {"42 42": [10, 6]}, {"42 42": [10, 7]}, {"42 42": [10, 8]}, {"42 42": [10, 9]}, {"42 42": [10, 10]}, {"42 42": [10, 11]}, {"42 42": [10, 12]}, {"42 42": [10, 13]}, {"42 42": [10, 14]}, {"42 42": [10, 15]}, {"42 42": [10, 16]}, {"42 42": [10, 17]}, {"42 42": [10, 18]}, {"42 42": [11, 0]}, {"42 42": [11, 1]}, {"42 42": [11, 2]}, {"42 42": [11, 3]}, {"42 42": [11, 4]}, {"42 42": [11, 5]}, {"42 42": [11, 6]}, {"42 42": [11, 7]}, {"42 42": [11, 8]}, {"42 42": [11, 9]}, {"42 42": [11, 10]}, {"42 42": [11, 11]}, {"42 42": [11, 12]}, {"42 42": [11, 13]}, {"42 42": [11, 14]}, {"42 42": [11, 15]}, {"42 42": [11, 16]}, {"42 42": [11, 17]}, {"42 42": [11, 18]}, {"42 42": [12, 0]}, {"42 42": [12, 1]}, {"42 42": [12, 2]}, {"42 42": [12, 3]}, {"42 42": [12, 4]}, {"42 42": [12, 5]}, {"42 42": [12, 6]}, {"42 42": [12, 7]}, {"42 42": [12, 8]}, {"42 42": [12, 9]}, {"42 42": [12, 10]}, {"42 42": [12, 11]}, {"42 42": [12, 12]}, {"42 42": [12, 13]}, {"42 42": [12, 14]}, {"42 42": [12, 15]}, {"42 42": [12, 16]}, {"42 42": [12, 17]}, {"42 42": [12, 18]}, {"42 42": [13, 0]}, {"42 42": [13, 1]}, {"42 42": [13, 2]}, {"42 42": [13, 3]}, {"42 42": [13, 4]}, {"42 42": [13, 5]}, {"42 42": [13, 6]}, {"42 42": [13, 7]}, {"42 42": [13, 8]}, {"42 42": [13, 9]}, {"42 42": [13, 10]}, {"42 42": [13, 11]}, {"42 42": [13, 12]}, {"42 42": [13, 13]}, {"42 42": [13, 14]}, {"42 42": [13, 15]}, {"42 42": [13, 16]}, {"42 42": [13, 17]}, {"42 42": [13, 18]}, {"42 42": [14, 0]}, {"42 42": [14, 1]}, {"42 42": [14, 2]}, {"42 42": [14, 3]}, {"42 42": [14, 4]}, {"42 42": [14, 5]}, {"42 42": [14, 6]}, {"42 42": [14, 7]}, {"42 42": [14, 8]}, {"42 42": [14, 9]}, {"42 42": [14, 10]}, {"42 42": [14, 11]}, {"42 42": [14, 12]}, {"42 42": [14, 13]}, {"42 42": [14, 14]}, {"42 42": [14, 15]}, {"42 42": [14, 16]}, {"42 42": [14, 17]}, {"42 42": [14, 18]}, {"42 42": [15, 0]}, {"42 42": [15, 1]}, {"42 42": [15, 2]}, {"42 42": [15, 3]}, {"42 42": [15, 4]}, {"42 42": [15, 5]}, {"42 42": [15, 6]}, {"42 42": [15, 7]}, {"42 42": [15, 8]}, {"42 42": [15, 9]}, {"42 42": [15, 10]}, {"42 42": [15, 11]}, {"42 42": [15, 12]}, {"42 42": [15, 13]}, {"42 42": [15, 14]}, {"42 42": [15, 15]}, {"42 42": [15, 16]}, {"42 42": [15, 17]}, {"42 42": [15, 18]}, {"42 42": [16, 0]}, {"42 42": [16, 1]}, {"42 42": [16, 2]}, {"42 42": [16, 3]}, {"42 42": [16, 4]}, {"42 42": [16, 5]}, {"42 42": [16, 6]}, {"42 42": [16, 7]}, {"42 42": [16, 8]}, {"42 42": [16, 9]}, {"42 42": [16, 10]}, {"42 42": [16, 11]}, {"42 42": [16, 12]}, {"42 42": [16, 13]}, {"42 42": [16, 14]}, {"42 42": [16, 15]}, {"42 42": [16, 16]}, {"42 42": [16, 17]}, {"42 42": [16, 18]}, {"42 42": [17, 0]}, {"42 42": [17, 1]}, {"42 42": [17, 2]}, {"42 42": [17, 3]}, {"42 42": [17, 4]}, {"42 42": [17, 5]}, {"42 42": [17, 6]}, {"42 42": [17, 7]}, {"42 42": [17, 8]}, {"42 42": [17, 9]}, {"42 42": [17, 10]}, {"42 42": [17, 11]}, {"42 42": [17, 12]}, {"42 42": [17, 13]}, {"42 42": [17, 14]}, {"42 42": [17, 15]}, {"42 42": [17, 16]}, {"42 42": [17, 17]}, {"42 42": [17, 18]}, {"42 42": [18, 0]}, {"42 42": [18, 1]}, {"42 42": [18, 2]}, {"42 42": [18, 3]}, {"42 42": [18, 4]}, {"42 42": [18, 5]}, {"42 42": [18, 6]}, {"42 42": [18, 7]}, {"42 42": [18, 8]}, {"42 42": [18, 9]}, {"42 42": [18, 10]}, {"42 42": [18, 11]}, {"42 42": [18, 12]}, {"42 42": [18, 
13]}, {"42 42": [18, 14]}, {"42 42": [18, 15]}, {"42 42": [18, 16]}, {"42 42": [18, 17]}, {"42 42": [18, 18]}]
10 |
11 | [graph]
12 | create_from_DFT = True
13 |
14 | [train]
15 | epochs = 5000
16 | train_ratio = 0.6
17 | val_ratio = 0.2
18 | test_ratio = 0.2
19 | revert_then_decay = True
20 | revert_decay_epoch = [800, 2000, 3000, 4000]
21 | revert_decay_gamma = [0.4, 0.5, 0.5, 0.4]
22 |
23 | [hyperparameter]
24 | batch_size = 1
25 | learning_rate = 0.001
26 |
27 | [network]
28 | gauss_stop = 7.0
29 |
--------------------------------------------------------------------------------
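The `orbital` keyword above is a JSON list in which each entry maps an atomic-number pair to one pair of orbital indices; this file enumerates all 19 × 19 Mo–Mo ("42 42") Hamiltonian blocks, and the three companion configs that follow cover the remaining element pairs. A minimal sketch of parsing and sanity-checking the keyword, assuming the file is read with Python's standard configparser (illustrative only, not DeepH-pack's own config loader):

```python
# Illustrative sketch, not DeepH-pack's actual loader: parse the `orbital`
# keyword from MoS2_1.ini and sanity-check the index ranges.
import configparser
import json

config = configparser.ConfigParser()
config.read("ini/MoS2_1.ini")

orbital = json.loads(config["basic"]["orbital"])
pairs = {key for entry in orbital for key in entry}
print(pairs)         # {'42 42'} -- only Mo-Mo blocks in this file
print(len(orbital))  # 361 = 19 * 19 orbital pairs
# every index must fall inside the 19-orbital Mo basis (0..18)
assert all(0 <= i <= 18 and 0 <= j <= 18
           for entry in orbital for i, j in entry.values())
```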
/ini/MoS2_2.ini:
--------------------------------------------------------------------------------
1 | [basic]
2 | graph_dir = /home/user/DeepH/graph_data/
3 | save_dir = /home/user/DeepH/result/
4 | raw_dir = /path/to/downloaded/dataset
5 | dataset_name = MoS2
6 | interface = h5
7 | save_to_time_folder = True
8 | save_csv = False
9 | orbital = [{"42 16": [0, 0]}, {"42 16": [0, 1]}, {"42 16": [0, 2]}, {"42 16": [0, 3]}, {"42 16": [0, 4]}, {"42 16": [0, 5]}, {"42 16": [0, 6]}, {"42 16": [0, 7]}, {"42 16": [0, 8]}, {"42 16": [0, 9]}, {"42 16": [0, 10]}, {"42 16": [0, 11]}, {"42 16": [0, 12]}, {"42 16": [1, 0]}, {"42 16": [1, 1]}, {"42 16": [1, 2]}, {"42 16": [1, 3]}, {"42 16": [1, 4]}, {"42 16": [1, 5]}, {"42 16": [1, 6]}, {"42 16": [1, 7]}, {"42 16": [1, 8]}, {"42 16": [1, 9]}, {"42 16": [1, 10]}, {"42 16": [1, 11]}, {"42 16": [1, 12]}, {"42 16": [2, 0]}, {"42 16": [2, 1]}, {"42 16": [2, 2]}, {"42 16": [2, 3]}, {"42 16": [2, 4]}, {"42 16": [2, 5]}, {"42 16": [2, 6]}, {"42 16": [2, 7]}, {"42 16": [2, 8]}, {"42 16": [2, 9]}, {"42 16": [2, 10]}, {"42 16": [2, 11]}, {"42 16": [2, 12]}, {"42 16": [3, 0]}, {"42 16": [3, 1]}, {"42 16": [3, 2]}, {"42 16": [3, 3]}, {"42 16": [3, 4]}, {"42 16": [3, 5]}, {"42 16": [3, 6]}, {"42 16": [3, 7]}, {"42 16": [3, 8]}, {"42 16": [3, 9]}, {"42 16": [3, 10]}, {"42 16": [3, 11]}, {"42 16": [3, 12]}, {"42 16": [4, 0]}, {"42 16": [4, 1]}, {"42 16": [4, 2]}, {"42 16": [4, 3]}, {"42 16": [4, 4]}, {"42 16": [4, 5]}, {"42 16": [4, 6]}, {"42 16": [4, 7]}, {"42 16": [4, 8]}, {"42 16": [4, 9]}, {"42 16": [4, 10]}, {"42 16": [4, 11]}, {"42 16": [4, 12]}, {"42 16": [5, 0]}, {"42 16": [5, 1]}, {"42 16": [5, 2]}, {"42 16": [5, 3]}, {"42 16": [5, 4]}, {"42 16": [5, 5]}, {"42 16": [5, 6]}, {"42 16": [5, 7]}, {"42 16": [5, 8]}, {"42 16": [5, 9]}, {"42 16": [5, 10]}, {"42 16": [5, 11]}, {"42 16": [5, 12]}, {"42 16": [6, 0]}, {"42 16": [6, 1]}, {"42 16": [6, 2]}, {"42 16": [6, 3]}, {"42 16": [6, 4]}, {"42 16": [6, 5]}, {"42 16": [6, 6]}, {"42 16": [6, 7]}, {"42 16": [6, 8]}, {"42 16": [6, 9]}, {"42 16": [6, 10]}, {"42 16": [6, 11]}, {"42 16": [6, 12]}, {"42 16": [7, 0]}, {"42 16": [7, 1]}, {"42 16": [7, 2]}, {"42 16": [7, 3]}, {"42 16": [7, 4]}, {"42 16": [7, 5]}, {"42 16": [7, 6]}, {"42 16": [7, 7]}, {"42 16": [7, 8]}, {"42 16": [7, 9]}, {"42 16": [7, 10]}, {"42 16": [7, 11]}, {"42 16": [7, 12]}, {"42 16": [8, 0]}, {"42 16": [8, 1]}, {"42 16": [8, 2]}, {"42 16": [8, 3]}, {"42 16": [8, 4]}, {"42 16": [8, 5]}, {"42 16": [8, 6]}, {"42 16": [8, 7]}, {"42 16": [8, 8]}, {"42 16": [8, 9]}, {"42 16": [8, 10]}, {"42 16": [8, 11]}, {"42 16": [8, 12]}, {"42 16": [9, 0]}, {"42 16": [9, 1]}, {"42 16": [9, 2]}, {"42 16": [9, 3]}, {"42 16": [9, 4]}, {"42 16": [9, 5]}, {"42 16": [9, 6]}, {"42 16": [9, 7]}, {"42 16": [9, 8]}, {"42 16": [9, 9]}, {"42 16": [9, 10]}, {"42 16": [9, 11]}, {"42 16": [9, 12]}, {"42 16": [10, 0]}, {"42 16": [10, 1]}, {"42 16": [10, 2]}, {"42 16": [10, 3]}, {"42 16": [10, 4]}, {"42 16": [10, 5]}, {"42 16": [10, 6]}, {"42 16": [10, 7]}, {"42 16": [10, 8]}, {"42 16": [10, 9]}, {"42 16": [10, 10]}, {"42 16": [10, 11]}, {"42 16": [10, 12]}, {"42 16": [11, 0]}, {"42 16": [11, 1]}, {"42 16": [11, 2]}, {"42 16": [11, 3]}, {"42 16": [11, 4]}, {"42 16": [11, 5]}, {"42 16": [11, 6]}, {"42 16": [11, 7]}, {"42 16": [11, 8]}, {"42 16": [11, 9]}, {"42 16": [11, 10]}, {"42 16": [11, 11]}, {"42 16": [11, 12]}, {"42 16": [12, 0]}, {"42 16": [12, 1]}, {"42 16": [12, 2]}, {"42 16": [12, 3]}, {"42 16": [12, 4]}, {"42 16": [12, 5]}, {"42 16": [12, 6]}, {"42 16": [12, 7]}, {"42 16": [12, 8]}, {"42 16": [12, 9]}, {"42 16": [12, 10]}, {"42 16": [12, 11]}, {"42 16": [12, 12]}, {"42 16": [13, 0]}, {"42 16": [13, 1]}, {"42 16": [13, 2]}, {"42 16": [13, 3]}, {"42 16": [13, 4]}, {"42 16": [13, 5]}, {"42 16": [13, 6]}, {"42 16": [13, 7]}, {"42 16": [13, 8]}, {"42 16": [13, 9]}, {"42 16": [13, 10]}, {"42 16": [13, 11]}, {"42 
16": [13, 12]}, {"42 16": [14, 0]}, {"42 16": [14, 1]}, {"42 16": [14, 2]}, {"42 16": [14, 3]}, {"42 16": [14, 4]}, {"42 16": [14, 5]}, {"42 16": [14, 6]}, {"42 16": [14, 7]}, {"42 16": [14, 8]}, {"42 16": [14, 9]}, {"42 16": [14, 10]}, {"42 16": [14, 11]}, {"42 16": [14, 12]}, {"42 16": [15, 0]}, {"42 16": [15, 1]}, {"42 16": [15, 2]}, {"42 16": [15, 3]}, {"42 16": [15, 4]}, {"42 16": [15, 5]}, {"42 16": [15, 6]}, {"42 16": [15, 7]}, {"42 16": [15, 8]}, {"42 16": [15, 9]}, {"42 16": [15, 10]}, {"42 16": [15, 11]}, {"42 16": [15, 12]}, {"42 16": [16, 0]}, {"42 16": [16, 1]}, {"42 16": [16, 2]}, {"42 16": [16, 3]}, {"42 16": [16, 4]}, {"42 16": [16, 5]}, {"42 16": [16, 6]}, {"42 16": [16, 7]}, {"42 16": [16, 8]}, {"42 16": [16, 9]}, {"42 16": [16, 10]}, {"42 16": [16, 11]}, {"42 16": [16, 12]}, {"42 16": [17, 0]}, {"42 16": [17, 1]}, {"42 16": [17, 2]}, {"42 16": [17, 3]}, {"42 16": [17, 4]}, {"42 16": [17, 5]}, {"42 16": [17, 6]}, {"42 16": [17, 7]}, {"42 16": [17, 8]}, {"42 16": [17, 9]}, {"42 16": [17, 10]}, {"42 16": [17, 11]}, {"42 16": [17, 12]}, {"42 16": [18, 0]}, {"42 16": [18, 1]}, {"42 16": [18, 2]}, {"42 16": [18, 3]}, {"42 16": [18, 4]}, {"42 16": [18, 5]}, {"42 16": [18, 6]}, {"42 16": [18, 7]}, {"42 16": [18, 8]}, {"42 16": [18, 9]}, {"42 16": [18, 10]}, {"42 16": [18, 11]}, {"42 16": [18, 12]}]
10 |
11 | [graph]
12 | create_from_DFT = True
13 |
14 | [train]
15 | epochs = 5000
16 | train_ratio = 0.6
17 | val_ratio = 0.2
18 | test_ratio = 0.2
19 | revert_then_decay = True
20 | revert_decay_epoch = [800, 2000, 3000, 4000]
21 | revert_decay_gamma = [0.4, 0.5, 0.5, 0.4]
22 |
23 | [hyperparameter]
24 | batch_size = 1
25 | learning_rate = 0.001
26 |
27 | [network]
28 | gauss_stop = 7.0
29 |
--------------------------------------------------------------------------------
/ini/MoS2_3.ini:
--------------------------------------------------------------------------------
1 | [basic]
2 | graph_dir = /home/user/DeepH/graph_data/
3 | save_dir = /home/user/DeepH/result/
4 | raw_dir = /path/to/downloaded/dataset
5 | dataset_name = MoS2
6 | interface = h5
7 | save_to_time_folder = True
8 | save_csv = False
9 | orbital = [{"16 42": [0, 0]}, {"16 42": [0, 1]}, {"16 42": [0, 2]}, {"16 42": [0, 3]}, {"16 42": [0, 4]}, {"16 42": [0, 5]}, {"16 42": [0, 6]}, {"16 42": [0, 7]}, {"16 42": [0, 8]}, {"16 42": [0, 9]}, {"16 42": [0, 10]}, {"16 42": [0, 11]}, {"16 42": [0, 12]}, {"16 42": [0, 13]}, {"16 42": [0, 14]}, {"16 42": [0, 15]}, {"16 42": [0, 16]}, {"16 42": [0, 17]}, {"16 42": [0, 18]}, {"16 42": [1, 0]}, {"16 42": [1, 1]}, {"16 42": [1, 2]}, {"16 42": [1, 3]}, {"16 42": [1, 4]}, {"16 42": [1, 5]}, {"16 42": [1, 6]}, {"16 42": [1, 7]}, {"16 42": [1, 8]}, {"16 42": [1, 9]}, {"16 42": [1, 10]}, {"16 42": [1, 11]}, {"16 42": [1, 12]}, {"16 42": [1, 13]}, {"16 42": [1, 14]}, {"16 42": [1, 15]}, {"16 42": [1, 16]}, {"16 42": [1, 17]}, {"16 42": [1, 18]}, {"16 42": [2, 0]}, {"16 42": [2, 1]}, {"16 42": [2, 2]}, {"16 42": [2, 3]}, {"16 42": [2, 4]}, {"16 42": [2, 5]}, {"16 42": [2, 6]}, {"16 42": [2, 7]}, {"16 42": [2, 8]}, {"16 42": [2, 9]}, {"16 42": [2, 10]}, {"16 42": [2, 11]}, {"16 42": [2, 12]}, {"16 42": [2, 13]}, {"16 42": [2, 14]}, {"16 42": [2, 15]}, {"16 42": [2, 16]}, {"16 42": [2, 17]}, {"16 42": [2, 18]}, {"16 42": [3, 0]}, {"16 42": [3, 1]}, {"16 42": [3, 2]}, {"16 42": [3, 3]}, {"16 42": [3, 4]}, {"16 42": [3, 5]}, {"16 42": [3, 6]}, {"16 42": [3, 7]}, {"16 42": [3, 8]}, {"16 42": [3, 9]}, {"16 42": [3, 10]}, {"16 42": [3, 11]}, {"16 42": [3, 12]}, {"16 42": [3, 13]}, {"16 42": [3, 14]}, {"16 42": [3, 15]}, {"16 42": [3, 16]}, {"16 42": [3, 17]}, {"16 42": [3, 18]}, {"16 42": [4, 0]}, {"16 42": [4, 1]}, {"16 42": [4, 2]}, {"16 42": [4, 3]}, {"16 42": [4, 4]}, {"16 42": [4, 5]}, {"16 42": [4, 6]}, {"16 42": [4, 7]}, {"16 42": [4, 8]}, {"16 42": [4, 9]}, {"16 42": [4, 10]}, {"16 42": [4, 11]}, {"16 42": [4, 12]}, {"16 42": [4, 13]}, {"16 42": [4, 14]}, {"16 42": [4, 15]}, {"16 42": [4, 16]}, {"16 42": [4, 17]}, {"16 42": [4, 18]}, {"16 42": [5, 0]}, {"16 42": [5, 1]}, {"16 42": [5, 2]}, {"16 42": [5, 3]}, {"16 42": [5, 4]}, {"16 42": [5, 5]}, {"16 42": [5, 6]}, {"16 42": [5, 7]}, {"16 42": [5, 8]}, {"16 42": [5, 9]}, {"16 42": [5, 10]}, {"16 42": [5, 11]}, {"16 42": [5, 12]}, {"16 42": [5, 13]}, {"16 42": [5, 14]}, {"16 42": [5, 15]}, {"16 42": [5, 16]}, {"16 42": [5, 17]}, {"16 42": [5, 18]}, {"16 42": [6, 0]}, {"16 42": [6, 1]}, {"16 42": [6, 2]}, {"16 42": [6, 3]}, {"16 42": [6, 4]}, {"16 42": [6, 5]}, {"16 42": [6, 6]}, {"16 42": [6, 7]}, {"16 42": [6, 8]}, {"16 42": [6, 9]}, {"16 42": [6, 10]}, {"16 42": [6, 11]}, {"16 42": [6, 12]}, {"16 42": [6, 13]}, {"16 42": [6, 14]}, {"16 42": [6, 15]}, {"16 42": [6, 16]}, {"16 42": [6, 17]}, {"16 42": [6, 18]}, {"16 42": [7, 0]}, {"16 42": [7, 1]}, {"16 42": [7, 2]}, {"16 42": [7, 3]}, {"16 42": [7, 4]}, {"16 42": [7, 5]}, {"16 42": [7, 6]}, {"16 42": [7, 7]}, {"16 42": [7, 8]}, {"16 42": [7, 9]}, {"16 42": [7, 10]}, {"16 42": [7, 11]}, {"16 42": [7, 12]}, {"16 42": [7, 13]}, {"16 42": [7, 14]}, {"16 42": [7, 15]}, {"16 42": [7, 16]}, {"16 42": [7, 17]}, {"16 42": [7, 18]}, {"16 42": [8, 0]}, {"16 42": [8, 1]}, {"16 42": [8, 2]}, {"16 42": [8, 3]}, {"16 42": [8, 4]}, {"16 42": [8, 5]}, {"16 42": [8, 6]}, {"16 42": [8, 7]}, {"16 42": [8, 8]}, {"16 42": [8, 9]}, {"16 42": [8, 10]}, {"16 42": [8, 11]}, {"16 42": [8, 12]}, {"16 42": [8, 13]}, {"16 42": [8, 14]}, {"16 42": [8, 15]}, {"16 42": [8, 16]}, {"16 42": [8, 17]}, {"16 42": [8, 18]}, {"16 42": [9, 0]}, {"16 42": [9, 1]}, {"16 42": [9, 2]}, {"16 42": [9, 3]}, {"16 42": [9, 4]}, {"16 42": [9, 5]}, {"16 42": [9, 6]}, {"16 42": [9, 7]}, {"16 42": [9, 8]}, {"16 42": [9, 9]}, {"16 42": [9, 10]}, 
{"16 42": [9, 11]}, {"16 42": [9, 12]}, {"16 42": [9, 13]}, {"16 42": [9, 14]}, {"16 42": [9, 15]}, {"16 42": [9, 16]}, {"16 42": [9, 17]}, {"16 42": [9, 18]}, {"16 42": [10, 0]}, {"16 42": [10, 1]}, {"16 42": [10, 2]}, {"16 42": [10, 3]}, {"16 42": [10, 4]}, {"16 42": [10, 5]}, {"16 42": [10, 6]}, {"16 42": [10, 7]}, {"16 42": [10, 8]}, {"16 42": [10, 9]}, {"16 42": [10, 10]}, {"16 42": [10, 11]}, {"16 42": [10, 12]}, {"16 42": [10, 13]}, {"16 42": [10, 14]}, {"16 42": [10, 15]}, {"16 42": [10, 16]}, {"16 42": [10, 17]}, {"16 42": [10, 18]}, {"16 42": [11, 0]}, {"16 42": [11, 1]}, {"16 42": [11, 2]}, {"16 42": [11, 3]}, {"16 42": [11, 4]}, {"16 42": [11, 5]}, {"16 42": [11, 6]}, {"16 42": [11, 7]}, {"16 42": [11, 8]}, {"16 42": [11, 9]}, {"16 42": [11, 10]}, {"16 42": [11, 11]}, {"16 42": [11, 12]}, {"16 42": [11, 13]}, {"16 42": [11, 14]}, {"16 42": [11, 15]}, {"16 42": [11, 16]}, {"16 42": [11, 17]}, {"16 42": [11, 18]}, {"16 42": [12, 0]}, {"16 42": [12, 1]}, {"16 42": [12, 2]}, {"16 42": [12, 3]}, {"16 42": [12, 4]}, {"16 42": [12, 5]}, {"16 42": [12, 6]}, {"16 42": [12, 7]}, {"16 42": [12, 8]}, {"16 42": [12, 9]}, {"16 42": [12, 10]}, {"16 42": [12, 11]}, {"16 42": [12, 12]}, {"16 42": [12, 13]}, {"16 42": [12, 14]}, {"16 42": [12, 15]}, {"16 42": [12, 16]}, {"16 42": [12, 17]}, {"16 42": [12, 18]}]
10 |
11 | [graph]
12 | create_from_DFT = True
13 |
14 | [train]
15 | epochs = 5000
16 | train_ratio = 0.6
17 | val_ratio = 0.2
18 | test_ratio = 0.2
19 | revert_then_decay = True
20 | revert_decay_epoch = [800, 2000, 3000, 4000]
21 | revert_decay_gamma = [0.4, 0.5, 0.5, 0.4]
22 |
23 | [hyperparameter]
24 | batch_size = 1
25 | learning_rate = 0.001
26 |
27 | [network]
28 | gauss_stop = 7.0
29 |
--------------------------------------------------------------------------------
/ini/MoS2_4.ini:
--------------------------------------------------------------------------------
1 | [basic]
2 | graph_dir = /home/user/DeepH/graph_data/
3 | save_dir = /home/user/DeepH/result/
4 | raw_dir = /path/to/downloaded/dataset
5 | dataset_name = MoS2
6 | interface = h5
7 | save_to_time_folder = True
8 | save_csv = False
9 | orbital = [{"16 16": [0, 0]}, {"16 16": [0, 1]}, {"16 16": [0, 2]}, {"16 16": [0, 3]}, {"16 16": [0, 4]}, {"16 16": [0, 5]}, {"16 16": [0, 6]}, {"16 16": [0, 7]}, {"16 16": [0, 8]}, {"16 16": [0, 9]}, {"16 16": [0, 10]}, {"16 16": [0, 11]}, {"16 16": [0, 12]}, {"16 16": [1, 0]}, {"16 16": [1, 1]}, {"16 16": [1, 2]}, {"16 16": [1, 3]}, {"16 16": [1, 4]}, {"16 16": [1, 5]}, {"16 16": [1, 6]}, {"16 16": [1, 7]}, {"16 16": [1, 8]}, {"16 16": [1, 9]}, {"16 16": [1, 10]}, {"16 16": [1, 11]}, {"16 16": [1, 12]}, {"16 16": [2, 0]}, {"16 16": [2, 1]}, {"16 16": [2, 2]}, {"16 16": [2, 3]}, {"16 16": [2, 4]}, {"16 16": [2, 5]}, {"16 16": [2, 6]}, {"16 16": [2, 7]}, {"16 16": [2, 8]}, {"16 16": [2, 9]}, {"16 16": [2, 10]}, {"16 16": [2, 11]}, {"16 16": [2, 12]}, {"16 16": [3, 0]}, {"16 16": [3, 1]}, {"16 16": [3, 2]}, {"16 16": [3, 3]}, {"16 16": [3, 4]}, {"16 16": [3, 5]}, {"16 16": [3, 6]}, {"16 16": [3, 7]}, {"16 16": [3, 8]}, {"16 16": [3, 9]}, {"16 16": [3, 10]}, {"16 16": [3, 11]}, {"16 16": [3, 12]}, {"16 16": [4, 0]}, {"16 16": [4, 1]}, {"16 16": [4, 2]}, {"16 16": [4, 3]}, {"16 16": [4, 4]}, {"16 16": [4, 5]}, {"16 16": [4, 6]}, {"16 16": [4, 7]}, {"16 16": [4, 8]}, {"16 16": [4, 9]}, {"16 16": [4, 10]}, {"16 16": [4, 11]}, {"16 16": [4, 12]}, {"16 16": [5, 0]}, {"16 16": [5, 1]}, {"16 16": [5, 2]}, {"16 16": [5, 3]}, {"16 16": [5, 4]}, {"16 16": [5, 5]}, {"16 16": [5, 6]}, {"16 16": [5, 7]}, {"16 16": [5, 8]}, {"16 16": [5, 9]}, {"16 16": [5, 10]}, {"16 16": [5, 11]}, {"16 16": [5, 12]}, {"16 16": [6, 0]}, {"16 16": [6, 1]}, {"16 16": [6, 2]}, {"16 16": [6, 3]}, {"16 16": [6, 4]}, {"16 16": [6, 5]}, {"16 16": [6, 6]}, {"16 16": [6, 7]}, {"16 16": [6, 8]}, {"16 16": [6, 9]}, {"16 16": [6, 10]}, {"16 16": [6, 11]}, {"16 16": [6, 12]}, {"16 16": [7, 0]}, {"16 16": [7, 1]}, {"16 16": [7, 2]}, {"16 16": [7, 3]}, {"16 16": [7, 4]}, {"16 16": [7, 5]}, {"16 16": [7, 6]}, {"16 16": [7, 7]}, {"16 16": [7, 8]}, {"16 16": [7, 9]}, {"16 16": [7, 10]}, {"16 16": [7, 11]}, {"16 16": [7, 12]}, {"16 16": [8, 0]}, {"16 16": [8, 1]}, {"16 16": [8, 2]}, {"16 16": [8, 3]}, {"16 16": [8, 4]}, {"16 16": [8, 5]}, {"16 16": [8, 6]}, {"16 16": [8, 7]}, {"16 16": [8, 8]}, {"16 16": [8, 9]}, {"16 16": [8, 10]}, {"16 16": [8, 11]}, {"16 16": [8, 12]}, {"16 16": [9, 0]}, {"16 16": [9, 1]}, {"16 16": [9, 2]}, {"16 16": [9, 3]}, {"16 16": [9, 4]}, {"16 16": [9, 5]}, {"16 16": [9, 6]}, {"16 16": [9, 7]}, {"16 16": [9, 8]}, {"16 16": [9, 9]}, {"16 16": [9, 10]}, {"16 16": [9, 11]}, {"16 16": [9, 12]}, {"16 16": [10, 0]}, {"16 16": [10, 1]}, {"16 16": [10, 2]}, {"16 16": [10, 3]}, {"16 16": [10, 4]}, {"16 16": [10, 5]}, {"16 16": [10, 6]}, {"16 16": [10, 7]}, {"16 16": [10, 8]}, {"16 16": [10, 9]}, {"16 16": [10, 10]}, {"16 16": [10, 11]}, {"16 16": [10, 12]}, {"16 16": [11, 0]}, {"16 16": [11, 1]}, {"16 16": [11, 2]}, {"16 16": [11, 3]}, {"16 16": [11, 4]}, {"16 16": [11, 5]}, {"16 16": [11, 6]}, {"16 16": [11, 7]}, {"16 16": [11, 8]}, {"16 16": [11, 9]}, {"16 16": [11, 10]}, {"16 16": [11, 11]}, {"16 16": [11, 12]}, {"16 16": [12, 0]}, {"16 16": [12, 1]}, {"16 16": [12, 2]}, {"16 16": [12, 3]}, {"16 16": [12, 4]}, {"16 16": [12, 5]}, {"16 16": [12, 6]}, {"16 16": [12, 7]}, {"16 16": [12, 8]}, {"16 16": [12, 9]}, {"16 16": [12, 10]}, {"16 16": [12, 11]}, {"16 16": [12, 12]}]
10 |
11 | [graph]
12 | create_from_DFT = True
13 |
14 | [train]
15 | epochs = 5000
16 | train_ratio = 0.6
17 | val_ratio = 0.2
18 | test_ratio = 0.2
19 | revert_then_decay = True
20 | revert_decay_epoch = [800, 2000, 3000, 4000]
21 | revert_decay_gamma = [0.4, 0.5, 0.5, 0.4]
22 |
23 | [hyperparameter]
24 | batch_size = 1
25 | learning_rate = 0.001
26 |
27 | [network]
28 | gauss_stop = 7.0
29 |
--------------------------------------------------------------------------------
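Taken together, the four MoS2 configs partition the Hamiltonian blocks by element pair ("42 42", "42 16", "16 42", "16 16"), and the length of each `orbital` list follows directly from the basis sizes (19 orbitals on Mo, 13 on S). A quick check of the expected entry counts:

```python
# Expected number of `orbital` entries per MoS2 config, from the basis sizes.
n_mo, n_s = 19, 13     # orbitals per Mo and S atom in this basis
print(n_mo * n_mo)     # 361 entries in MoS2_1.ini ("42 42", Mo-Mo)
print(n_mo * n_s)      # 247 entries in MoS2_2.ini ("42 16", Mo-S)
print(n_s * n_mo)      # 247 entries in MoS2_3.ini ("16 42", S-Mo)
print(n_s * n_s)       # 169 entries in MoS2_4.ini ("16 16", S-S)
```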
/ini/TBB.ini:
--------------------------------------------------------------------------------
1 | [basic]
2 | graph_dir = /home/user/DeepH/graph_data/
3 | save_dir = /home/user/DeepH/result/
4 | raw_dir = /path/to/downloaded/dataset
5 | dataset_name = TBB
6 | interface = h5
7 | save_to_time_folder = True
8 | save_csv = False
9 | orbital = [{"83 83": [0, 0]}, {"83 83": [0, 1]}, {"83 83": [0, 2]}, {"83 83": [0, 3]}, {"83 83": [0, 4]}, {"83 83": [0, 5]}, {"83 83": [0, 6]}, {"83 83": [0, 7]}, {"83 83": [0, 8]}, {"83 83": [0, 9]}, {"83 83": [0, 10]}, {"83 83": [0, 11]}, {"83 83": [0, 12]}, {"83 83": [0, 13]}, {"83 83": [0, 14]}, {"83 83": [0, 15]}, {"83 83": [0, 16]}, {"83 83": [0, 17]}, {"83 83": [0, 18]}, {"83 83": [1, 0]}, {"83 83": [1, 1]}, {"83 83": [1, 2]}, {"83 83": [1, 3]}, {"83 83": [1, 4]}, {"83 83": [1, 5]}, {"83 83": [1, 6]}, {"83 83": [1, 7]}, {"83 83": [1, 8]}, {"83 83": [1, 9]}, {"83 83": [1, 10]}, {"83 83": [1, 11]}, {"83 83": [1, 12]}, {"83 83": [1, 13]}, {"83 83": [1, 14]}, {"83 83": [1, 15]}, {"83 83": [1, 16]}, {"83 83": [1, 17]}, {"83 83": [1, 18]}, {"83 83": [2, 0]}, {"83 83": [2, 1]}, {"83 83": [2, 2]}, {"83 83": [2, 3]}, {"83 83": [2, 4]}, {"83 83": [2, 5]}, {"83 83": [2, 6]}, {"83 83": [2, 7]}, {"83 83": [2, 8]}, {"83 83": [2, 9]}, {"83 83": [2, 10]}, {"83 83": [2, 11]}, {"83 83": [2, 12]}, {"83 83": [2, 13]}, {"83 83": [2, 14]}, {"83 83": [2, 15]}, {"83 83": [2, 16]}, {"83 83": [2, 17]}, {"83 83": [2, 18]}, {"83 83": [3, 0]}, {"83 83": [3, 1]}, {"83 83": [3, 2]}, {"83 83": [3, 3]}, {"83 83": [3, 4]}, {"83 83": [3, 5]}, {"83 83": [3, 6]}, {"83 83": [3, 7]}, {"83 83": [3, 8]}, {"83 83": [3, 9]}, {"83 83": [3, 10]}, {"83 83": [3, 11]}, {"83 83": [3, 12]}, {"83 83": [3, 13]}, {"83 83": [3, 14]}, {"83 83": [3, 15]}, {"83 83": [3, 16]}, {"83 83": [3, 17]}, {"83 83": [3, 18]}, {"83 83": [4, 0]}, {"83 83": [4, 1]}, {"83 83": [4, 2]}, {"83 83": [4, 3]}, {"83 83": [4, 4]}, {"83 83": [4, 5]}, {"83 83": [4, 6]}, {"83 83": [4, 7]}, {"83 83": [4, 8]}, {"83 83": [4, 9]}, {"83 83": [4, 10]}, {"83 83": [4, 11]}, {"83 83": [4, 12]}, {"83 83": [4, 13]}, {"83 83": [4, 14]}, {"83 83": [4, 15]}, {"83 83": [4, 16]}, {"83 83": [4, 17]}, {"83 83": [4, 18]}, {"83 83": [5, 0]}, {"83 83": [5, 1]}, {"83 83": [5, 2]}, {"83 83": [5, 3]}, {"83 83": [5, 4]}, {"83 83": [5, 5]}, {"83 83": [5, 6]}, {"83 83": [5, 7]}, {"83 83": [5, 8]}, {"83 83": [5, 9]}, {"83 83": [5, 10]}, {"83 83": [5, 11]}, {"83 83": [5, 12]}, {"83 83": [5, 13]}, {"83 83": [5, 14]}, {"83 83": [5, 15]}, {"83 83": [5, 16]}, {"83 83": [5, 17]}, {"83 83": [5, 18]}, {"83 83": [6, 0]}, {"83 83": [6, 1]}, {"83 83": [6, 2]}, {"83 83": [6, 3]}, {"83 83": [6, 4]}, {"83 83": [6, 5]}, {"83 83": [6, 6]}, {"83 83": [6, 7]}, {"83 83": [6, 8]}, {"83 83": [6, 9]}, {"83 83": [6, 10]}, {"83 83": [6, 11]}, {"83 83": [6, 12]}, {"83 83": [6, 13]}, {"83 83": [6, 14]}, {"83 83": [6, 15]}, {"83 83": [6, 16]}, {"83 83": [6, 17]}, {"83 83": [6, 18]}, {"83 83": [7, 0]}, {"83 83": [7, 1]}, {"83 83": [7, 2]}, {"83 83": [7, 3]}, {"83 83": [7, 4]}, {"83 83": [7, 5]}, {"83 83": [7, 6]}, {"83 83": [7, 7]}, {"83 83": [7, 8]}, {"83 83": [7, 9]}, {"83 83": [7, 10]}, {"83 83": [7, 11]}, {"83 83": [7, 12]}, {"83 83": [7, 13]}, {"83 83": [7, 14]}, {"83 83": [7, 15]}, {"83 83": [7, 16]}, {"83 83": [7, 17]}, {"83 83": [7, 18]}, {"83 83": [8, 0]}, {"83 83": [8, 1]}, {"83 83": [8, 2]}, {"83 83": [8, 3]}, {"83 83": [8, 4]}, {"83 83": [8, 5]}, {"83 83": [8, 6]}, {"83 83": [8, 7]}, {"83 83": [8, 8]}, {"83 83": [8, 9]}, {"83 83": [8, 10]}, {"83 83": [8, 11]}, {"83 83": [8, 12]}, {"83 83": [8, 13]}, {"83 83": [8, 14]}, {"83 83": [8, 15]}, {"83 83": [8, 16]}, {"83 83": [8, 17]}, {"83 83": [8, 18]}, {"83 83": [9, 0]}, {"83 83": [9, 1]}, {"83 83": [9, 2]}, {"83 83": [9, 3]}, {"83 83": [9, 4]}, {"83 83": [9, 5]}, {"83 83": [9, 6]}, {"83 83": [9, 7]}, {"83 83": [9, 8]}, {"83 83": [9, 9]}, {"83 83": [9, 10]}, 
{"83 83": [9, 11]}, {"83 83": [9, 12]}, {"83 83": [9, 13]}, {"83 83": [9, 14]}, {"83 83": [9, 15]}, {"83 83": [9, 16]}, {"83 83": [9, 17]}, {"83 83": [9, 18]}, {"83 83": [10, 0]}, {"83 83": [10, 1]}, {"83 83": [10, 2]}, {"83 83": [10, 3]}, {"83 83": [10, 4]}, {"83 83": [10, 5]}, {"83 83": [10, 6]}, {"83 83": [10, 7]}, {"83 83": [10, 8]}, {"83 83": [10, 9]}, {"83 83": [10, 10]}, {"83 83": [10, 11]}, {"83 83": [10, 12]}, {"83 83": [10, 13]}, {"83 83": [10, 14]}, {"83 83": [10, 15]}, {"83 83": [10, 16]}, {"83 83": [10, 17]}, {"83 83": [10, 18]}, {"83 83": [11, 0]}, {"83 83": [11, 1]}, {"83 83": [11, 2]}, {"83 83": [11, 3]}, {"83 83": [11, 4]}, {"83 83": [11, 5]}, {"83 83": [11, 6]}, {"83 83": [11, 7]}, {"83 83": [11, 8]}, {"83 83": [11, 9]}, {"83 83": [11, 10]}, {"83 83": [11, 11]}, {"83 83": [11, 12]}, {"83 83": [11, 13]}, {"83 83": [11, 14]}, {"83 83": [11, 15]}, {"83 83": [11, 16]}, {"83 83": [11, 17]}, {"83 83": [11, 18]}, {"83 83": [12, 0]}, {"83 83": [12, 1]}, {"83 83": [12, 2]}, {"83 83": [12, 3]}, {"83 83": [12, 4]}, {"83 83": [12, 5]}, {"83 83": [12, 6]}, {"83 83": [12, 7]}, {"83 83": [12, 8]}, {"83 83": [12, 9]}, {"83 83": [12, 10]}, {"83 83": [12, 11]}, {"83 83": [12, 12]}, {"83 83": [12, 13]}, {"83 83": [12, 14]}, {"83 83": [12, 15]}, {"83 83": [12, 16]}, {"83 83": [12, 17]}, {"83 83": [12, 18]}, {"83 83": [13, 0]}, {"83 83": [13, 1]}, {"83 83": [13, 2]}, {"83 83": [13, 3]}, {"83 83": [13, 4]}, {"83 83": [13, 5]}, {"83 83": [13, 6]}, {"83 83": [13, 7]}, {"83 83": [13, 8]}, {"83 83": [13, 9]}, {"83 83": [13, 10]}, {"83 83": [13, 11]}, {"83 83": [13, 12]}, {"83 83": [13, 13]}, {"83 83": [13, 14]}, {"83 83": [13, 15]}, {"83 83": [13, 16]}, {"83 83": [13, 17]}, {"83 83": [13, 18]}, {"83 83": [14, 0]}, {"83 83": [14, 1]}, {"83 83": [14, 2]}, {"83 83": [14, 3]}, {"83 83": [14, 4]}, {"83 83": [14, 5]}, {"83 83": [14, 6]}, {"83 83": [14, 7]}, {"83 83": [14, 8]}, {"83 83": [14, 9]}, {"83 83": [14, 10]}, {"83 83": [14, 11]}, {"83 83": [14, 12]}, {"83 83": [14, 13]}, {"83 83": [14, 14]}, {"83 83": [14, 15]}, {"83 83": [14, 16]}, {"83 83": [14, 17]}, {"83 83": [14, 18]}, {"83 83": [15, 0]}, {"83 83": [15, 1]}, {"83 83": [15, 2]}, {"83 83": [15, 3]}, {"83 83": [15, 4]}, {"83 83": [15, 5]}, {"83 83": [15, 6]}, {"83 83": [15, 7]}, {"83 83": [15, 8]}, {"83 83": [15, 9]}, {"83 83": [15, 10]}, {"83 83": [15, 11]}, {"83 83": [15, 12]}, {"83 83": [15, 13]}, {"83 83": [15, 14]}, {"83 83": [15, 15]}, {"83 83": [15, 16]}, {"83 83": [15, 17]}, {"83 83": [15, 18]}, {"83 83": [16, 0]}, {"83 83": [16, 1]}, {"83 83": [16, 2]}, {"83 83": [16, 3]}, {"83 83": [16, 4]}, {"83 83": [16, 5]}, {"83 83": [16, 6]}, {"83 83": [16, 7]}, {"83 83": [16, 8]}, {"83 83": [16, 9]}, {"83 83": [16, 10]}, {"83 83": [16, 11]}, {"83 83": [16, 12]}, {"83 83": [16, 13]}, {"83 83": [16, 14]}, {"83 83": [16, 15]}, {"83 83": [16, 16]}, {"83 83": [16, 17]}, {"83 83": [16, 18]}, {"83 83": [17, 0]}, {"83 83": [17, 1]}, {"83 83": [17, 2]}, {"83 83": [17, 3]}, {"83 83": [17, 4]}, {"83 83": [17, 5]}, {"83 83": [17, 6]}, {"83 83": [17, 7]}, {"83 83": [17, 8]}, {"83 83": [17, 9]}, {"83 83": [17, 10]}, {"83 83": [17, 11]}, {"83 83": [17, 12]}, {"83 83": [17, 13]}, {"83 83": [17, 14]}, {"83 83": [17, 15]}, {"83 83": [17, 16]}, {"83 83": [17, 17]}, {"83 83": [17, 18]}, {"83 83": [18, 0]}, {"83 83": [18, 1]}, {"83 83": [18, 2]}, {"83 83": [18, 3]}, {"83 83": [18, 4]}, {"83 83": [18, 5]}, {"83 83": [18, 6]}, {"83 83": [18, 7]}, {"83 83": [18, 8]}, {"83 83": [18, 9]}, {"83 83": [18, 10]}, {"83 83": [18, 11]}, {"83 83": [18, 12]}, {"83 83": [18, 
13]}, {"83 83": [18, 14]}, {"83 83": [18, 15]}, {"83 83": [18, 16]}, {"83 83": [18, 17]}, {"83 83": [18, 18]}]
10 |
11 | [graph]
12 | create_from_DFT = True
13 |
14 | [train]
15 | epochs = 5000
16 | train_ratio = 0.6
17 | val_ratio = 0.2
18 | test_ratio = 0.2
19 | revert_then_decay = True
20 | revert_decay_epoch = [800, 2000, 3000, 4000]
21 | revert_decay_gamma = [0.4, 0.5, 0.5, 0.4]
22 |
23 | [hyperparameter]
24 | batch_size = 1
25 | learning_rate = 0.001
26 |
27 | [network]
28 | gauss_stop = 8.0
29 |
--------------------------------------------------------------------------------
/ini/TBG.ini:
--------------------------------------------------------------------------------
1 | [basic]
2 | graph_dir = /home/user/DeepH/graph_data/
3 | save_dir = /home/user/DeepH/result/
4 | raw_dir = /path/to/downloaded/dataset
5 | dataset_name = TBG
6 | interface = h5
7 | save_to_time_folder = True
8 | save_csv = False
9 | orbital = [{"6 6": [0, 0]}, {"6 6": [0, 1]}, {"6 6": [0, 2]}, {"6 6": [0, 3]}, {"6 6": [0, 4]}, {"6 6": [0, 5]}, {"6 6": [0, 6]}, {"6 6": [0, 7]}, {"6 6": [0, 8]}, {"6 6": [0, 9]}, {"6 6": [0, 10]}, {"6 6": [0, 11]}, {"6 6": [0, 12]}, {"6 6": [1, 0]}, {"6 6": [1, 1]}, {"6 6": [1, 2]}, {"6 6": [1, 3]}, {"6 6": [1, 4]}, {"6 6": [1, 5]}, {"6 6": [1, 6]}, {"6 6": [1, 7]}, {"6 6": [1, 8]}, {"6 6": [1, 9]}, {"6 6": [1, 10]}, {"6 6": [1, 11]}, {"6 6": [1, 12]}, {"6 6": [2, 0]}, {"6 6": [2, 1]}, {"6 6": [2, 2]}, {"6 6": [2, 3]}, {"6 6": [2, 4]}, {"6 6": [2, 5]}, {"6 6": [2, 6]}, {"6 6": [2, 7]}, {"6 6": [2, 8]}, {"6 6": [2, 9]}, {"6 6": [2, 10]}, {"6 6": [2, 11]}, {"6 6": [2, 12]}, {"6 6": [3, 0]}, {"6 6": [3, 1]}, {"6 6": [3, 2]}, {"6 6": [3, 3]}, {"6 6": [3, 4]}, {"6 6": [3, 5]}, {"6 6": [3, 6]}, {"6 6": [3, 7]}, {"6 6": [3, 8]}, {"6 6": [3, 9]}, {"6 6": [3, 10]}, {"6 6": [3, 11]}, {"6 6": [3, 12]}, {"6 6": [4, 0]}, {"6 6": [4, 1]}, {"6 6": [4, 2]}, {"6 6": [4, 3]}, {"6 6": [4, 4]}, {"6 6": [4, 5]}, {"6 6": [4, 6]}, {"6 6": [4, 7]}, {"6 6": [4, 8]}, {"6 6": [4, 9]}, {"6 6": [4, 10]}, {"6 6": [4, 11]}, {"6 6": [4, 12]}, {"6 6": [5, 0]}, {"6 6": [5, 1]}, {"6 6": [5, 2]}, {"6 6": [5, 3]}, {"6 6": [5, 4]}, {"6 6": [5, 5]}, {"6 6": [5, 6]}, {"6 6": [5, 7]}, {"6 6": [5, 8]}, {"6 6": [5, 9]}, {"6 6": [5, 10]}, {"6 6": [5, 11]}, {"6 6": [5, 12]}, {"6 6": [6, 0]}, {"6 6": [6, 1]}, {"6 6": [6, 2]}, {"6 6": [6, 3]}, {"6 6": [6, 4]}, {"6 6": [6, 5]}, {"6 6": [6, 6]}, {"6 6": [6, 7]}, {"6 6": [6, 8]}, {"6 6": [6, 9]}, {"6 6": [6, 10]}, {"6 6": [6, 11]}, {"6 6": [6, 12]}, {"6 6": [7, 0]}, {"6 6": [7, 1]}, {"6 6": [7, 2]}, {"6 6": [7, 3]}, {"6 6": [7, 4]}, {"6 6": [7, 5]}, {"6 6": [7, 6]}, {"6 6": [7, 7]}, {"6 6": [7, 8]}, {"6 6": [7, 9]}, {"6 6": [7, 10]}, {"6 6": [7, 11]}, {"6 6": [7, 12]}, {"6 6": [8, 0]}, {"6 6": [8, 1]}, {"6 6": [8, 2]}, {"6 6": [8, 3]}, {"6 6": [8, 4]}, {"6 6": [8, 5]}, {"6 6": [8, 6]}, {"6 6": [8, 7]}, {"6 6": [8, 8]}, {"6 6": [8, 9]}, {"6 6": [8, 10]}, {"6 6": [8, 11]}, {"6 6": [8, 12]}, {"6 6": [9, 0]}, {"6 6": [9, 1]}, {"6 6": [9, 2]}, {"6 6": [9, 3]}, {"6 6": [9, 4]}, {"6 6": [9, 5]}, {"6 6": [9, 6]}, {"6 6": [9, 7]}, {"6 6": [9, 8]}, {"6 6": [9, 9]}, {"6 6": [9, 10]}, {"6 6": [9, 11]}, {"6 6": [9, 12]}, {"6 6": [10, 0]}, {"6 6": [10, 1]}, {"6 6": [10, 2]}, {"6 6": [10, 3]}, {"6 6": [10, 4]}, {"6 6": [10, 5]}, {"6 6": [10, 6]}, {"6 6": [10, 7]}, {"6 6": [10, 8]}, {"6 6": [10, 9]}, {"6 6": [10, 10]}, {"6 6": [10, 11]}, {"6 6": [10, 12]}, {"6 6": [11, 0]}, {"6 6": [11, 1]}, {"6 6": [11, 2]}, {"6 6": [11, 3]}, {"6 6": [11, 4]}, {"6 6": [11, 5]}, {"6 6": [11, 6]}, {"6 6": [11, 7]}, {"6 6": [11, 8]}, {"6 6": [11, 9]}, {"6 6": [11, 10]}, {"6 6": [11, 11]}, {"6 6": [11, 12]}, {"6 6": [12, 0]}, {"6 6": [12, 1]}, {"6 6": [12, 2]}, {"6 6": [12, 3]}, {"6 6": [12, 4]}, {"6 6": [12, 5]}, {"6 6": [12, 6]}, {"6 6": [12, 7]}, {"6 6": [12, 8]}, {"6 6": [12, 9]}, {"6 6": [12, 10]}, {"6 6": [12, 11]}, {"6 6": [12, 12]}]
10 |
11 | [graph]
12 | create_from_DFT = True
13 |
14 | [train]
15 | epochs = 5000
16 | train_ratio = 0.6
17 | val_ratio = 0.2
18 | test_ratio = 0.2
19 | revert_then_decay = True
20 | revert_decay_epoch = [800, 2000, 3000, 4000]
21 | revert_decay_gamma = [0.4, 0.5, 0.5, 0.4]
22 |
23 | [hyperparameter]
24 | batch_size = 1
25 | learning_rate = 0.001
26 |
27 | [network]
28 | gauss_stop = 6.0
29 |
--------------------------------------------------------------------------------
/ini/graphene.ini:
--------------------------------------------------------------------------------
1 | [basic]
2 | graph_dir = /home/user/DeepH/graph_data/
3 | save_dir = /home/user/DeepH/result/
4 | raw_dir = /path/to/downloaded/dataset
5 | dataset_name = graphene
6 | interface = npz
7 | save_to_time_folder = True
8 | save_csv = False
9 | orbital = [{"6 6": [0, 0]}, {"6 6": [0, 1]}, {"6 6": [0, 2]}, {"6 6": [0, 3]}, {"6 6": [0, 4]}, {"6 6": [0, 5]}, {"6 6": [0, 6]}, {"6 6": [0, 7]}, {"6 6": [0, 8]}, {"6 6": [0, 9]}, {"6 6": [0, 10]}, {"6 6": [0, 11]}, {"6 6": [0, 12]}, {"6 6": [1, 0]}, {"6 6": [1, 1]}, {"6 6": [1, 2]}, {"6 6": [1, 3]}, {"6 6": [1, 4]}, {"6 6": [1, 5]}, {"6 6": [1, 6]}, {"6 6": [1, 7]}, {"6 6": [1, 8]}, {"6 6": [1, 9]}, {"6 6": [1, 10]}, {"6 6": [1, 11]}, {"6 6": [1, 12]}, {"6 6": [2, 0]}, {"6 6": [2, 1]}, {"6 6": [2, 2]}, {"6 6": [2, 3]}, {"6 6": [2, 4]}, {"6 6": [2, 5]}, {"6 6": [2, 6]}, {"6 6": [2, 7]}, {"6 6": [2, 8]}, {"6 6": [2, 9]}, {"6 6": [2, 10]}, {"6 6": [2, 11]}, {"6 6": [2, 12]}, {"6 6": [3, 0]}, {"6 6": [3, 1]}, {"6 6": [3, 2]}, {"6 6": [3, 3]}, {"6 6": [3, 4]}, {"6 6": [3, 5]}, {"6 6": [3, 6]}, {"6 6": [3, 7]}, {"6 6": [3, 8]}, {"6 6": [3, 9]}, {"6 6": [3, 10]}, {"6 6": [3, 11]}, {"6 6": [3, 12]}, {"6 6": [4, 0]}, {"6 6": [4, 1]}, {"6 6": [4, 2]}, {"6 6": [4, 3]}, {"6 6": [4, 4]}, {"6 6": [4, 5]}, {"6 6": [4, 6]}, {"6 6": [4, 7]}, {"6 6": [4, 8]}, {"6 6": [4, 9]}, {"6 6": [4, 10]}, {"6 6": [4, 11]}, {"6 6": [4, 12]}, {"6 6": [5, 0]}, {"6 6": [5, 1]}, {"6 6": [5, 2]}, {"6 6": [5, 3]}, {"6 6": [5, 4]}, {"6 6": [5, 5]}, {"6 6": [5, 6]}, {"6 6": [5, 7]}, {"6 6": [5, 8]}, {"6 6": [5, 9]}, {"6 6": [5, 10]}, {"6 6": [5, 11]}, {"6 6": [5, 12]}, {"6 6": [6, 0]}, {"6 6": [6, 1]}, {"6 6": [6, 2]}, {"6 6": [6, 3]}, {"6 6": [6, 4]}, {"6 6": [6, 5]}, {"6 6": [6, 6]}, {"6 6": [6, 7]}, {"6 6": [6, 8]}, {"6 6": [6, 9]}, {"6 6": [6, 10]}, {"6 6": [6, 11]}, {"6 6": [6, 12]}, {"6 6": [7, 0]}, {"6 6": [7, 1]}, {"6 6": [7, 2]}, {"6 6": [7, 3]}, {"6 6": [7, 4]}, {"6 6": [7, 5]}, {"6 6": [7, 6]}, {"6 6": [7, 7]}, {"6 6": [7, 8]}, {"6 6": [7, 9]}, {"6 6": [7, 10]}, {"6 6": [7, 11]}, {"6 6": [7, 12]}, {"6 6": [8, 0]}, {"6 6": [8, 1]}, {"6 6": [8, 2]}, {"6 6": [8, 3]}, {"6 6": [8, 4]}, {"6 6": [8, 5]}, {"6 6": [8, 6]}, {"6 6": [8, 7]}, {"6 6": [8, 8]}, {"6 6": [8, 9]}, {"6 6": [8, 10]}, {"6 6": [8, 11]}, {"6 6": [8, 12]}, {"6 6": [9, 0]}, {"6 6": [9, 1]}, {"6 6": [9, 2]}, {"6 6": [9, 3]}, {"6 6": [9, 4]}, {"6 6": [9, 5]}, {"6 6": [9, 6]}, {"6 6": [9, 7]}, {"6 6": [9, 8]}, {"6 6": [9, 9]}, {"6 6": [9, 10]}, {"6 6": [9, 11]}, {"6 6": [9, 12]}, {"6 6": [10, 0]}, {"6 6": [10, 1]}, {"6 6": [10, 2]}, {"6 6": [10, 3]}, {"6 6": [10, 4]}, {"6 6": [10, 5]}, {"6 6": [10, 6]}, {"6 6": [10, 7]}, {"6 6": [10, 8]}, {"6 6": [10, 9]}, {"6 6": [10, 10]}, {"6 6": [10, 11]}, {"6 6": [10, 12]}, {"6 6": [11, 0]}, {"6 6": [11, 1]}, {"6 6": [11, 2]}, {"6 6": [11, 3]}, {"6 6": [11, 4]}, {"6 6": [11, 5]}, {"6 6": [11, 6]}, {"6 6": [11, 7]}, {"6 6": [11, 8]}, {"6 6": [11, 9]}, {"6 6": [11, 10]}, {"6 6": [11, 11]}, {"6 6": [11, 12]}, {"6 6": [12, 0]}, {"6 6": [12, 1]}, {"6 6": [12, 2]}, {"6 6": [12, 3]}, {"6 6": [12, 4]}, {"6 6": [12, 5]}, {"6 6": [12, 6]}, {"6 6": [12, 7]}, {"6 6": [12, 8]}, {"6 6": [12, 9]}, {"6 6": [12, 10]}, {"6 6": [12, 11]}, {"6 6": [12, 12]}]
10 |
11 | [graph]
12 | create_from_DFT = False
13 | radius = 6.0
14 |
15 | [train]
16 | epochs = 5000
17 | train_ratio = 0.6
18 | val_ratio = 0.2
19 | test_ratio = 0.2
20 | revert_then_decay = True
21 | revert_decay_epoch = [800, 2000, 3000, 4000]
22 | revert_decay_gamma = [0.4, 0.5, 0.5, 0.4]
23 |
24 | [hyperparameter]
25 | batch_size = 4
26 | learning_rate = 0.001
27 |
28 | [network]
29 | gauss_stop = 6.0
30 |
--------------------------------------------------------------------------------
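Unlike the other samples, graphene.ini reads `npz`-format data, builds the crystal graph from a fixed cutoff (`create_from_DFT = False` with `radius = 6.0`) rather than taking it from the DFT output, and trains with `batch_size = 4`. A sketch of reading these typed fields back, again assuming plain configparser rather than DeepH-pack's actual loader:

```python
# Illustrative sketch: typed access to the graphene.ini fields shown above.
import configparser
import json

cfg = configparser.ConfigParser()
cfg.read("ini/graphene.ini")

create_from_dft = cfg["graph"].getboolean("create_from_DFT")   # False
radius = cfg["graph"].getfloat("radius")                       # 6.0, used only when the graph is not from DFT
epochs = cfg["train"].getint("epochs")                         # 5000
decay_epochs = json.loads(cfg["train"]["revert_decay_epoch"])  # [800, 2000, 3000, 4000]
batch_size = cfg["hyperparameter"].getint("batch_size")        # 4
print(create_from_dft, radius, epochs, decay_epochs, batch_size)
```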
/logo/logo_word.svg:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | import os.path
2 | import codecs
3 | from setuptools import setup, find_packages
4 |
5 |
6 | def read(rel_path):
7 |     here = os.path.abspath(os.path.dirname(__file__))
8 |     with codecs.open(os.path.join(here, rel_path), 'r') as fp:
9 |         return fp.read()
10 |
11 |
12 | def get_version(rel_path):
13 |     for line in read(rel_path).splitlines():
14 |         if line.startswith('__version__'):
15 |             delim = '"' if '"' in line else "'"
16 |             return line.split(delim)[1]
17 |     else:
18 |         raise RuntimeError("Unable to find version string.")
19 |
20 |
21 | setup(
22 |     name="deeph",
23 |     version=get_version("deeph/__init__.py"),
24 |     description="DeepH-pack is the official implementation of the Deep Hamiltonian (DeepH) method.",
25 |     download_url="https://github.com/mzjb/DeepH-pack",
26 |     author="He Li",
27 |     python_requires=">=3.9",
28 |     packages=find_packages(),
29 |     package_dir={'deeph': 'deeph'},
30 |     package_data={'': ['*.jl', '*.ini', 'periodic_table.json']},
31 |     entry_points={
32 |         "console_scripts": [
33 |             "deeph-preprocess = deeph.scripts.preprocess:main",
34 |             "deeph-train = deeph.scripts.train:main",
35 |             "deeph-evaluate = deeph.scripts.evaluate:main",
36 |             "deeph-inference = deeph.scripts.inference:main",
37 |         ]
38 |     },
39 |     install_requires=[
40 |         "numpy",
41 |         "scipy",
42 |         "torch>=1.9",
43 |         "torch_geometric>=1.7.2",
44 |         "e3nn>=0.3.5, <=0.4.4",
45 |         "h5py",
46 |         "pymatgen",
47 |         "pathos",
48 |         "psutil",
49 |         "tqdm",
50 |         "tensorboard",
51 |     ],
52 |     license="LGPL-3.0",
53 |     license_files="LICENSE",
54 |     zip_safe=False,
55 | )
56 |
--------------------------------------------------------------------------------
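One subtlety in `get_version` above is the `for`/`else`: the `else` clause runs only when the loop finishes without returning, i.e. when no `__version__` line exists. A self-contained illustration (`find_version` is a stand-in name for this sketch, not part of the package):

```python
# Mirrors get_version's parsing on an in-memory string; `find_version` is a
# hypothetical stand-in used only for this illustration.
def find_version(text):
    for line in text.splitlines():
        if line.startswith('__version__'):
            delim = '"' if '"' in line else "'"
            return line.split(delim)[1]
    else:
        # reached only when the loop exhausts all lines without returning
        raise RuntimeError("Unable to find version string.")

print(find_version('__version__ = "0.1.0"'))  # -> 0.1.0
try:
    find_version("no version line here")
except RuntimeError as err:
    print(err)                                # -> Unable to find version string.
```

After `pip install .`, the `entry_points` block above exposes `deeph-preprocess`, `deeph-train`, `deeph-evaluate` and `deeph-inference` as console commands.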
/tools/get_all_orbital_str.py:
--------------------------------------------------------------------------------
1 | num_element = int(input("Number of atomic types: "))
2 | atomic_number = []
3 | num_orbitals = []
4 | assert num_element > 0, "Number of atomic types should be greater than 0."
5 | for index_element in range(num_element):
6 |     input1 = int(input(f"Atomic type #{index_element + 1}'s atomic number: "))
7 |     assert input1 > 0, "Atomic number should be greater than 0."
8 |     input2 = int(input(f"Atomic type #{index_element + 1}'s orbital basis number: "))
9 |     assert input2 > 0, "Orbital basis number should be greater than 0."
10 |     atomic_number.append(input1)
11 |     num_orbitals.append(input2)
12 |
13 | orbital_str = '['
14 | first_flag = True
15 | for ele_i, ele_j in ((ele_i, ele_j) for ele_i in range(num_element) for ele_j in range(num_element)):
16 |     for orb_i, orb_j in ((orb_i, orb_j) for orb_i in range(num_orbitals[ele_i]) for orb_j in range(num_orbitals[ele_j])):
17 |         if first_flag:
18 |             orbital_str += '{'
19 |             first_flag = False
20 |         else:
21 |             orbital_str += ', {'
22 |         orbital_str += f'"{atomic_number[ele_i]} {atomic_number[ele_j]}": [{orb_i}, {orb_j}]}}'
23 | orbital_str += ']'
24 | print("`orbital` keyword can be set as:")
25 | print(orbital_str)
26 |
--------------------------------------------------------------------------------
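The script builds the JSON string by manual concatenation; an equivalent formulation with `itertools.product` and `json.dumps` (the `orbital_str` helper is hypothetical, shown only to make the structure clearer) is:

```python
# Hypothetical equivalent of tools/get_all_orbital_str.py using json.dumps.
import itertools
import json

def orbital_str(elements):
    """elements: list of (atomic_number, n_orbitals) pairs."""
    entries = []
    for (z_i, n_i), (z_j, n_j) in itertools.product(elements, repeat=2):
        for orb_i, orb_j in itertools.product(range(n_i), range(n_j)):
            entries.append({f"{z_i} {z_j}": [orb_i, orb_j]})
    return json.dumps(entries)

# Mo (Z=42, 19 orbitals) and S (Z=16, 13 orbitals): the result contains the
# "42 42", "42 16", "16 42" and "16 16" entries that the four MoS2 ini files
# above split across separate training runs.
print(orbital_str([(42, 19), (16, 13)]))
```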