├── .gitignore
├── Cargo.toml
├── LICENSE
├── README.md
├── benches
│   └── bench_tensors.rs
├── build.rs
├── build.rs.bak
└── src
    ├── davidson.rs
    ├── eri.rs
    ├── external_libs
    │   ├── compile.sh
    │   ├── ffi_restmatr.rs
    │   ├── mod.rs
    │   └── restmatr.f90
    ├── index.rs
    ├── lib.rs
    ├── matrix
    │   ├── einsum.rs
    │   ├── matrix_blas_lapack.rs
    │   ├── matrix_trait.rs
    │   ├── matrixconst.rs
    │   ├── matrixfull.rs
    │   ├── matrixfullslice.rs
    │   ├── matrixupper.rs
    │   ├── mod.rs
    │   └── submatrixfull.rs
    ├── ri.rs
    └── tensor_basic_operation.rs

/.gitignore:
--------------------------------------------------------------------------------
1 | *.so
2 | 
--------------------------------------------------------------------------------
/Cargo.toml:
--------------------------------------------------------------------------------
 1 | [package]
 2 | name = "rest_tensors"
 3 | version = "0.1.1"
 4 | edition = "2021"
 5 | authors = ["Igor Ying Zhang "]
 6 | description = "Provide efficient tensor operations for the Rust-based Electronic Structure Tool (REST)"
 7 | license = "MIT OR Apache-2.0"
 8 | 
 9 | #build = "build.rs"
10 | [build-dependencies]
11 | dunce = "1.0.0"
12 | 
13 | [lib]
14 | bench = true
15 | crate-type = ["dylib", "rlib"]
16 | #crate-type = ["lib"]
17 | 
18 | [dev-dependencies]
19 | criterion = "0.4"
20 | 
21 | [[bench]]
22 | name = "bench_tensors"
23 | harness = false
24 | 
25 | 
26 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
27 | 
28 | [dependencies]
29 | lapack-sys = "0.14"
30 | lapack = "0.19"
31 | blas-sys = "0.7"
32 | blas = "0.22"
33 | num-complex = "0.4"
34 | libc = "0.2"
35 | typenum = "1.14"
36 | rayon = "1.5.1"
37 | itertools = "0.10.3"
38 | anyhow = "1"
39 | nalgebra = "0.31.1"
40 | regex = "0.1.41"
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
 1 | # rest_tensors
 2 | 
 3 | **rest_tensors** is a linear algebra library that provides efficient tensor operations for the Rust-based Electronic Structure Tool (REST).
 4 | 
 5 | ### Using rest_tensors
 6 | 
 7 | - Simply add the following to your Cargo.toml file:
 8 | ```ignore
 9 | [dependencies]
10 | # replace the * with the latest version
11 | rest_tensors = "*"
12 | ```
13 | 
14 | - Several global environment variables should be specified before building (a sample setup script is given below):
15 |   1) REST_BLAS_DIR: the path to the OpenBLAS library: `libopenblas.so`
16 |   2) REST_FORTRAN_COMPILER: the Fortran compiler used to build the library for efficient tensor operations: `restmatr.f90` -> `librestmatr.so`
17 |   3) REST_EXT_DIR: the path where the Fortran library `librestmatr.so` is stored after compilation
18 |   4) LD_LIBRARY_PATH: attach REST_BLAS_DIR and REST_EXT_DIR to LD_LIBRARY_PATH: `export LD_LIBRARY_PATH="$REST_BLAS_DIR:$REST_EXT_DIR:$LD_LIBRARY_PATH"`
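19 | 
20 | - A typical setup may thus look like the following (the paths are illustrative placeholders only; adapt them to your own installation):
21 | ```bash
22 | export REST_BLAS_DIR=/opt/OpenBLAS/lib               # contains libopenblas.so
23 | export REST_FORTRAN_COMPILER=gfortran                # or ifort, etc.
24 | export REST_EXT_DIR=$HOME/rest/lib                   # librestmatr.so is placed here after compilation
25 | export LD_LIBRARY_PATH="$REST_BLAS_DIR:$REST_EXT_DIR:$LD_LIBRARY_PATH"
26 | ```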
27 | 
28 | ### Features
29 | 
30 | * [`MatrixFull`](MatrixFull): the `column-major` rank-2 tensor, i.e. `matrix`, which is used for the molecular geometries,
31 |   orbital coefficients, density matrices, and most of the intermediate data in REST.
32 |   There are several related structures for matrices, which share the same traits, namely
33 |   [`BasicMatrix`](BasicMatrix), [`BasicMatrixOpt`](BasicMatrixOpt), [`MathMatrix`](MathMatrix) and so forth.
34 | * [`MatrixUpper`](MatrixUpper): the structure storing the upper triangle of a matrix, which is used for the Hamiltonian matrix and many other Hermitian matrices in the REST package.
35 | * [`RIFull`](RIFull): the `column-major` rank-3 tensor structure, which is used for the three-center integrals
36 |   in the resolution-of-identity approximation (RI); for example, ri3ao, ri3mo, and so forth.
37 |   **NOTE**: although RIFull is created for a very specific purpose in REST, most of the relevant operations provided here are quite general and can easily be extended to any other rank-3 tensors.
38 | * [`ERIFull`](ERIFull): the `column-major` rank-4 tensor for electron repulsion integrals (ERI).
39 |   **NOTE**: ERIFull is created to handle the analytic electron-repulsion integrals in REST. Because REST mainly uses the resolution-of-identity (RI) technique, the analytic ERI is provided only for benchmarks and is thus not fully optimized.
40 | 
41 | 
42 | * Detailed usage of [`MatrixFull`](MatrixFull) can be found on the corresponding pages, while those of [`RIFull`] and [`ERIFull`] are not yet ready.
43 | 
44 | ### To-Do-List
45 | 
46 | * Introduce more LAPACK and BLAS functions to the rank-2 matrix structs in rest_tensors, like [`MatrixFull`](MatrixFull), [`MatrixFullSlice`](MatrixFullSlice), [`SubMatrixFull`](SubMatrixFull) and so forth.
47 | * Reoptimize the API of the rank-3 tensor, mainly [`RIFull`](RIFull), and complete the detailed usage accordingly.
48 | * Enable the ScaLAPACK (scalable linear algebra package) functions for the rank-2 matrix structs in rest_tensors, like [`MatrixFull`](MatrixFull).
49 | * Conversions between `rest_tensors` and `numpy` in Python.
--------------------------------------------------------------------------------
/benches/bench_tensors.rs:
--------------------------------------------------------------------------------
 1 | use criterion::{criterion_group, criterion_main, Criterion};
 2 | use rest_tensors::{RIFull,MatrixFull};
 3 | 
 4 | 
 5 | fn ao2mo() {
 6 |     let ri3fn = RIFull::new([10,10,20],2.0);
 7 |     let eigenvector = MatrixFull::new([10,10],1.0);
 8 | 
 9 |     ri3fn.ao2mo_v02(&eigenvector).unwrap();
10 | 
11 |     //println!("{:?}", ri3mo.size);
12 | }
13 | 
14 | 
15 | fn bench_ao2mo(c: &mut Criterion) {
16 |     c.bench_function("ao2mo for RIFull", |b| b.iter(|| ao2mo()));
17 | }
18 | 
19 | criterion_group!(benches, bench_ao2mo);
20 | criterion_main!(benches);
--------------------------------------------------------------------------------
/build.rs:
--------------------------------------------------------------------------------
 1 | //extern crate dunce;
 2 | use std::path::PathBuf;
 3 | use std::{fs, env, process::Command};
 4 | 
 5 | fn main() -> std::io::Result<()> {
 6 | 
 7 |     // the lib directory to store librestmatr.so
 8 |     let external_dir = if let Ok(target_dir) = env::var("REST_EXT_DIR") {
 9 |         PathBuf::from(target_dir)
10 |     } else {PathBuf::from(".".to_string())};
11 | 
12 |     if ! external_dir.is_dir() {
13 |         fs::create_dir(&external_dir)?
14 | }; 15 | 16 | let blas_dir = if let Ok(blas_dir) = env::var("REST_BLAS_DIR") { 17 | PathBuf::from(blas_dir) 18 | } else {PathBuf::from(".".to_string())}; 19 | 20 | let fortran_compiler = if let Ok(fortran_compiler) = env::var("REST_FORTRAN_COMPILER") { 21 | fortran_compiler 22 | } else {"gfortran".to_string()}; 23 | 24 | 25 | let restmatr_file = format!("src/external_libs/restmatr.f90"); 26 | let restmatr_libr = format!("{}/librestmatr.so",&external_dir.to_str().unwrap()); 27 | let restmatr_link = format!("-L{}",&blas_dir.display()); 28 | 29 | Command::new(fortran_compiler) 30 | .args(&["-shared", "-fpic", "-O2",&restmatr_file,"-o",&restmatr_libr,&restmatr_link, "-lopenblas"]) 31 | .status().unwrap(); 32 | 33 | 34 | println!("cargo:rustc-link-lib=restmatr"); 35 | println!("cargo:rustc-link-search=native={}",&external_dir.to_str().unwrap()); 36 | 37 | 38 | println!("cargo:rustc-link-lib=openblas"); 39 | println!("cargo:rustc-link-search=native={}",&blas_dir.display()); 40 | 41 | println!("cargo:rerun-if-changed=src/external_libs/restmatr.f90"); 42 | println!("cargo:rerun-if-changed={}/librestmatr.so", &external_dir.to_str().unwrap()); 43 | 44 | Ok(()) 45 | 46 | } 47 | -------------------------------------------------------------------------------- /build.rs.bak: -------------------------------------------------------------------------------- 1 | //extern crate dunce; 2 | use std::path::PathBuf; 3 | use std::{fs, env, process::Command}; 4 | 5 | fn main() -> std::io::Result<()> { 6 | 7 | // the lib directory to store librestmatr.so 8 | let external_dir = if let Ok(target_dir) = env::var("REST_EXT_DIR") { 9 | PathBuf::from(target_dir) 10 | } else {PathBuf::from("".to_string())}; 11 | 12 | if ! external_dir.is_dir() { 13 | fs::create_dir(&external_dir)? 
14 |     };
15 | 
16 |     let blas_dir = if let Ok(blas_dir) = env::var("REST_BLAS_DIR") {
17 |         blas_dir
18 |     } else {"".to_string()};
19 |     let fortran_compiler = if let Ok(fortran_compiler) = env::var("REST_FORTRAN_COMPILER") {
20 |         fortran_compiler
21 |     } else {"gfortran".to_string()};
22 | 
23 | 
24 |     let restmatr_file = format!("src/external_libs/restmatr.f90");
25 |     let restmatr_libr = format!("{}/librestmatr.so",&external_dir.to_str().unwrap());
26 |     let restmatr_link = format!("-L{} -lopenblas",&blas_dir);
27 | 
28 |     Command::new(fortran_compiler)
29 |         .args(&["-shared", "-fpic", "-O2",&restmatr_file,"-o",&restmatr_libr,&restmatr_link])
30 |         .status().unwrap();
31 | 
32 | 
33 |     println!("cargo:rustc-link-lib=restmatr");
34 |     println!("cargo:rustc-link-search=native={}",&external_dir.to_str().unwrap());
35 | 
36 | 
37 |     println!("cargo:rustc-link-lib=openblas");
38 |     println!("cargo:rustc-link-search=native={}",&blas_dir);
39 | 
40 |     println!("cargo:rerun-if-changed=src/external_libs/restmatr.f90");
41 |     println!("cargo:rerun-if-changed={}/librestmatr.so", &external_dir.to_str().unwrap());
42 | 
43 |     Ok(())
44 | 
45 | }
46 | 
--------------------------------------------------------------------------------
/src/davidson.rs:
--------------------------------------------------------------------------------
  1 | //
  2 | //pub fn davidson(Ax: Box<dyn MatrixOperations>) -> (DVector<f64>, DMatrix<f64>) {
  3 | //    impl MatrixOperations for DMatrix<f64> {
  4 | //        fn matrix_vector_prod(&self, vs: DVectorSlice<f64>) -> DVector<f64> {
  5 | //            Ax(vs)
  6 | //        }
  7 | //    }
  8 | //    let arr = generate_diagonal_dominant(10, 0.005);
  9 | //    //let eig = sort_eigenpairs(nalgebra::linalg::SymmetricEigen::new(arr.clone()), true);
 10 | //    let spectrum_target = SpectrumTarget::Lowest;
 11 | //    let tolerance = 1.0e-4;
 12 | //
 13 | //    let dav = Davidson::new(
 14 | //        arr.clone(),
 15 | //        2,
 16 | //        DavidsonCorrection::DPR,
 17 | //        spectrum_target.clone(),
 18 | //        tolerance,
 19 | //    )
 20 | //    .unwrap();
 21 | //    (dav.eigenvalues, dav.eigenvectors)
 22 | //}
 23 | use std::default::Default;
 24 | use crate::MatrixFull;
 25 | use nalgebra::DVector;
 26 | 
 27 | #[derive(Debug)]
 28 | pub struct DavidsonParams {
 29 |     pub tol: f64,
 30 |     pub maxcyc: usize,
 31 |     pub maxspace: usize,
 32 |     pub lindep: f64,
 33 |     pub nroots: usize
 34 | }
 35 | 
 36 | impl Default for DavidsonParams {
 37 |     fn default() -> Self {
 38 |         DavidsonParams{
 39 |             tol: 1e-5,
 40 |             maxcyc: 50,
 41 |             maxspace: 12,
 42 |             lindep: 1e-14,
 43 |             nroots: 3
 44 |         }
 45 |     }
 46 | }
 47 | 
 48 | pub fn davidson_solve(mut a_x: Box<dyn FnMut(&mut Vec<f64>) -> Vec<f64> + '_>,
 49 |                       x0_1d: &mut Vec<f64>,
 50 |                       hdiag: &mut Vec<f64>,
 51 |                       params: &DavidsonParams,
 52 |                       print_level: usize,
 53 |                       ) -> (Vec<bool>, Vec<f64>, Vec<Vec<f64>>) {
 54 |     let tol = params.tol;
 55 |     let maxcyc = params.maxcyc;
 56 |     let mut maxspace = params.maxspace;
 57 |     let lindep = params.lindep;
 58 |     let nroots = params.nroots;
 59 |     println!("Davidson solver parameters:");
 60 |     println!("    tol {:?} maxcyc {:?} maxspace {:?} nroots {:?}", tol, maxcyc, maxspace, nroots);
 61 |     let tol_res = tol.sqrt();
 62 | 
 63 |     let precond = |dx: Vec<f64>, e: f64| -> Vec<f64> {
 64 |         let mut hdiagd = hdiag.clone();
 65 |         hdiagd.iter_mut().for_each(|h| *h -= e);
 66 |         hdiagd.iter_mut().for_each(|h| if h.abs()<1e-8 {*h = 1e-8});
 67 |         let mut x2 = dx.clone();
 68 |         x2.iter_mut().zip(hdiagd.iter()).for_each(|(x, h)| *x /= *h);
 69 |         x2
 70 |     };
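    // the closure above is the diagonal Davidson preconditioner: it divides
    // the residual by (hdiag - e), replacing denominators smaller than 1e-8
    // in magnitude by 1e-8 to avoid division by (near) zero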
 71 | 
 72 |     let mut x0:Vec<Vec<f64>> = vec![];
 73 |     x0.push(x0_1d.to_vec());
 74 |     maxspace = maxspace + (nroots-1) * 3;
 75 |     //let mut heff = MatrixFull::new([maxspace+nroots, maxspace+nroots], 0.0f64);
 76 |     let mut fresh_start = true;
 77 |     let mut xt:Vec<Vec<f64>> = vec![];
 78 |     let mut axt:Vec<Vec<f64>> = vec![];
 79 |     let mut xtlen = 0;
 80 |     let mut xs:Vec<Vec<f64>> = vec![];
 81 |     let mut ax:Vec<Vec<f64>> = vec![];
 82 |     let mut space = 0;
 83 |     let mut ov = x0_1d.len();
 84 |     let mut e:Vec<f64> = vec![];
 85 |     let mut v:MatrixFull<f64> = MatrixFull::empty();
 86 |     let mut nroots_current = 0;
 87 |     let mut conv = vec![false; nroots];
 88 |     //emin = None
 89 |     let mut norm_min:f64 = 1.0;
 90 |     let mut max_dx_last = 1e9;
 91 |     let mut heff = MatrixFull::empty();
 92 | 
 93 |     for icyc in 0..maxcyc {
 94 | 
 95 |         if fresh_start {
 96 |             xs = vec![];
 97 |             ax = vec![];
 98 |             space = 0;
 99 |             xt = _qr(&mut x0, lindep).0;
100 |             xtlen = xt.len();
101 |             println!("ov {:?} xt.len {:?}", ov, xtlen);
102 |             if (ov - nroots) < 3 {
103 |                 panic!("Too many nroots required");
104 |             }
105 |             //println!("{:?}", xt);
106 |             if xtlen == 0 {
107 |                 panic!("No more linearly independent basis vectors found");
108 |             }
109 |             max_dx_last = 1e9;
110 |             //heff = MatrixFull::new([space, space], 0.0f64);
111 |         } else {
112 |             xtlen = xt.len();
113 |             if xtlen > 1 {
114 |                 xt = _qr(&mut xt.clone(), lindep).0;
115 |                 if xtlen > 40 {
116 |                     xt = xt[0..40].to_vec();
117 |                 }
118 |             }
119 |         }
120 |         println!(">>> start cyc {:?} fresh {:?} ", icyc, fresh_start);
121 |         //println!("    xs {:?} ", xs);
122 |         //println!("    xt {:?} ", xt);
123 |         //let mut axt = ax(xt)
124 |         axt = vec![];
125 |         for xi in xt {
126 |             let mut axi = a_x(&mut xi.clone());
127 |             if print_level > 3 { println!("    axi {:?} ", axi); }
128 |             axt.push(axi.to_vec());
129 |             xs.push(xi.clone().to_vec());
130 |             ax.push(axi.to_vec());
131 |         }
132 |         //println!("    xs {:?} ", xs);
133 |         //let xslen = xs.len();
134 |         let mut rnow = xtlen;
135 |         let mut head = space;
136 |         space = space+rnow;
137 |         let mut elast = e.clone();
138 |         let mut vlast = v.clone();
139 |         let mut convlast = conv.clone();
140 |         //println!("    space {:?}", space);
141 |         //heff =
142 |         heff = fill_heff(&mut heff, &mut xs, &mut ax, //&xt, &axt,
143 |                          xtlen, fresh_start);
144 |         let mut heff_upper = heff.clone().to_matrixupper();
145 |         let (mut eigvec, mut eigval, n_found) = heff_upper.to_matrixupperslicemut().lapack_dspevx().unwrap();
146 |         if eigval.len() < nroots {
147 |             e = eigval.clone();
148 |             v = eigvec;
149 |         } else {
150 |             e = eigval[0..nroots].to_vec();
151 |             v = MatrixFull::from_vec([space, nroots],
152 |                 eigvec.iter_submatrix(0..space, 0..nroots).map(|i| *i).collect()).unwrap();
153 |         }
154 |         nroots_current = eigval.len().min(nroots);
155 |         if print_level > 3 {
156 |             println!("    heff {:?}", heff);
157 |             println!("    eigval {:?}", e);
158 |             println!("    eigvec[0] {:?}", v);
159 |             println!("    xs {:?} ", xs);
160 |             println!("    ax {:?} ", ax);
161 |         }
162 |         x0 = _gen_x0(&mut v, &mut xs);
163 |         let mut ax0 = _gen_x0(&mut v, &mut ax);
164 |         if print_level > 3 {
165 |             //println!("    xs {:?} ", xs);
166 |             println!("    x0 {:?} ", x0);
167 |         }
168 |         (elast, convlast) = _sort_elast(elast, convlast, &mut vlast, &mut v, fresh_start);
169 |         xt = vec![];
170 |         let mut dx_norm = vec![0.0f64;nroots_current];
171 |         let mut de = vec![0.0f64;nroots_current];
172 |         for k in 0..nroots_current {
173 |             //let de_k = e[k] - elast[k];
174 |             //de.push(de_k);
175 |             if k < elast.len() {
176 |                 de[k] = e[k] - elast[k];
177 |             } else {
178 |                 de[k] = e[k]
179 |             }
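            // residual for root k: r_k = (A - e_k) x0_k, assembled from the
            // Ritz pair (e_k, x0_k) computed above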
180 |             let mut xt_k = ax0[k].clone();
181 |             xt_k.iter_mut().zip(x0[k].iter()).for_each(|(xt,x0)| *xt -= e[k]* *x0);
182 |             if print_level > 3 { println!("    xt_k {:?}", xt_k)}
183 |             xt.push(xt_k.clone());
184 |             let xt_k_na = DVector::from(xt_k.clone());
185 |             let dx_k_norm = xt_k_na.norm();
186 |             //println!("{:?} {:?} {:?} ", ax0[k], e[k], x0);
187 |             //println!("{:?}", dx_k_norm);
188 |             dx_norm[k] = dx_k_norm;
189 |             let conv_k = de[k].abs() < tol && dx_k_norm < tol.sqrt();
190 |             conv[k] = conv_k;
191 |             //println!("{:?} {:?} {:?} {:?} {:?}", conv, de_k, dx_k_norm, de_k.abs() < tol, dx_k_norm < tol.sqrt());
192 |             if conv[k] && !convlast[k] {
193 |                 println!("> root {:?} converged  |r|= {:?}  e= {:?}  de= {:?}",
194 |                          k, dx_k_norm, e[k], de[k]);
195 |             } else {
196 |                 if print_level > 3 {
197 |                     println!("> root {:?}  |r|= {:?}  e= {:?}  de= {:?}",
198 |                              k, dx_k_norm, e[k], de[k]);}
199 |             }
200 |         }
201 |         //println!("    xt {:?} ", xt);
202 |         let mut ax0:Vec<Vec<f64>> = vec![];
203 |         let all_conv = conv.iter().fold(true, |acc, x| acc && *x);
204 |         let max_dx_norm = DVector::from(dx_norm.clone()).max();
205 |         if all_conv {
206 |             println!(">>> converged at step {:?}  |r|= {:?}  e= {:?}  de= {:?}",
207 |                      icyc, max_dx_norm, e, de);
208 |             break;
209 |         } else {
210 |             if max_dx_norm > 1.0 && max_dx_norm/max_dx_last > 3.0 && space > nroots+2 {
211 |                 println!(">>> davidson step {:?}  |r|= {:?}  e= {:?}  de= {:?} ",
212 |                          icyc, max_dx_norm, e, de);
213 |                 println!("Large |r| detected, restoring previous x0");
214 |                 x0 = _gen_x0(&mut vlast, &mut xs);
215 |                 fresh_start = true;
216 |                 continue;
217 |             }
218 |         }
219 | 
220 |         let mut xt_new:Vec<Vec<f64>> = vec![];
221 |         for k in 0..nroots_current {
222 |             if print_level > 3 { println!("k {:?} dx_norm {:?}", k, dx_norm[k]);}
223 |             if dx_norm[k].powf(2.0) > lindep {
224 |                 xt[k] = precond(xt[k].clone(), e[0],// x0[k]
225 |                                 );
226 |                 let xt_k_na = DVector::from(xt[k].clone());
227 |                 let norm = xt_k_na.norm();
228 |                 xt[k].iter_mut().for_each(|x| *x /= norm);
229 |                 xt_new.push(xt[k].clone());
230 |             } else {
231 |                 println!("Drop eigvec {:?} with norm {:?}", k, dx_norm[k]);
232 |             }
233 |         }
234 |         xt = xt_new.clone();
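        // Gram-Schmidt step: project all existing subspace vectors xs out of
        // each surviving preconditioned residual before admitting it as a new
        // trial vector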
235 |         for i in 0..space {
236 |             let xsi_na = DVector::from(xs[i].clone());
237 |             for k in 0..xt.len() {
238 |                 let mut xtk = &xt[k];
239 |                 let mut xtk_na = DVector::from(xtk.clone());
240 |                 xtk_na -= xsi_na.clone() * xsi_na.dot(&xtk_na);
241 |                 xt[k] = xtk_na.data.into();
242 |             }
243 |         }
244 | 
245 |         (xt, norm_min) = _normalize_xt_(xt, lindep);
246 |         //println!("    xt {:?} ", xt);
247 |         //println!("    xs {:?} ", xs);
248 |         println!(">>> davidson step {:?}  |r|= {:?}  e= {:?}  de= {:?}  lindep= {:?}",
249 |                  icyc, max_dx_norm, e, de, norm_min);
250 |         if xt.len() == 0 {
251 |             println!("Linear dependency in trial subspace. |r| for each state {:?}",
252 |                      dx_norm);
253 |             for k in 0..dx_norm.len() {
254 |                 conv[k] = conv[k] || dx_norm[k] < tol.sqrt();
255 |             }
256 |             break;
257 |         }
258 | 
259 |         max_dx_last = max_dx_norm;
260 |         fresh_start = space + nroots > maxspace;
261 |     }
262 |     if x0.len() < ov.min(nroots) {
263 |         println!("Warning: Not enough eigvec {:?} < min({:?}, {:?})", x0.len(), ov, nroots);
264 |     }
265 |     (conv, e, x0)
266 | }
267 | 
268 | pub fn _normalize_xt_(xt: Vec<Vec<f64>>, lindep:f64) -> (Vec<Vec<f64>>, f64) {
269 |     let mut norm_min:f64 = 1.0;
270 |     //println!("xt {:?} ", xt);
271 |     let mut xt_new:Vec<Vec<f64>> = vec![];
272 |     for k in 0..xt.len() {
273 |         let xt_k_na = DVector::from(xt[k].clone());
274 |         let norm = xt_k_na.norm();
275 |         if norm.powf(2.0) > lindep {
276 |             let mut xt_k = xt[k].clone();
277 |             xt_k.iter_mut().for_each(|x| *x /= norm);
278 |             xt_new.push(xt_k);
279 |             norm_min = norm_min.min(norm);
280 |         } else {
281 |             println!("drop eigvec {:?} with norm {:?}", k, norm);
282 |         }
283 |     }
284 |     (xt_new, norm_min)
285 | }
286 | 
287 | pub fn _qr(x:&mut Vec<Vec<f64>>, lindep:f64) -> (Vec<Vec<f64>>, MatrixFull<f64>)
288 | {
289 |     let nvec = x.len();
290 |     let vecsize = x[0].len();
291 |     //println!("_qr \n nvec {:?} vecsize {:?}", nvec, vecsize);
292 |     //let mut qs = MatrixFull::new([nvec,vecsize], 0.0f64);
293 |     let mut qs = vec![vec![0.0f64;vecsize]; nvec];
294 |     let mut rmat = MatrixFull::new([nvec,nvec], 0.0f64);
295 | 
296 |     let mut nv = 0;
297 |     for i in 0..nvec {
298 |         let mut xi = x[i].clone();
299 |         rmat.iter_column_mut(nv).for_each(|r| *r = 0.0);
300 |         rmat.data[nv*nvec+nv] = 1.0;
301 |         //println!("{:?}", rmat.data);
302 |         for j in 0..nv {
303 |             //let mut qsj = qs[j];
304 |             let mut prod:f64 = qs[j].iter().zip(xi.iter()).map(|(q,x)| q*x).sum();
305 |             xi.iter_mut().zip( qs[j].iter()).for_each(|(x,q)| *x -= *q *prod);
306 |             let mut rmat_clone = rmat.clone();
307 |             rmat.iter_column_mut(nv).zip(rmat_clone.iter_column(j)).for_each(|(r, rj)| *r -= *rj *prod);
308 |         };
309 |         //let mut innerprod:f64 = xi.iter().zip(xi.iter()).map(|(x1,x2)| x1*x2).sum();
310 |         let mut xi_na = DVector::from(xi.clone());
311 |         let mut norm = xi_na.norm();
312 |         //let mut norm = innerprod.sqrt();
313 |         //println!("{:?}", xi);
314 |         //println!("{:?}", norm);
315 |         if norm.powf(2.0) > lindep {
316 |             qs[nv].iter_mut().zip(xi.iter()).for_each(|(q,x)| *q = *x / norm);
317 |             rmat.iter_submatrix_mut(0..nv+1,nv..nv+1).for_each(|r| *r /= norm);
318 |             nv += 1;
319 |         }
320 |     };
321 |     let mut rmat_inv = rmat.lapack_inverse().unwrap();
322 |     //println!("    qs {:?}", qs[0]);
323 | 
324 |     (qs[0..nv].to_vec(), rmat_inv)
325 | }
326 | 
327 | pub fn fill_heff(heff_old:&mut MatrixFull<f64>,
328 |                  xs:&mut Vec<Vec<f64>>, ax:&mut Vec<Vec<f64>>,
329 |                  //xt:&Vec<Vec<f64>>, axt:&Vec<Vec<f64>>,
330 |                  xtlen:usize,
331 |                  fresh_start:bool
332 |                  ) -> MatrixFull<f64>
333 | {
334 |     let nrow = xtlen;
335 |     let row1 = ax.len();
336 |     let row0 = row1 - nrow;
337 |     let space = row1;
338 |     println!("    space {:?} xt.len {:?} xs.len {:?}", space, nrow, row1);
339 |     //  println!("    xs {:?} ", xs);
340 |     //println!("xt[0] {:?} ", xt[0]);
341 |     //println!("xs[0] {:?} \nax[0] {:?}", xs[0], ax[0]);
342 |     //if row1 > 1 {
343 |     //println!("xs[1] {:?} \nax[1] {:?}", xs[1], ax[1]);}
344 |     let mut heff = if fresh_start {
345 |         MatrixFull::new([space, space], 0.0f64)
346 |     } else {
347 |         let space_old = heff_old.size[0];
348 |         let mut heff = MatrixFull::new([space, space], 0.0f64);
349 |         for i in 0..space_old {
350 |             for j in 0..space_old {
351 |                 heff.data[i*space+j] = heff_old.data[i*space_old+j];
352 |             }
353 |         };
354 |         heff
355 |     };
356 |     for i in row0..row1 {
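        // fill the <x_i|A|x_j> block among the newly added trial vectors;
        // heff is filled symmetrically, assuming a Hermitian operator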
357 |         let xt_i_na = DVector::from(xs[i].clone());
358 |         for j in row0..row1 {
359 |             //let mut xt_i_axt_j:f64 = xt[i-row0].iter().zip(axt[j-row0].iter()
360 |             //    ).map(|(x,a)| x*a).sum();
361 |             let axt_j_na = DVector::from(ax[j].clone());
362 |             let mut xt_i_axt_j = xt_i_na.dot(&axt_j_na);
363 |             heff.data[i*space+j] = xt_i_axt_j;
364 |             heff.data[j*space+i] = xt_i_axt_j;
365 |         }
366 |     };
367 |     //  println!("    xs {:?} ", xs);
368 |     for j in row0..row1 {
369 |         let xt_j_na = DVector::from(xs[j].clone());
370 |         //let mut all_neg = false;
371 |         for i in 0..row0 {
372 |             let ax_i_na = DVector::from(ax[i].clone());
373 |             //let mut ax_i_xt_j:f64 = ax[i].iter().zip(xt[j-row0].iter()
374 |             //    ).map(|(x,a)| x*a).sum();
375 |             let mut ax_i_xt_j = ax_i_na.dot(&xt_j_na);
376 |             heff.data[i*space+j] = ax_i_xt_j;
377 |             heff.data[j*space+i] = ax_i_xt_j;
378 |         }
379 |         //if heff.data[0+j] < 0.0 {
380 |         //    xs[j].iter_mut().for_each(|x| *x *= (-1.0));
381 |         //    ax[j].iter_mut().for_each(|x| *x *= (-1.0));
382 |         //    for i in 0..row0 {
383 |         //        heff.data[i*space+j] *= (-1.0);
384 |         //        heff.data[j*space+i] *= (-1.0);
385 |         //    }
386 |         //}
387 |     };
388 |     //  println!("    xs {:?} ", xs);
389 |     heff
390 | }
391 | 
392 | fn _gen_x0(v:&mut MatrixFull<f64>, x:&mut Vec<Vec<f64>>) -> Vec<Vec<f64>> {
393 |     let space = v.size[0];
394 |     let nroots = v.size[1];
395 |     let xlen = x[0].len();
396 |     let mut xmat = MatrixFull::from_vec([xlen, space], x.concat() ).unwrap();
397 |     let mut x0mat = MatrixFull::new([xlen, nroots], 0.0f64);
398 | 
399 |     x0mat.lapack_dgemm(&mut xmat, v, 'N', 'N', 1.0, 0.0);
400 |     let mut x0vecs:Vec<Vec<f64>> = vec![];
401 |     x0mat.iter_columns_full().for_each(|x0| x0vecs.push(x0.to_vec()));
402 | 
403 |     x0vecs
404 | }
405 | 
406 | fn _sort_elast(elast:Vec<f64>, convlast:Vec<bool>,
407 |                vlast:&mut MatrixFull<f64>, v:&mut MatrixFull<f64>, fresh_start:bool) -> (Vec<f64>, Vec<bool>) {
408 |     if fresh_start {
409 |         (elast, convlast)
410 |     } else {
411 |         //println!("v.size {:?} vlast.size {:?}", v.size, vlast.size);
412 |         let head = vlast.size[0];
413 |         let nroots = vlast.size[1];
414 |         let mut ovlp = MatrixFull::new([nroots, nroots], 0.0f64);
415 |         let mut v_head = MatrixFull::from_vec([head, nroots],
416 |             v.iter_submatrix(0..head, 0..nroots).map(|i| *i).collect()).unwrap();
417 |         ovlp.lapack_dgemm(&mut v_head, vlast, 'T', 'N', 1.0, 0.0);
418 |         let mut idx:Vec<usize> = vec![];
419 |         ovlp.iter_columns_full().for_each(|x| {
420 |             let x_na = DVector::from(x.to_vec());
421 |             idx.push(x_na.imax() ); } );
422 |         // todo: log for eigenstate flip
423 |         //println!("{:?}", idx);
424 |         let mut new_elast:Vec<f64> = vec![];
425 |         let mut new_convlast:Vec<bool> = vec![];
426 |         for i in idx {
427 |             new_elast.push(elast[i]);
428 |             new_convlast.push(convlast[i]);
429 |         }
430 |         (new_elast, new_convlast)
431 |     }
432 | }
433 | 
434 | 
--------------------------------------------------------------------------------
/src/eri.rs:
--------------------------------------------------------------------------------
  1 | use std::{fmt::Display, collections::binary_heap::Iter, ops::Range, iter::Flatten, vec::IntoIter};
  2 | use libc::P_PID;
  3 | use blas::dcopy;
  4 | use rayon::prelude::*;
  5 | use itertools::iproduct;
  6 | 
  7 | //use crate::index::{TensorIndex, TensorIndexUncheck}, Tensors4D, tensors_slice::{TensorsSliceMut, TensorsSlice}, matrix::{MatrixFullSliceMut, MatrixFullSlice}, MatrixFull, MatrixUpperSliceMut, MatrixUpperSlice, TensorOptUncheck, TensorOptMutUncheck, TensorSliceUncheck, TensorSliceMutUncheck};
  8 | //use crate::index::{TensorIndex, TensorIndexUncheck};
  9 | //use crate::matrix::{MatrixFullSliceMut, MatrixFullSlice};
 10 | //use crate::matrix::{MatrixFullSliceMut, MatrixFullSlice, MatrixFull, MatrixUpperSliceMut, MatrixUpperSlice};
 11 | //use crate::{TensorOptUncheck, TensorOptMutUncheck, TensorSliceUncheck, TensorSliceMutUncheck};
 12 | //use crate::tensor_basic_operation::{TensorOpt, TensorOptMut, TensorSlice, TensorSliceMut};
 13 | //use crate::matrix::
 14 | //{Indexing,Tensors4D};
 15 | use crate::index::*;
 16 | use crate::tensor_basic_operation::*;
 17 | use crate::matrix::matrixfullslice::*;
 18 | use crate::matrix::matrixfull::*;
 19 | use crate::matrix::matrixupper::*;
 20 | 
 21 | 
 22 | #[derive(Clone, Copy,Debug, PartialEq)]
 23 | pub enum ERIFormat {
 24 |     Full,
 25 |     Fold4,
 26 |     Fold8
 27 | }
 28 | #[derive(Clone,Debug,PartialEq)]
 29 | pub struct ERIFull<T> {
 30 |     /// Column-major rank-4 ERI tensor designed specifically for quantum chemistry calculations.
 31 |     //pub store_format : ERIFormat,
 32 |     //pub rank: usize,
 33 |     pub size : [usize;4],
 34 |     pub indicing: [usize;4],
 35 |     pub data : Vec<T>
 36 | }
 37 | 
 38 | //pub type ERIFull = Tensors4D;
 39 | //pub type ERI4F = Tensors4D;
 40 | //pub type ERI8F = Tensors4D;
 41 | 
 42 | //impl ERI4F {
 43 | //    pub fn
 44 | //}
 45 | 
 46 | impl<T: Clone> ERIFull<T> {
 47 |     pub fn new(size: [usize;4], new_default: T) -> ERIFull<T> {
 48 |         let mut indicing = [0usize;4];
 49 |         //let mut len = size.iter().enumerate().fold(1usize,|len,(di,i)| {
 50 |         //    indicing[*i] = len;
 51 |         //    len * di
 52 |         //});
 53 |         let mut len = size.iter().zip(indicing.iter_mut()).fold(1usize,|len,(sizei,indicing_i)| {
 54 |             *indicing_i = len;
 55 |             len * sizei
 56 |         });
 57 |         //len *= size[3];
 58 |         ERIFull {
 59 |             //rank: U4::default(),
 60 |             size,
 61 |             indicing,
 62 |             data: vec![new_default.clone(); len]
 63 |         }
 64 |     }
 65 |     pub unsafe fn from_vec_unchecked(size: [usize;4], new_vec: Vec<T>) -> ERIFull<T> {
 66 |         let mut indicing = [0usize;4];
 67 |         //let mut len = size.iter().enumerate().fold(1usize,|len,(di,i)| {
 68 |         let mut len = size.iter().zip(indicing.iter_mut()).fold(1usize,|len,(sizei,indicing_i)| {
 69 |             *indicing_i = len;
 70 |             len * sizei
 71 |         });
 72 |         //len *= size[3];
 73 |         //if len>new_vec.len() {
 74 |         //    panic!("Error: inconsistency happens when formating a tensor from a given vector, (length from size, length of new vector) = ({},{})",len,new_vec.len());
 75 |         //} else if len<new_vec.len() {
 76 |         //    println!("Warning: the length of the given vector is larger than the size of the new tensor");
 77 |         //}
 78 |         ERIFull {
 79 |             //rank: U4::default(),
 80 |             size,
 81 |             indicing,
 82 |             data: new_vec
 83 |         }
 84 |     }
 85 |     pub fn from_vec(size: [usize;4], new_vec: Vec<T>) -> Option<ERIFull<T>> {
 86 |         unsafe{
 87 |             let tmp_eri = ERIFull::from_vec_unchecked(size, new_vec);
 88 |             let len = tmp_eri.indicing[3]*tmp_eri.size[3];
 89 |             if len>tmp_eri.data.len() {
 90 |                 panic!("Error: inconsistency happens when formating a tensor from a given vector, (length from size, length of new vector) = ({},{})",len,tmp_eri.data.len());
 91 |                 None
 92 |             } else {
 93 |                 if len<tmp_eri.data.len() {println!("Warning: the length of the given vector is larger than the size of the new tensor")};
 94 |                 Some(tmp_eri)
 95 |             }
 96 |         }
 97 |     }
 98 |     #[inline]
 99 |     pub fn get_reducing_matrix_mut(&mut self, i_reduced: &[usize;2]) -> MatrixFullSliceMut<T> {
100 |         let mut position = [0; 4];
101 |         position[2] = i_reduced[0];
102 |         position[3] = i_reduced[1];
103 |         let p_start = self.index4d(position).unwrap();
104 |         let p_length = self.indicing[2];
105 |         MatrixFullSliceMut {
106 |             size: &self.size[0..2],
107 |             indicing: &self.indicing[0..2],
108 |             data : &mut self.data[p_start..p_start+p_length]}
109 |     }
110 | 
111 |     #[inline]
112 |     pub fn get_reducing_matrix(&self, i_reduced: &[usize;2]) -> MatrixFullSlice<T> {
113 |         let mut position = [0; 4];
114 |         position[2] = i_reduced[0];
115 |         position[3] = i_reduced[1];
116 |         let p_start = self.index4d(position).unwrap();
117 |         let p_length = self.indicing[2];
118 |         MatrixFullSlice {
119 |             size: &self.size[0..2],
120 |             indicing: &self.indicing[0..2],
121 |             data : &self.data[p_start..p_start+p_length]}
122 |     }
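    /// Copies a rectangular block of data, addressed by one `Range<usize>` per
    /// rank, from the contiguous column-major buffer `buf` into this tensor.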
123 |     #[inline]
124 |     pub fn chrunk_copy(&mut self, range: [Range<usize>;4], buf: Vec<T>) {
125 |         let mut len = [0usize;4];
126 |         len.iter_mut().zip(range.iter()).for_each(|(i,j)| *i=j.len());
127 |         let mat_local = unsafe{ERIFull::from_vec_unchecked(len, buf)};
128 |         for (ll,l) in (0..len[3]).zip(range[3].clone()) {
129 |             for (kk,k) in (0..len[2]).zip(range[2].clone()) {
130 |                 let mut mat_full_kl = self.get_reducing_matrix_mut(&[k,l]);
131 |                 let mat_local_kl = mat_local.get_reducing_matrix(&[kk,ll]);
132 |                 //for (jj,j) in (0..len[1]).zip(jrange.clone()) {
133 |                 for (jj,j) in (0..len[1]).zip(range[1].clone()) {
134 |                     let mut mat_full_klj = mat_full_kl.get2d_slice_mut([range[0].start,j],len[0]).unwrap();
135 |                     let mat_local_klj = mat_local_kl.get2d_slice([0,jj],len[0]).unwrap();
136 |                     //unsafe{
137 |                     //    dcopy(len[0] as i32, mat_local_klj, 1, mat_full_klj, 1);
138 |                     //}
139 |                     mat_full_klj.iter_mut().zip(mat_local_klj.iter()).for_each(|(t,f)| *t = f.clone());
140 |                 }
141 |             }
142 |         }
143 |     }
144 |     #[inline]
145 |     pub fn chrunk_copy_transpose_ij(&mut self, range: [Range<usize>;4], buf: Vec<T>) {
146 |         //let mut len = [irange.len(),jrange.len(),krange.len(),lrange.len()];
147 |         let ilen = self.size[0];
148 |         let mut len = [0usize;4];
149 |         len.iter_mut().zip(range.iter()).for_each(|(i,j)| *i=j.len());
150 |         let mat_local = unsafe{ERIFull::from_vec_unchecked(len, buf)};
151 |         for (ll,l) in (0..len[3]).zip(range[3].clone()) {
152 |             for (kk,k) in (0..len[2]).zip(range[2].clone()) {
153 |                 let mut mat_full_kl = self.get_reducing_matrix_mut(&[k,l]);
154 |                 let mat_local_kl = mat_local.get_reducing_matrix(&[kk,ll]);
155 |                 for (jj,j) in (0..len[1]).zip(range[1].clone()) {
156 |                     let mut mat_full_klj = mat_full_kl.get2d_slice_mut([range[0].start,j],len[0]).unwrap();
157 |                     let mat_local_klj = mat_local_kl.get2d_slice([0,jj],len[0]).unwrap();
158 |                     let mut global_ij = j + range[0].start*ilen;
159 |                     for ii in 0..len[0] {
160 |                         mat_full_kl.data[global_ij] = mat_local_klj[ii].clone();
161 |                         global_ij += ilen;
162 |                     }
163 |                 }
164 |             }
165 |         }
166 |     }
167 | }
168 | 
169 | 
170 | #[derive(Clone,Debug,PartialEq)]
171 | pub struct ERIFold4<T> {
172 |     /// Column-major, 4-fold packed ERI designed specifically for quantum chemistry calculations.
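    /// With the 4-fold permutation symmetry (ij|kl) = (ji|kl) = (ij|lk), only
    /// the packed pair indices ij = j*(j+1)/2+i (with i<=j) are stored, so for
    /// n basis functions `size` is [n*(n+1)/2, n*(n+1)/2].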
173 |     pub size : [usize;2],
174 |     pub indicing: [usize;2],
175 |     pub data : Vec<T>
176 | }
177 | 
178 | impl<T: Clone> ERIFold4<T> {
179 |     pub fn new(size: [usize;2], new_default: T) -> ERIFold4<T> {
180 |         let mut indicing = [0usize;2];
181 |         let mut len = size.iter().zip(indicing.iter_mut()).fold(1usize,|len,(sizei,indicing_i)| {
182 |             *indicing_i = len;
183 |             len * sizei
184 |         });
185 |         ERIFold4 {
186 |             size,
187 |             indicing,
188 |             data: vec![new_default.clone(); len]
189 |         }
190 |     }
191 |     pub unsafe fn from_vec_unchecked(size: [usize;2], new_vec: Vec<T>) -> ERIFold4<T> {
192 |         let mut indicing = [0usize;2];
193 |         let mut len = size.iter().zip(indicing.iter_mut()).fold(1usize,|len,(sizei,indicing_i)| {
194 |             *indicing_i = len;
195 |             len * sizei
196 |         });
197 |         ERIFold4 {
198 |             size,
199 |             indicing,
200 |             data: new_vec
201 |         }
202 |     }
203 |     pub fn from_vec(size: [usize;2], new_vec: Vec<T>) -> Option<ERIFold4<T>> {
204 |         unsafe{
205 |             let tmp_eri = ERIFold4::from_vec_unchecked(size, new_vec);
206 |             let len = tmp_eri.indicing[1]*tmp_eri.size[1];
207 |             if len>tmp_eri.data.len() {
208 |                 panic!("Error: inconsistency happens when formating a tensor from a given vector, (length from size, length of new vector) = ({},{})",len,tmp_eri.data.len());
209 |                 None
210 |             } else {
211 |                 if len<tmp_eri.data.len() {println!("Warning: the length of the given vector is larger than the size of the new tensor")};
212 |                 Some(tmp_eri)
213 |             }
214 |         }
215 |     }
216 |     #[inline]
217 |     pub fn get_reducing_matrix_mut(&mut self, i_reduced: usize) -> MatrixUpperSliceMut<T> {
218 |         let p_start = self.index2d([0,i_reduced]).unwrap();
219 |         let p_length = self.indicing[1];
220 |         MatrixUpperSliceMut {
221 |             size: self.size[0],
222 |             //indicing: &self.indicing[0],
223 |             data : &mut self.data[p_start..p_start+p_length]}
224 |     }
225 | 
226 |     #[inline]
227 |     pub fn get_reducing_matrix(&self, i_reduced: usize) -> MatrixUpperSlice<T> {
228 |         let p_start = self.index2d([0,i_reduced]).unwrap();
229 |         let p_length = self.indicing[1];
230 |         MatrixUpperSlice {
231 |             size: self.size[0],
232 |             //indicing: &self.indicing[0],
233 |             data : &self.data[p_start..p_start+p_length]}
234 |     }
235 |     #[inline]
236 |     pub fn get_slices_mut(&mut self, dim: usize, d1: Range<usize>, d2:Range<usize>, d3:Range<usize>, d4:Range<usize>) -> Flatten<IntoIter<&mut [T]>> {
237 |         //let mut tmp_slices = vec![&self.data[..]; d2.len()*d3.len()*d4.len()];
238 |         let mut tmp_slices: Vec<&mut [T]> = Vec::new();
239 |         let len_slices_d1 = d1.len();
240 |         let dd = self.data.split_at_mut(0).1;
241 |         let len_slices_d1 = d1.len();
242 |         let len_d12 = dim*(dim+1)/2;
243 |         let len_d34 = len_d12;
244 | 
245 |         //let mut debug_total_len = 0_usize;
246 | 
247 |         let mut final_slice_len = 0_usize;
248 |         let mut start = 0_usize;
249 |         iproduct!(d4,d3,d2).fold((dd,0_usize),|(ee, offset),(d4,d3,d2)| {
250 |             if d3 > d4 || d2 < d1.start {
251 |                 (ee,offset)
252 |             } else {
253 |                 start = (d4*(d4+1)/2+d3)*len_d12 + d2*(d2+1)/2 + d1.start;
254 |                 // check the slice length to gather
255 |                 final_slice_len = if d2 >= d1.end {len_slices_d1} else {d2-d1.start+1};
256 |                 let gg = ee.split_at_mut(start-offset).1.split_at_mut(final_slice_len);
257 |                 tmp_slices.push(gg.0);
258 |                 //debug_total_len += final_slice_len;
259 |                 (gg.1,start+final_slice_len)
260 |             }
261 |         });
262 | 
263 |         //let len = tmp_slices.len();
264 | 
265 |         //println!("debug: the length of to_slices: {}", debug_total_len);
266 |         tmp_slices.into_iter().flatten()
267 | 
268 |     }
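    /// Copies a full (unfolded) local ERI block `buf` spanning d1..d4 into this
    /// 4-fold packed tensor; only the upper (d1<=d2, d3<=d4) part is gathered,
    /// through the disjoint mutable slices produced by `get_slices_mut`.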
269 |     #[inline]
270 |     pub fn chunk_copy_from_local_erifull(&mut self, dim:usize, d1:Range<usize>, d2:Range<usize>, d3:Range<usize>, d4:Range<usize>, buf: Vec<T>) {
271 |         //let mat_local = unsafe{ERIFull::from_vec_unchecked([d1.len(),d2.len(),d3.len(),d4.len()], buf)};
272 |         // prepare the slices from self for copy
273 |         let mut to_slices = self.get_slices_mut(dim, d1.clone(), d2.clone(), d3.clone(), d4.clone());
274 | 
275 |         // now prepare the slices from buf to copy
276 |         let mut from_slices: Vec<&[T]> = Vec::new();
277 |         let mut final_slice_len = 0_usize;
278 |         let local_ind1 = d1.len();
279 |         let local_ind2 = local_ind1*d2.len();
280 |         let local_ind3 = local_ind2*d3.len();
281 | 
282 |         //let mut debug_total_len = 0_usize;
283 | 
284 |         let mut start = 0_usize;
285 |         let mut final_len = 0_usize;
286 |         let max_final_len = d1.len();
287 |         for l in d4.enumerate() {
288 |             let start_l = l.0*local_ind3;
289 |             for k in d3.clone().enumerate() {
290 |                 if k.1 <= l.1 {
291 |                     let start_kl = start_l + k.0*local_ind2;
292 |                     for j in d2.clone().enumerate() {
293 |                         if j.1 >=d1.start {
294 |                             start = start_kl + j.0*local_ind1;
295 |                             final_len = if j.1>=d1.end {max_final_len} else {j.1-d1.start+1};
296 |                             from_slices.push(&buf[start..start+final_len]);
297 |                             //debug_total_len += final_len;
298 |                         }
299 |                     }
300 |                 }
301 |             }
302 |         }
303 |         //println!("debug: the length of from_slices: {}", debug_total_len);
304 |         from_slices.into_iter().flatten().zip(to_slices).for_each(|value| {*value.1=value.0.clone()});
305 |     }
306 | }
307 | 
308 | impl<T: Clone> ERIFold4<T> {
309 |     #[inline]
310 |     /// Established especially for the tensor generation of ERIFold4, for which
311 |     /// the ERI subtensors are generated by LibCINT
312 |     pub fn chunk_copy_from_a_full_vector(&mut self, range: [Range<usize>;4], buf: Vec<T>) {
313 |         //========================================================
314 |         // algorithm 1: locate and copy each slice one by one
315 |         //========================================================
316 |         let mut len = [0usize;4];
317 |         len.iter_mut().zip(range.iter()).for_each(|(i,j)| *i=j.len());
318 |         let mat_local = unsafe{ERIFull::from_vec_unchecked(len, buf)};
319 |         if range[0].start<range[1].start {
320 |             for (ll,l) in range[3].clone().enumerate() {
321 |                 for (kk,k) in range[2].clone().enumerate() {
322 |                     if k>l {continue};
323 |                     let klpair = (l+1)*l/2+k; // navigate to the (k,l) position in the upper-packed tensor
324 |                     let mut mat_full_kl = self.get_reducing_matrix_mut(klpair);
325 |                     let mat_local_kl = mat_local.get_reducing_matrix(&[kk,ll]);
326 |                     let mut local_start = 0usize;
327 |                     for (jj,j) in range[1].clone().enumerate() {
328 |                         let mut mat_full_klj = mat_full_kl
329 |                             .get2d_slice_mut_uncheck([range[0].start,j],len[0]).unwrap();
330 |                         //let mat_local_klj = mat_local_kl.get2d_slice([0,jj],len[0]).unwrap();
331 |                         let mat_local_klj = mat_local_kl
332 |                             .get1d_slice(local_start,len[0]).unwrap();
333 |                         mat_full_klj.iter_mut().zip(mat_local_klj.iter()).for_each(|(t,f)| *t = f.clone());
334 |                         local_start += mat_local.size[0];
335 |                     };
336 |                     //let mut tmp_slices: Vec<&mut [f64]> = Vec::new();
337 |                     //let mut dd = mat_full_kl.data.split_at_mut(0).1;
338 |                     //range[1].clone().fold((dd,0_usize),|(ee,offset),y| {
339 | 
340 |                     //    (dd,0_usize)
341 |                     //});
342 | 
343 |                 }
344 |             }
345 |         } else if range[0].start==range[1].start {
346 |             for (ll,l) in range[3].clone().enumerate() {
347 |                 for (kk,k) in range[2].clone().enumerate() {
348 |                     if k>l {continue};
349 |                     let klpair = (l+1)*l/2+k; // navigate to the (k,l) position in the upper-packed tensor
350 |                     let mut mat_full_kl = self.get_reducing_matrix_mut(klpair);
351 |                     let mat_local_kl = mat_local.get_reducing_matrix(&[kk,ll]);
352 |                     let mut local_start = 0usize;
353 |                     for (jj,j) in range[1].clone().enumerate() {
354 |                         let mut mat_full_klj = mat_full_kl.get2d_slice_mut_uncheck([range[0].start,j],jj+1).unwrap();
355 |                         //let mat_local_klj = mat_local_kl.get2d_slice([0,jj],jj).unwrap();
356 |                         let mat_local_klj = mat_local_kl.get1d_slice(local_start,jj+1).unwrap();
357 |                         //unsafe{
358 |                         //    dcopy(len[0] as i32, mat_local_klj, 1, mat_full_klj, 1);
359 |                         //}
360 | 
mat_full_klj.iter_mut().zip(mat_local_klj.iter()).for_each(|(t,f)| *t = f.clone()); 361 | local_start += len[0]; 362 | } 363 | } 364 | } 365 | } 366 | //======================================================================= 367 | // algorithm 2: filter out the discontinued slices and copy them once 368 | //======================================================================= 369 | //let tmp_len = self.size[0] as f64; 370 | //let new_size = ((1.0+8.0*tmp_len).sqrt()*0.5-0.5) as usize; 371 | //let mut to_slices = self.get_slices_mut(new_size, range[0].clone(), range[1].clone(), range[2].clone(), range[3].clone()); 372 | 373 | } 374 | } -------------------------------------------------------------------------------- /src/external_libs/compile.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | ifort restmatr.f90 -shared -fpic -o librestmatr.so -O2 -L/share/apps/rust/OpenBLAS-0.3.17 -lopenblas 4 | 5 | #ifort -O2 restmatr.f90 -L/share/apps/rust/OpenBLAS-0.3.17 -lopenblas 6 | 7 | 8 | -------------------------------------------------------------------------------- /src/external_libs/ffi_restmatr.rs: -------------------------------------------------------------------------------- 1 | use std::ffi::{c_double, c_int, c_char}; 2 | 3 | //#[link(name="restmatr")] 4 | extern "C" { 5 | pub fn ri_ao2mo_f_(eigenvector: *const c_double, 6 | ri3fn: *const c_double, 7 | ri3mo: *mut c_double, 8 | num_states: *const c_int, 9 | num_basis: *const c_int, 10 | num_auxbas: *const c_int, 11 | ); 12 | 13 | pub fn general_dgemm_f_( 14 | matr_a: *const c_double, rows_a: *const c_int, columns_a: *const c_int, 15 | start_row_a: *const c_int, len_row_a: *const c_int, 16 | start_column_a: *const c_int, len_column_a: *const c_int, opa: *const c_char, 17 | matr_b: *const c_double, rows_b: *const c_int, columns_b: *const c_int, 18 | start_row_b: *const c_int, len_row_b: *const c_int, 19 | start_column_b: *const c_int, len_column_b: *const c_int, opb: *const c_char, 20 | matr_c: *mut c_double, rows_c: *const c_int, columns_c: *const c_int, 21 | start_row_c: *const c_int, len_row_c: *const c_int, 22 | start_column_c: *const c_int, len_column_c: *const c_int, 23 | alpha: *const c_double, beta: *const c_double 24 | ); 25 | pub fn special_dgemm_f_01_( 26 | ten3_a: *const c_double, x_a: *const c_int, y_a: *const c_int, z_a: *const c_int, 27 | start_x_a: *const c_int, len_x_a: *const c_int, i_y: *const c_int, 28 | start_z_a: *const c_int, len_z_a: *const c_int, 29 | matr_b: *const c_double, rows_b: *const c_int, columns_b: *const c_int, 30 | start_row_b: *const c_int, len_row_b: *const c_int, 31 | start_column_b: *const c_int, len_column_b: *const c_int, 32 | alpha: *const c_double, beta: *const c_double 33 | ); 34 | 35 | pub fn copy_mm_( 36 | x_len: *const c_int, y_len: *const c_int, 37 | f_matr: *const c_double, f_x_len: *const c_int, f_y_len: *const c_int, f_x_start: *const c_int, f_y_start: *const c_int, 38 | t_matr: *mut c_double, t_x_len: *const c_int, t_y_len: *const c_int, t_x_start: *const c_int, t_y_start: *const c_int, 39 | ); 40 | 41 | pub fn copy_mr_( 42 | x_len: *const c_int, y_len: *const c_int, 43 | f_matr: *const c_double, f_x_len: *const c_int, f_y_len: *const c_int, f_x_start: *const c_int, f_y_start: *const c_int, 44 | t_ri: *mut c_double, t_x_len: *const c_int, t_y_len: *const c_int, t_z_len: *const c_int, t_x_start: *const c_int, t_y_start: *const c_int, 45 | t_x3: *const c_int, t_mod: *const c_int 46 | ); 47 | 48 | pub fn copy_rm_( 49 | x_len: 
*const c_int, y_len: *const c_int,
50 |         f_ri: *const c_double, f_x_len: *const c_int, f_y_len: *const c_int, f_z_len: *const c_int, f_x_start: *const c_int, f_y_start: *const c_int,
51 |         f_x3: *const c_int, f_mod: *const c_int,
52 |         t_matr: *mut c_double, t_x_len: *const c_int, t_y_len: *const c_int, t_x_start: *const c_int, t_y_start: *const c_int
53 |     );
54 | 
55 |     pub fn copy_rr_(
56 |         x_len: *const c_int, y_len: *const c_int, z_len: *const c_int,
57 |         f_ri: *const c_double, f_x_len: *const c_int, f_y_len: *const c_int, f_z_len: *const c_int,
58 |         f_x_start: *const c_int, f_y_start: *const c_int, f_z_start: *const c_int,
59 |         t_ri: *mut c_double, t_x_len: *const c_int, t_y_len: *const c_int, t_z_len: *const c_int,
60 |         t_x_start: *const c_int, t_y_start: *const c_int, t_z_start: *const c_int
61 |     );
62 | }
--------------------------------------------------------------------------------
/src/external_libs/mod.rs:
--------------------------------------------------------------------------------
 1 | mod ffi_restmatr;
 2 | use std::{ops::Range};
 3 | 
 4 | use crate::external_libs::ffi_restmatr::*;
 5 | 
 6 | pub fn ri_ao2mo_f(
 7 |     eigenvector:&[f64],
 8 |     ri3fn:&[f64],
 9 |     ri3mo:&mut [f64],
10 |     num_states: usize,
11 |     num_basis: usize,
12 |     num_auxbas: usize)
13 | {
14 |     unsafe{ri_ao2mo_f_(
15 |         eigenvector.as_ptr(),
16 |         ri3fn.as_ptr(),
17 |         ri3mo.as_mut_ptr(),
18 |         &(num_states as i32),
19 |         &(num_basis as i32),
20 |         &(num_auxbas as i32))
21 |     }
22 | }
23 | 
24 | 
25 | /// # An efficient and general dgemm, wrapping a Fortran implementation
26 | /// matr_c[(range_row_c, range_column_c)] =
27 | ///     alpha * opa(matr_a[(range_row_a, range_column_a)])*opb(matr_b[(range_row_b, range_column_b)]) +
28 | ///     beta * matr_c[(range_row_c, range_column_c)]
29 | /// Example
30 | /// ```
31 | ///    use rest_tensors::MatrixFull;
32 | ///    use crate::rest_tensors::BasicMatrix;
33 | ///    use rest_tensors::external_libs::general_dgemm_f;
34 | ///    let matr_a = MatrixFull::from_vec([3,3], (1..10).map(|x| x as f64).collect::<Vec<f64>>()).unwrap();
35 | ///    //          | 1.0 | 4.0 | 7.0 |
36 | ///    // matr_a = | 2.0 | 5.0 | 8.0 |
37 | ///    //          | 3.0 | 6.0 | 9.0 |
38 | ///    let matr_b = MatrixFull::from_vec([3,3], (6..15).map(|x| x as f64).collect::<Vec<f64>>()).unwrap();
39 | ///    //          | 6.0 | 9.0 |12.0 |
40 | ///    // matr_b = | 7.0 |10.0 |13.0 |
41 | ///    //          | 8.0 |11.0 |14.0 |
42 | ///    let mut matr_c = MatrixFull::new([3,3], 2.0);
43 | ///    //          | 2.0 | 2.0 | 2.0 |
44 | ///    // matr_c = | 2.0 | 2.0 | 2.0 |
45 | ///    //          | 2.0 | 2.0 | 2.0 |
46 | ///
47 | ///    let matr_c_size = matr_c.size.clone();
48 | ///
49 | ///    general_dgemm_f(
50 | ///        matr_a.data_ref().unwrap(), matr_a.size(), 1..3, 1..3, 'N',
51 | ///        matr_b.data_ref().unwrap(), matr_b.size(), 0..2, 0..2, 'N',
52 | ///        matr_c.data_ref_mut().unwrap(), &matr_c_size, 1..3, 0..2,
53 | ///        1.0, 1.0
54 | ///    );
55 | ///    //          |  2.0 |  2.0 | 2.0 |
56 | ///    // matr_c = | 88.0 |127.0 | 2.0 |
57 | ///    //          |101.0 |146.0 | 2.0 |
58 | ///    assert_eq!(matr_c.get_submatrix(1..3, 0..2).data(),vec![88.0,101.0,127.0,146.0])
59 | /// ```
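/// Note that all ranges here are 0-based and half-open on the Rust side; the
/// Fortran wrapper shifts them to 1-based inclusive slices internally (see
/// `restmatr.f90`, e.g. `matr_a(start_row_a+1 : len_row_a+start_row_a, ...)`).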
 60 | pub fn general_dgemm_f(
 61 |     matr_a: &[f64], size_a: &[usize], range_row_a: Range<usize>, range_column_a: Range<usize>, opa: char,
 62 |     matr_b: &[f64], size_b: &[usize], range_row_b: Range<usize>, range_column_b: Range<usize>, opb: char,
 63 |     matr_c: &mut [f64], size_c: &[usize], range_row_c: Range<usize>, range_column_c: Range<usize>,
 64 |     alpha: f64, beta: f64
 65 | ) {
 66 |     unsafe{general_dgemm_f_(
 67 |         matr_a.as_ptr(),&(size_a[0] as i32),&(size_a[1] as i32),
 68 |         &(range_row_a.start as i32), &(range_row_a.len() as i32),
 69 |         &(range_column_a.start as i32), &(range_column_a.len() as i32),
 70 |         &(opa as std::ffi::c_char),
 71 | 
 72 |         matr_b.as_ptr(),&(size_b[0] as i32),&(size_b[1] as i32),
 73 |         &(range_row_b.start as i32), &(range_row_b.len() as i32),
 74 |         &(range_column_b.start as i32), &(range_column_b.len() as i32),
 75 |         &(opb as std::ffi::c_char),
 76 | 
 77 |         matr_c.as_mut_ptr(),&(size_c[0] as i32),&(size_c[1] as i32),
 78 |         &(range_row_c.start as i32), &(range_row_c.len() as i32),
 79 |         &(range_column_c.start as i32), &(range_column_c.len() as i32),
 80 | 
 81 |         &alpha, &beta
 82 |     )}
 83 | }
 84 | 
 85 | pub fn special_dgemm_f_01(
 86 |     ten3_a: &mut [f64], size_a: &[usize], range_x_a: Range<usize>, i_y:usize, range_z_a: Range<usize>,
 87 |     matr_b: &[f64], size_b: &[usize], range_row_b: Range<usize>, range_column_b: Range<usize>,
 88 |     alpha: f64, beta: f64
 89 | ) {
 90 |     unsafe{special_dgemm_f_01_(ten3_a.as_mut_ptr(),
 91 |         &(size_a[0] as i32), &(size_a[1] as i32), &(size_a[2] as i32),
 92 |         &(range_x_a.start as i32), &(range_x_a.len() as i32), &(i_y as i32),
 93 |         &(range_z_a.start as i32), &(range_z_a.len() as i32),
 94 |         matr_b.as_ptr(), &(size_b[0] as i32), &(size_b[1] as i32),
 95 |         &(range_row_b.start as i32), &(range_row_b.len() as i32),
 96 |         &(range_column_b.start as i32), &(range_column_b.len() as i32),
 97 |         &alpha, &beta)
 98 |     }
 99 | }
100 | 
101 | /// copy the block matr_a[(range_row_a, range_column_a)] to matr_b[(range_row_b, range_column_b)]
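/// An illustrative sketch (it requires the compiled `librestmatr.so` at run
/// time, as the `general_dgemm_f` example above does):
/// ```
/// use rest_tensors::external_libs::matr_copy;
/// // both matrices are 3x3 and column-major; copy a[0..2, 0..2] into b[1..3, 1..3]
/// let matr_a: Vec<f64> = (1..10).map(|x| x as f64).collect();
/// let mut matr_b = vec![0.0f64; 9];
/// matr_copy(&matr_a, &[3,3], 0..2, 0..2,
///           &mut matr_b, &[3,3], 1..3, 1..3);
/// assert_eq!(matr_b[4], 1.0); // matr_b[(1,1)] == matr_a[(0,0)]
/// ```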
102 | pub fn matr_copy(
103 |     matr_a: &[f64], size_a: &[usize], range_row_a: Range<usize>, range_column_a: Range<usize>,
104 |     matr_b: &mut [f64], size_b: &[usize], range_row_b: Range<usize>, range_column_b: Range<usize>
105 | ) {
106 |     let x_len = range_row_a.len();
107 |     let y_len = range_column_a.len();
108 |     if x_len == range_row_b.len() && y_len == range_column_b.len() {
109 |         unsafe{copy_mm_(
110 |             &(x_len as i32), &(y_len as i32),
111 |             matr_a.as_ptr(),&(size_a[0] as i32), &(size_a[1] as i32),
112 |             &(range_row_a.start as i32), &(range_column_a.start as i32),
113 |             matr_b.as_mut_ptr(),&(size_b[0] as i32), &(size_b[1] as i32),
114 |             &(range_row_b.start as i32), &(range_column_b.start as i32),
115 | 
116 |         )}
117 |     } else {
118 |         panic!("Error: the data block for copy has different size between two matrices");
119 |     }
120 | }
121 | 
122 | /// copy data from ri_a to matr_b
123 | pub fn matr_copy_from_ri(
124 |     ri_a: &[f64], size_a: &[usize], range_x_a: Range<usize>, range_y_a: Range<usize>, i_z_a: usize, copy_mod: usize,
125 |     matr_b: &mut [f64], size_b: &[usize], range_row_b: Range<usize>, range_column_b: Range<usize>
126 | ) {
127 |     let x_len = range_x_a.len();
128 |     let y_len = range_y_a.len();
129 |     if x_len == range_row_b.len() && y_len == range_column_b.len() {
130 |         unsafe{copy_rm_(
131 |             &(x_len as i32), &(y_len as i32),
132 |             ri_a.as_ptr(), &(size_a[0] as i32), &(size_a[1] as i32), &(size_a[2] as i32),
133 |             &(range_x_a.start as i32), &(range_y_a.start as i32), &(i_z_a as i32), &(copy_mod as i32),
134 |             matr_b.as_mut_ptr(),&(size_b[0] as i32), &(size_b[1] as i32),
135 |             &(range_row_b.start as i32), &(range_column_b.start as i32)
136 |         )}
137 |     } else {
138 |         panic!("Error: the data block for copy has different size between matrix and ri-tensor");
139 |     }
140 | }
141 | 
142 | 
143 | 
144 | /// copy data from matr_a to RI_b
145 | pub fn ri_copy_from_matr(
146 |     matr_a: &[f64], size_a: &[usize], range_row_a: Range<usize>, range_column_a: Range<usize>,
147 |     ri_b: &mut [f64], size_b: &[usize], range_row_b: Range<usize>, range_column_b: Range<usize>,
148 |     i_high_b: usize, copy_mod: i32
149 | ) {
150 |     let x_len = range_row_a.len();
151 |     let y_len = range_column_a.len();
152 |     if x_len == range_row_b.len() && y_len == range_column_b.len() {
153 |         unsafe{copy_mr_(
154 |             &(x_len as i32), &(y_len as i32),
155 |             matr_a.as_ptr(),&(size_a[0] as i32), &(size_a[1] as i32),
156 |             &(range_row_a.start as i32), &(range_column_a.start as i32),
157 |             ri_b.as_mut_ptr(),
158 |             &(size_b[0] as i32), &(size_b[1] as i32), &(size_b[2] as i32),
159 |             &(range_row_b.start as i32), &(range_column_b.start as i32),
160 |             &(i_high_b as i32), &copy_mod)
161 |         }
162 |     } else {
163 |         panic!("Error: the data block for copy has different size between the matrix and ri 3D-tensor");
164 |     }
165 | }
166 | 
167 | /// copy data from ri_a to ri_b
168 | pub fn ri_copy_from_ri(
169 |     ri_a: &[f64], size_a: &[usize], range_x_a: Range<usize>, range_y_a: Range<usize>, range_z_a: Range<usize>,
170 |     ri_b: &mut [f64], size_b: &[usize], range_x_b: Range<usize>, range_y_b: Range<usize>, range_z_b: Range<usize>
171 | ) {
172 |     let x_len = range_x_a.len();
173 |     let y_len = range_y_a.len();
174 |     let z_len = range_z_a.len();
175 |     if x_len == range_x_b.len() && y_len == range_y_b.len() && z_len == range_z_b.len() {
176 |         unsafe{copy_rr_(
177 |             &(x_len as i32), &(y_len as i32), &(z_len as i32),
178 |             ri_a.as_ptr(),
179 |             &(size_a[0] as i32), &(size_a[1] as i32), &(size_a[2] as i32),
180 |             &(range_x_a.start as i32), &(range_y_a.start as i32), &(range_z_a.start as i32),
181 |             ri_b.as_mut_ptr(),
182 |             &(size_b[0] as i32), &(size_b[1] as i32), &(size_b[2] as i32),
183 |             &(range_x_b.start as i32), &(range_y_b.start as i32), &(range_z_b.start as i32))
184 |         }
185 |     } else {
186 |         println!("{:?},{:?},{:?}", range_x_a, range_y_a, range_z_a);
187 |         println!("{:?},{:?},{:?}", range_x_b, range_y_b, range_z_b);
188 |         panic!("Error: the data block for copy has different size between ri 3D-tensors");
189 |     }
190 | }
191 | 
--------------------------------------------------------------------------------
/src/external_libs/restmatr.f90:
--------------------------------------------------------------------------------
 1 | program test
 2 |     implicit none
 3 | 
 4 |     !real*8 :: eigenvector(10,10)
 5 |     !real*8 :: ri3fn(10,10,20)
 6 |     !real*8 :: ri3mo(20,10,10)
 7 | 
 8 |     !eigenvector(:,:) = 1.0d0
 9 |     !ri3fn(:,:,:) = 2.0d0
10 |     !ri3mo(:,:,:) = 0.0d0
11 | 
12 |     !call ri_ao2mo_f(eigenvector,ri3fn,ri3mo,10,10,20)
13 | 
14 |     !write(*,*) ri3mo
15 | 
16 |     real*8 :: matr_a(3,3), matr_b(3,3), matr_c(3,3)
17 |     real*8 :: smatr_a(2,2), smatr_b(2,2), smatr_c(2,2)
18 |     integer :: i,j
19 | 
20 |     matr_c(:,:) = 2.0d0
21 |     do i = 1, 3
22 |         do j = 1,3
23 |             matr_a(i,j) = i + 3.0*(j-1.0)
24 |             matr_b(i,j) = matr_a(i,j) + 5.0
25 |         end do
26 |     end do
27 |     !write(*,*) matr_a, matr_b, matr_c
28 |     write(*,*) matr_a(2,2), matr_a(2,3)
29 |     write(*,*) matr_a(3,2), matr_a(3,3)
30 |     write(*,*) matr_b(1,1), matr_b(1,2)
31 |     write(*,*) matr_b(2,1), matr_b(2,2)
32 | 
33 |     call dgemm('N', 'N', 2,2,2, &
34 |                1.0d0, &
35 |                matr_a(2:3,2:3), 2, &
36 |                matr_b(1:2,1:2), 2, &
37 |                1.0d0, &
38 |                matr_c(2:3,1:2), 2 &
39 |                )
40 |     !call general_dgemm_f(&
41 |     !    matr_a, 3, 3, 1, 2, 1, 2, 'N', &
42 |     !    matr_b, 3, 3, 0, 2, 0, 2, 'N', &
43 |     !    matr_c, 3, 3, 1, 2, 0, 2, &
44 |     !    1.0d0, 1.0d0 &
45 |     !)
46 | 
47 |     write(*,*) matr_c(2,1), matr_c(2,2)
48 |     write(*,*) matr_c(3,1), matr_c(3,2)
49 | 
50 |     !smatr_a(:,:) = matr_a(2:3,2:3)
51 |     !smatr_b(:,:) = matr_b(1:2,1:2)
52 |     !smatr_c(:,:) = 2.0d0
53 |     !call dgemm('N', 'N', 3,3,3, &
54 |     !           1.0d0, &
55 |     !           matr_a(2:3,2:3), 3, &
56 |     !           matr_b(1:2,1:2), 3, &
57 |     !           1.0d0, &
58 |     !           matr_c(2:3,1:2), 3 &
59 |     !           
) 60 | 61 | end program test 62 | 63 | subroutine general_dgemm_f( & 64 | matr_a, rows_a, columns_a, start_row_a, len_row_a, start_column_a, len_column_a, opa, & 65 | matr_b, rows_b, columns_b, start_row_b, len_row_b, start_column_b, len_column_b, opb, & 66 | matr_c, rows_c, columns_c, start_row_c, len_row_c, start_column_c, len_column_c, & 67 | alpha, beta & 68 | ) 69 | 70 | implicit none 71 | 72 | integer :: rows_a, columns_a, start_row_a, len_row_a, start_column_a, len_column_a 73 | integer :: rows_b, columns_b, start_row_b, len_row_b, start_column_b, len_column_b 74 | integer :: rows_c, columns_c, start_row_c, len_row_c, start_column_c, len_column_c 75 | 76 | integer :: k, lda, ldb; 77 | 78 | real*8 :: matr_a(rows_a,columns_a) 79 | real*8 :: matr_b(rows_b,columns_b) 80 | real*8 :: matr_c(rows_c,columns_c) 81 | real*8 :: alpha, beta 82 | 83 | character*1 :: opa, opb 84 | 85 | if (opa == 'N') then 86 | k = len_column_a 87 | lda = len_row_c 88 | else 89 | k = len_row_a 90 | lda = len_row_a 91 | endif 92 | 93 | if (opb == 'N') then 94 | ldb = len_row_b 95 | else 96 | ldb = len_column_c 97 | endif 98 | 99 | call dgemm(opa, opb, len_row_c, len_column_c, k, & 100 | alpha, & 101 | matr_a(start_row_a+1:len_row_a+start_row_a, start_column_a+1:len_column_a+start_column_a), lda, & 102 | matr_b(start_row_b+1:len_row_b+start_row_b, start_column_b+1:len_column_b+start_column_b), ldb, & 103 | beta, & 104 | matr_c(start_row_c+1:len_row_c+start_row_c, start_column_c+1:len_column_c+start_column_c), len_row_c & 105 | ) 106 | 107 | end subroutine general_dgemm_f 108 | 109 | ! for the performance of O_V*V^{-1/2} in the generation of ri3fn 110 | ! (rest::molecule_io::prepare_ri3fn_for_ri_v_rayon) 111 | subroutine special_dgemm_f_01 ( & 112 | ten3_a, x_a, y_a, z_a, start_x_a, len_x_a, i_y_a, start_z_a, len_z_a, & 113 | matr_b, rows_b, columns_b, start_row_b, len_row_b, start_column_b, len_column_b, & 114 | alpha, beta & 115 | ) 116 | 117 | implicit none 118 | 119 | integer :: x_a, y_a, z_a, start_x_a, len_x_a, i_y_a, start_z_a, len_z_a 120 | integer :: rows_b, columns_b, start_row_b, len_row_b, start_column_b, len_column_b 121 | 122 | integer :: i_state; 123 | 124 | real*8 :: ten3_a(x_a,y_a,z_a) 125 | real*8 :: matr_b(rows_b,columns_b) 126 | real*8 :: matr_c(rows_b,columns_b) 127 | real*8 :: alpha, beta 128 | 129 | 130 | do i_state = 1, y_a 131 | 132 | !call dgemm('N', 'N', len_x_a, len_column_b, len_z_a, & 133 | ! alpha, & 134 | ! ten3_a(start_x_a+1:len_x_a+start_x_a, i_state, start_z_a+1:len_z_a+start_z_a), len_x_a, & 135 | ! matr_b(start_row_b+1:len_row_b+start_row_b, start_column_b+1:len_column_b+start_column_b), len_z_a, & 136 | ! beta, & 137 | ! ten3_a(start_x_a+1:len_x_a+start_x_a, i_state, start_z_a+1:len_z_a+start_z_a), len_x_a & 138 | ! 
)
139 | 
140 |     matr_c(:,:) = ten3_a(start_x_a+1:len_x_a+start_x_a, i_state, start_z_a+1:len_z_a+start_z_a)
141 | 
142 |     call dgemm('N', 'N', len_x_a, len_column_b, len_z_a, &
143 |          alpha, &
144 |          matr_c(:,:), len_x_a, &
145 |          matr_b(start_row_b+1:len_row_b+start_row_b, start_column_b+1:len_column_b+start_column_b), len_z_a, &
146 |          beta, &
147 |          ten3_a(start_x_a+1:len_x_a+start_x_a, i_state, start_z_a+1:len_z_a+start_z_a), len_x_a &
148 |     )
149 | 
150 | 
151 | 
152 | end do
153 | 
154 | end subroutine special_dgemm_f_01
155 | 
156 | 
157 | 
158 | subroutine ri_ao2mo_f(eigenvector, ri3fn, ri3mo, num_states, num_basis, num_auxbas)
159 | 
160 |     implicit none
161 | 
162 |     integer :: num_states, num_basis, num_auxbas
163 | 
164 |     real*8 :: eigenvector(num_basis, num_states)
165 |     real*8 :: ri3fn(num_basis, num_basis, num_auxbas)
166 |     real*8 :: ri3mo(num_auxbas, num_states, num_states)
167 | 
168 |     !real*8, dimension(:,:), allocatable :: tmp_matr_1
169 |     real*8, dimension(:,:), allocatable :: tmp_matr_2
170 | 
171 |     integer :: i_state, j_state, k_state
172 | 
173 |     !allocate(tmp_matr_1(num_basis, num_states))
174 |     allocate(tmp_matr_2(num_basis, num_states))
175 | 
176 | 
177 | 
178 |     ri3mo(:,:,:) = 0.0d0
179 | 
180 |     do i_state = 1, num_auxbas, 1
181 |         !tmp_matr_1(:,:) = ri3fn(:,:,i_state)
182 |         tmp_matr_2(:,:) = 0.0d0
183 |         call dgemm('N', 'N', num_basis, num_states, num_basis, 1.0d0, &
184 |              ri3fn(:,:,i_state), num_basis, eigenvector(:,:), num_basis, &
185 |              0.0d0, tmp_matr_2(:,:), num_basis)
186 |         call dgemm('T', 'N', num_states, num_states, num_basis, 1.0d0, &
187 |              eigenvector(:,:), num_basis, tmp_matr_2, num_basis, &
188 |              0.0d0, ri3mo(i_state,:,:), num_states)
189 |     enddo
190 | 
191 |     !deallocate(tmp_matr_1, tmp_matr_2)
192 |     deallocate(tmp_matr_2)
193 | 
194 | end subroutine ri_ao2mo_f
195 | 
196 | 
197 | subroutine copy_mm(x_len, y_len, &
198 |     f_matr, f_x_len, f_y_len, f_x_start, f_y_start, &
199 |     t_matr, t_x_len, t_y_len, t_x_start, t_y_start)
200 |     implicit none
201 | 
202 |     integer :: x_len, y_len
203 |     integer :: f_x_len, f_y_len, f_x_start, f_y_start
204 |     integer :: t_x_len, t_y_len, t_x_start, t_y_start
205 | 
206 |     real*8 :: f_matr(f_x_len, f_y_len)
207 |     real*8 :: t_matr(t_x_len, t_y_len)
208 | 
209 |     t_matr(t_x_start+1:t_x_start+x_len, t_y_start+1:t_y_start+y_len) = &
210 |         f_matr(f_x_start+1:f_x_start+x_len, f_y_start+1:f_y_start+y_len)
211 | 
212 | end subroutine copy_mm
213 | 
214 | ! copy a data block from a matrix into a rank-3 tensor
215 | subroutine copy_mr(x1_len, x2_len, &
216 |     f_matr, f_x_len, f_y_len, f_x1_start, f_x2_start, &
217 |     t_ri, t_x_len, t_y_len, t_z_len, t_x1_start, t_x2_start, t_x3, mod)
218 |     implicit none
219 | 
220 |     integer :: x1_len, x2_len
221 |     integer :: f_x_len, f_y_len, f_x1_start, f_x2_start
222 |     integer :: t_x_len, t_y_len, t_z_len, t_x1_start, t_x2_start, t_x3, mod
223 | 
224 |     real*8 :: f_matr(f_x_len, f_y_len)
225 |     real*8 :: t_ri(t_x_len, t_y_len, t_z_len)
226 | 
227 |     if (mod .eq. 0) then
228 |         t_ri(t_x1_start+1:t_x1_start+x1_len, t_x2_start+1:t_x2_start+x2_len, t_x3+1) = &
229 |             f_matr(f_x1_start+1:f_x1_start+x1_len, f_x2_start+1:f_x2_start+x2_len)
230 |     else if (mod .eq. 1) then
231 |         t_ri(t_x1_start+1:t_x1_start+x1_len, t_x3+1, t_x2_start+1:t_x2_start+x2_len) = &
232 |             f_matr(f_x1_start+1:f_x1_start+x1_len, f_x2_start+1:f_x2_start+x2_len)
233 |     else if (mod .eq. 2) then
234 |         t_ri(t_x3+1, t_x1_start+1:t_x1_start+x1_len, t_x2_start+1:t_x2_start+x2_len) = &
235 |             f_matr(f_x1_start+1:f_x1_start+x1_len, f_x2_start+1:f_x2_start+x2_len)
236 |     endif
237 | 
238 | end subroutine copy_mr
239 | 
240 | ! copy a data block from a rank-3 tensor into a matrix
241 | subroutine copy_rm(x1_len, x2_len, &
242 |     f_ri, f_x_len, f_y_len, f_z_len, f_x1_start, f_x2_start, f_x3, mod, &
243 |     t_matr, t_x_len, t_y_len, t_x1_start, t_x2_start)
244 |     implicit none
245 | 
246 |     integer :: x1_len, x2_len
247 |     integer :: t_x_len, t_y_len, t_x1_start, t_x2_start
248 |     integer :: f_x_len, f_y_len, f_z_len, f_x1_start, f_x2_start, f_x3, mod
249 | 
250 |     real*8 :: t_matr(t_x_len, t_y_len)
251 |     real*8 :: f_ri(f_x_len, f_y_len, f_z_len)
252 | 
253 |     if (mod .eq. 0) then
254 |         t_matr(t_x1_start+1:t_x1_start+x1_len, t_x2_start+1:t_x2_start+x2_len) = &
255 |             f_ri(f_x1_start+1:f_x1_start+x1_len, f_x2_start+1:f_x2_start+x2_len, f_x3+1)
256 |     else if (mod .eq. 1) then
257 |         t_matr(t_x1_start+1:t_x1_start+x1_len, t_x2_start+1:t_x2_start+x2_len) = &
258 |             f_ri(f_x1_start+1:f_x1_start+x1_len, f_x3+1, f_x2_start+1:f_x2_start+x2_len)
259 |     else if (mod .eq. 2) then
260 |         t_matr(t_x1_start+1:t_x1_start+x1_len, t_x2_start+1:t_x2_start+x2_len) = &
261 |             f_ri(f_x3+1, f_x1_start+1:f_x1_start+x1_len, f_x2_start+1:f_x2_start+x2_len)
262 |     endif
263 | 
264 | end subroutine copy_rm
265 | 
266 | subroutine copy_rr(x1_len, x2_len, x3_len, &
267 |     f_ri, f_x1_len, f_x2_len, f_x3_len, f_x1_start, f_x2_start, f_x3_start, &
268 |     t_ri, t_x1_len, t_x2_len, t_x3_len, t_x1_start, t_x2_start, t_x3_start)
269 |     implicit none
270 | 
271 |     integer :: x1_len, x2_len, x3_len
272 |     integer :: f_x1_len, f_x2_len, f_x3_len, f_x1_start, f_x2_start, f_x3_start
273 |     integer :: t_x1_len, t_x2_len, t_x3_len, t_x1_start, t_x2_start, t_x3_start
274 | 
275 |     real*8 :: f_ri(f_x1_len, f_x2_len, f_x3_len)
276 |     real*8 :: t_ri(t_x1_len, t_x2_len, t_x3_len)
277 | 
278 |     t_ri(t_x1_start+1:t_x1_start+x1_len, &
279 |          t_x2_start+1:t_x2_start+x2_len, &
280 |          t_x3_start+1:t_x3_start+x3_len) = &
281 |         f_ri(f_x1_start+1:f_x1_start+x1_len, &
282 |              f_x2_start+1:f_x2_start+x2_len, &
283 |              f_x3_start+1:f_x3_start+x3_len)
284 | 
285 | end subroutine copy_rr
-------------------------------------------------------------------------------- /src/index.rs: --------------------------------------------------------------------------------
1 | use std::{fmt::Display, ops::{IndexMut,Index}, slice::SliceIndex};
2 | 
3 | 
4 | //use crate::{ERIFull, ERIFold4, MatrixFull, MatrixFullSliceMut, MatrixFullSlice, MatrixUpperSliceMut, MatrixUpper, MatrixUpperSlice, RIFull, TensorOpt};
5 | 
6 | use crate::*;
7 | //use crate::matrix::*;
8 | //use crate::matrix::matrixfull::*;
9 | //use crate::matrix::matrixfullslice::*;
10 | //use crate::matrix::matrixupper::*;
11 | 
12 | 
13 | fn contain_of(a:&[usize],b:&[usize]) -> bool {
14 |     a.iter().zip(b.iter()).fold(true, |flg,(aa,bb)| flg && bb<aa)
15 | }
16 | 
17 | 
18 | //========================================================
19 | pub trait Indexing {
20 |     fn indexing(&self, position:&[usize]) -> usize;
21 |     fn indexing_mat(&self, position:&[usize]) -> usize;
22 |     fn reverse_indexing(&self, position:usize) -> Vec<usize>;
23 | }
24 | pub trait IndexingHP {
25 |     fn indexing_last2rank(&self, position:&[usize]) -> usize;
26 | }
27 | //========================================================
28 | 
29 | 
30 | /// TODO: at present, the TensorIndex traits do not perform a bounds check on each dimension.
31 | /// This is extremely dangerous and should be addressed SOON!!!!!!
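//
// Added note (an illustrative sketch, not part of the original source): the
// checked `index2d`/`index4d` implementations below for the full-storage
// types (`ERIFull`, `ERIFold4`, `MatrixFull`, ...) validate a position with
// `contain_of`, i.e. element-wise `position[k] < size[k]`, and then fold it
// into a flat, column-major offset:
//
//     offset = sum_k position[k] * indicing[k]
//
// For a 3x4 `MatrixFull`, `indicing == [1, 3]`, so `position == [2, 1]`
// maps to offset 2*1 + 1*3 = 5 in `data`. By contrast, `index1d` and the
// `MatrixUpper` implementations only compare the folded offset against
// `data.len()`, which is what the warning above refers to.
//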
32 | 33 | pub trait TensorIndex { 34 | // Indexing for regulear tensors, for example, ERIFull, MatrixFull 35 | fn index1d(&self, position:usize) -> Option {None} 36 | fn index2d(&self, position:[usize;2]) -> Option {None} 37 | fn index3d(&self, position:[usize;3]) -> Option {None} 38 | fn index4d(&self, position:[usize;4]) -> Option {None} 39 | } 40 | 41 | pub trait TensorIndexUncheck { 42 | // Indexing for the tensors with the elements in the upper block 43 | fn index2d_uncheck(&self, position:[usize;2]) -> Option {None} 44 | fn index4d_uncheck(&self, position:[usize;4]) -> Option {None} 45 | } 46 | 47 | 48 | impl TensorIndex for ERIFull { 49 | #[inline] 50 | fn index1d(&self, position: usize) -> Option 51 | { 52 | if position Option 60 | { 61 | if contain_of(&self.size, &position) { 62 | Some(position.iter() 63 | .zip(self.indicing.iter()) 64 | .map(|(pi,interval)| pi*interval) 65 | .sum()) 66 | } else { 67 | None 68 | } 69 | } 70 | } 71 | 72 | impl TensorIndex for ERIFold4 { 73 | #[inline] 74 | fn index1d(&self, position: usize) -> Option 75 | { 76 | if position Option 84 | { 85 | if contain_of(&self.size, &position) { 86 | Some(position.iter() 87 | .zip(self.indicing.iter()) 88 | .map(|(pi,interval)| pi*interval) 89 | .sum()) 90 | } else { 91 | None 92 | } 93 | } 94 | #[inline] 95 | fn index4d(&self, position: [usize;4]) -> Option 96 | { 97 | let mut tp:[usize;2] = [0;2]; 98 | tp[0] = if position[0] <= position[1] { 99 | position[1]*(position[1]+1)/2+position[0] 100 | } else { 101 | position[0]*(position[0]+1)/2+position[1] 102 | }; 103 | tp[1] = if position[2] <= position[3] { 104 | position[3]*(position[3]+1)/2+position[2] 105 | } else { 106 | position[2]*(position[2]+1)/2+position[3] 107 | }; 108 | if contain_of(&self.size, &tp) { 109 | Some(tp.iter() 110 | .zip(self.indicing.iter()) 111 | .map(|(pi,interval)| pi*interval) 112 | .sum()) 113 | } else { 114 | None 115 | } 116 | } 117 | } 118 | impl TensorIndexUncheck for ERIFold4 { 119 | fn index4d_uncheck(&self, position:[usize;4]) -> Option { 120 | let rp = [(position[1]+1)*position[1]/2+position[0], 121 | (position[3]+1)*position[3]/2+position[2]]; 122 | if contain_of(&self.size,&rp) { 123 | Some(rp.iter() 124 | .zip(self.indicing.iter()) 125 | .map(|(pi,interval)| pi*interval) 126 | .sum()) 127 | } else { 128 | None 129 | } 130 | } 131 | } 132 | 133 | /// Indexing for MatrixFull and its (mut) borrowed variants. 
134 | /// MatrixFullSlice and MatrixFullMut 135 | impl TensorIndex for MatrixFull { 136 | #[inline] 137 | fn index1d(&self, position: usize) -> Option 138 | { 139 | if position Option 147 | { 148 | if contain_of(&self.size,&position) { 149 | Some(position.iter() 150 | .zip(self.indicing.iter()) 151 | .map(|(pi,interval)| pi*interval) 152 | .sum()) 153 | } else { 154 | None 155 | } 156 | } 157 | } 158 | 159 | impl <'a, T> TensorIndex for MatrixFullSliceMut<'a,T> { 160 | #[inline] 161 | fn index1d(&self, position: usize) -> Option 162 | { 163 | if position Option 171 | { 172 | if contain_of(&self.size[..2],&position) { 173 | Some(position.iter() 174 | .zip(self.indicing[..2].iter()) 175 | .map(|(pi,interval)| pi*interval) 176 | .sum()) 177 | } else { 178 | None 179 | } 180 | } 181 | } 182 | 183 | impl <'a, T> TensorIndex for MatrixFullSlice<'a,T> { 184 | #[inline] 185 | fn index1d(&self, position: usize) -> Option 186 | { 187 | if position Option 195 | { 196 | if contain_of(&self.size[..2],&position) { 197 | Some(position.iter() 198 | .zip(self.indicing[..2].iter()) 199 | .map(|(pi,interval)| pi*interval) 200 | .sum()) 201 | } else { 202 | None 203 | } 204 | } 205 | } 206 | 207 | /// Indexing for MatrixUpper and its (mut) borrowed variants. 208 | /// MatrixUpperSlice and MatrixUpperMut 209 | impl TensorIndex for MatrixUpper { 210 | #[inline] 211 | fn index1d(&self, position: usize) -> Option 212 | { 213 | if position Option 221 | { 222 | let (i,j) = if position[0] <= position[1] {(position[0],position[1])} else {(position[1],position[0])}; 223 | let tp = (j+1)*j/2+i; 224 | if tp < self.data.len() {Some(tp)} else {None} 225 | } 226 | } 227 | impl TensorIndexUncheck for MatrixUpper { 228 | #[inline] 229 | fn index2d_uncheck(&self, position:[usize;2]) -> Option { 230 | let tp = (position[1]+1)*position[1]/2+position[0]; 231 | if tp < self.data.len() {Some(tp)} else {None} 232 | } 233 | } 234 | impl <'a, T> TensorIndex for MatrixUpperSliceMut<'a,T> { 235 | #[inline] 236 | fn index1d(&self, position: usize) -> Option 237 | { 238 | if position Option 246 | { 247 | let (i,j) = if position[0] <= position[1] {(position[0],position[1])} else {(position[1],position[0])}; 248 | let tp = (j+1)*j/2+i; 249 | if tp < self.data.len() {Some(tp)} else {None} 250 | } 251 | } 252 | impl <'a, T> TensorIndexUncheck for MatrixUpperSliceMut<'a,T> { 253 | #[inline] 254 | fn index2d_uncheck(&self, position:[usize;2]) -> Option { 255 | let tp = (position[1]+1)*position[1]/2+position[0]; 256 | if tp < self.data.len() {Some(tp)} else {None} 257 | } 258 | } 259 | impl <'a, T> TensorIndex for MatrixUpperSlice<'a,T> { 260 | #[inline] 261 | fn index1d(&self, position: usize) -> Option 262 | { 263 | if position Option 271 | { 272 | let (i,j) = if position[0] <= position[1] {(position[0],position[1])} else {(position[1],position[0])}; 273 | let tp = (j+1)*j/2+i; 274 | if tp < self.data.len() {Some(tp)} else {None} 275 | } 276 | 277 | fn index3d(&self, position:[usize;3]) -> Option {None} 278 | 279 | fn index4d(&self, position:[usize;4]) -> Option {None} 280 | } 281 | impl <'a, T> TensorIndexUncheck for MatrixUpperSlice<'a,T> { 282 | #[inline] 283 | fn index2d_uncheck(&self, position:[usize;2]) -> Option { 284 | let tp = (position[1]+1)*position[1]/2+position[0]; 285 | if tp < self.data.len() {Some(tp)} else {None} 286 | } 287 | } 288 | 289 | -------------------------------------------------------------------------------- /src/lib.rs: -------------------------------------------------------------------------------- 1 | //! 
# rest_tensors
2 | //!
3 | //! **rest_tensors** is a linear algebra library that aims to provide efficient tensor operations for the Rust-based electronic structure tool (REST).
4 | //!
5 | //! ### Using rest_tensors
6 | //!
7 | //! - Several global environment variables should be specified:
8 | //!     1) REST_BLAS_DIR: the path to the OpenBLAS library: `libopenblas.so`
9 | //!     2) REST_FORTRAN_COMPILER: the compiler used to build a Fortran library for efficient tensor operations: `restmatr.f90` -> `librestmatr.so`
10 | //!     3) REST_EXT_DIR: the path to store the Fortran library `librestmatr.so` after compilation
11 | //!     4) LD_LIBRARY_PATH: attach REST_BLAS_DIR and REST_EXT_DIR to LD_LIBRARY_PATH: `export LD_LIBRARY_PATH="$REST_BLAS_DIR:$REST_EXT_DIR:$LD_LIBRARY_PATH"`
12 | //!
13 | //! - Simply add the following to your Cargo.toml file:
14 | //! ```ignore
15 | //! [dependencies]
16 | //! # replace the * with the latest version
17 | //! rest_tensors = "*"
18 | //! ```
19 | //!
20 | //! ### Features
21 | //!
22 | //! * [`MatrixFull`](MatrixFull): the `column-major` rank-2 tensor, i.e. `matrix`, which is used for the molecular geometries,
23 | //!   orbital coefficients, density matrices, and most of the intermediate data in REST.
24 | //!   There are several related structures for matrices, which share the same traits, namely
25 | //!   [`BasicMatrix`](BasicMatrix), [`BasicMatrixOpt`](BasicMatrixOpt), [`MathMatrix`](MathMatrix) and so forth.
26 | //! * [`MatrixUpper`](MatrixUpper): the structure storing the upper triangle of a matrix, which is used for the Hamiltonian matrix and many other Hermitian matrices in the REST package.
27 | //! * [`RIFull`](RIFull): the `column-major` rank-3 tensor structure, which is used for the three-center integrals
28 | //!   in the resolution-of-identity (RI) approximation, for example, ri3ao, ri3mo, and so forth.
29 | //!   **NOTE**: although RIFull is created for a very specific purpose in REST, most of the relevant operations provided here are quite general and can easily be extended to any other rank-3 tensors.
30 | //! * [`ERIFull`](ERIFull): the `column-major` rank-4 tensor for electron repulsion integrals (ERI).
31 | //!   **NOTE**: ERIFull is created to handle the analytic electron-repulsion integrals in REST.
32 | //!   Because REST mainly uses the resolution-of-identity (RI) technique, the analytic ERI is provided only for benchmarks and is thus not fully optimized.
33 | //!
34 | //!
35 | //! * Detailed usage of [`MatrixFull`](MatrixFull) can be found on the corresponding pages, while those of [`RIFull`] and [`ERIFull`] are not yet ready.
36 | //!
37 | //! ### To-Do-List
38 | //!
39 | //! * Introduce more LAPACK and BLAS functions for the rank-2 matrix structs in rest_tensors, like [`MatrixFull`](MatrixFull), [`MatrixFullSlice`](MatrixFullSlice), [`SubMatrixFull`](SubMatrixFull) and so forth.
40 | //! * Reoptimize the API for the rank-3 tensor, mainly [`RIFull`](RIFull), and complete the detailed usage accordingly.
41 | //! * Enable the ScaLAPACK (scalable linear algebra package) functions for the rank-2 matrix structs in rest_tensors, like [`MatrixFull`](MatrixFull).
42 | //! * Conversions between `rest_tensors` and `numpy` in Python.
43 | //!
44 | //!
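//! ### Quick example
//!
//! A minimal sketch of the basic workflow (added here for illustration; the
//! calls below mirror those exercised in this crate's own tests, but treat
//! the snippet as a sketch rather than a complete reference):
//! ```ignore
//! use rest_tensors::MatrixFull;
//!
//! // two 3x3 matrices, stored column-major
//! let mut matr_a = MatrixFull::from_vec([3,3],
//!     (1..10).map(|x| x as f64).collect::<Vec<f64>>()).unwrap();
//! let mut matr_b = MatrixFull::new([3,3], 1.0);
//!
//! // element-wise arithmetic via operator overloading (the operands are consumed)
//! let matr_c = matr_a.clone() + matr_b.clone();
//! assert_eq!(matr_c[[0,0]], 2.0);
//!
//! // matr_d = 1.0 * matr_a * matr_b + 0.0 * matr_d, via the BLAS dgemm wrapper
//! let mut matr_d = MatrixFull::new([3,3], 0.0);
//! matr_d.lapack_dgemm(&mut matr_a, &mut matr_b, 'N', 'N', 1.0, 0.0);
//! ```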
45 | #![allow(unused)] 46 | extern crate blas; 47 | extern crate lapack; 48 | //extern crate blas_src; 49 | //extern crate lapack_src; 50 | 51 | use std::fmt::Display; 52 | use std::marker::PhantomData; 53 | use std::ops::{Add, AddAssign, Sub, SubAssign}; 54 | use anyhow; 55 | 56 | use lapack::{dsyev,dspevx,dspgvx, dlamch}; 57 | use blas::dgemm; 58 | //mod tensors_slice; 59 | pub mod matrix; 60 | pub mod eri; 61 | pub mod ri; 62 | pub mod external_libs; 63 | //mod tensors; 64 | pub mod tensor_basic_operation; 65 | pub mod davidson; 66 | 67 | //pub mod matrix_blas_lapack; 68 | mod index; 69 | //use typenum::{U1,U2,U3,U4}; 70 | //use crate::tensors_slice::{TensorsSliceMut,TensorsSlice}; 71 | //use itertools::iproduct; 72 | 73 | 74 | pub use crate::tensor_basic_operation::*; 75 | //pub use crate::tensors::*; 76 | pub use crate::eri::*; 77 | pub use crate::matrix::*; 78 | pub use crate::matrix::matrixfull::*; 79 | pub use crate::matrix::matrixfullslice::*; 80 | pub use crate::matrix::matrixupper::*; 81 | pub use crate::matrix::submatrixfull::*; 82 | pub use crate::ri::*; 83 | pub use crate::davidson::*; 84 | 85 | #[derive(Clone,Debug,PartialEq)] 86 | pub struct Tensors4D { 87 | /// Coloum-major Tensors with the rank of 4 at most, 88 | /// designed for quantum chemistry calculations specifically. 89 | pub data : Vec, 90 | pub size : [usize;4], 91 | pub indicing: [usize;4], 92 | pub rank : D, 93 | //pub store_format : MatFormat, 94 | //pub size : Vec, 95 | //pub indicing: [usize;4], 96 | } 97 | 98 | 99 | const SAFE_MINIMUM:f64 = 1.0E-16; 100 | 101 | //recursive wrapper 102 | struct RecFn(Box,(T,T)) -> (T,T)>); 103 | impl RecFn { 104 | fn call(&self, f: &RecFn, n: (T,T)) -> (T,T) { 105 | (self.0(f,n)) 106 | } 107 | } 108 | 109 | 110 | #[cfg(test)] 111 | mod tests { 112 | use itertools::{iproduct, Itertools}; 113 | use libc::access; 114 | 115 | use crate::{index::Indexing, MatrixFull, RIFull, MatrixUpper, print_vec}; 116 | //#[test] 117 | //fn test_matrix_index() { 118 | // let size_a:Vec=vec![3,3]; 119 | // let mut tmp_a = vec![ 120 | // 3.0,1.0,1.0, 121 | // 1.0,3.0,1.0, 122 | // 1.0,1.0,3.0]; 123 | // let mut my_a = Tensors::from_vec('F', size_a, tmp_a); 124 | // println!("{}",my_a[(0usize,0usize)]); 125 | //} 126 | #[test] 127 | fn test_operator_overloading() { 128 | let a = MatrixFull::from_vec([3,3],vec![0.0,1.0,2.0,3.0,4.0,5.0,6.0,7.0,8.0]).unwrap(); 129 | let b = MatrixFull::from_vec([3,3],vec![8.0,3.0,4.0,2.0,6.0,3.0,9.0,16.0,6.0]).unwrap(); 130 | println!("a:{:?}, b:{:?}", a,b); 131 | let c = a+b; 132 | println!("c:{:?}", c); 133 | println!("c:[{:8.4},{:8.4},{:8.4}]", c[[0,0]],c[[0,1]],c[[0,2]]); 134 | println!("c:[{:8.4},{:8.4},{:8.4}]", c[[1,0]],c[[1,1]],c[[1,2]]); 135 | println!("c:[{:8.4},{:8.4},{:8.4}]", c[[2,0]],c[[2,1]],c[[2,2]]); 136 | //let a = MatrixFull::new([3,3],1); 137 | //let b = MatrixFull::new([2,2],2); 138 | //let c = b+a; 139 | //println!("c:{:?}", c); 140 | //let a = MatrixFull::from_vec([2,2],vec![3,2,1,3]).unwrap(); 141 | //let b = MatrixFull::from_vec([2,2],vec![2,3,3,1]).unwrap(); 142 | //let c = b-a; 143 | //println!("c:{:?}", &c); 144 | //println!("c:[{},{}]", c[[0,0]],c[[1,0]]); 145 | //println!("c:[{},{}]", c[[0,1]],c[[1,1]]); 146 | } 147 | #[test] 148 | fn test_slice_concat() { 149 | let mut orig_a = vec![1,2,3,4,5,6,7]; 150 | let mut orig_b = vec![10,12,15,27,31,3,1]; 151 | let mut a = &mut orig_a[2..5]; 152 | let mut b = &mut orig_b[2..5]; 153 | let mut c = vec![a,b].into_iter().flatten(); 154 | c.for_each(|i| {*i = *i+2}); 155 | 156 | println!("{:?}", 
orig_a);
157 |         println!("{:?}", orig_b);
158 | 
159 |         let dd = 2..6;
160 |         println!("{},{},{}",dd.start, dd.end, dd.len());
161 |         println!("{},{},{}",dd.start, dd.end, dd.len());
162 |         dd.for_each(|i| {println!("{}",i)});
163 | 
164 |         //c.enumerate().for_each(|i| {
165 |         //    println!{"index: {}, value: {}", i.0,i.1};
166 |         //})
167 |     }
168 |     //}
169 |     #[test]
170 |     fn matfull_inverse_and_power() {
171 |         let orig_a = vec![1.0, 0.2350377623170771, 0.00000000000000014780661935396685, 0.0000000000000001230564920088275, 0.0, 0.05732075877050055, 0.05732075877577619, 0.05732075876606951, 0.2350377623170771, 1.0000000000000002, 0.0000000000000006843048497264658, -0.0000000000000006063573786851014, 0.0, 0.4899272978714807, 0.4899272978956163, 0.48992729785120903, 0.00000000000000014780661935396685, 0.0000000000000006843048497264658, 1.0000000000000004, -0.00000000000000000000000000000030065232611750355, 0.0, 0.43694556071760393, -0.14564693387985528, -0.14564891678026162, 0.0000000000000001230564920088275, -0.0000000000000006063573786851014, -0.00000000000000000000000000000030065232611750355, 1.0000000000000004, 0.0, -0.00000000000000019929707359479999, -0.3447708719127397, 0.367656510461097, 0.0, 0.0, 0.0, 0.0, 1.0000000000000004, 0.0, -0.22548046384235368, -0.1858400020910537, 0.05732075877050055, 0.4899272978714807, 0.43694556071760393, -0.00000000000000019929707359479999, 0.0, 1.0000000000000002, 0.20144562931480953, 0.20144477338087088, 0.05732075877577619, 0.4899272978956163, -0.14564693387985528, -0.3447708719127397, -0.22548046384235368, 0.20144562931480953, 1.0000000000000002, 0.20144477335123845, 0.05732075876606951, 0.48992729785120903, -0.14564891678026162, 0.367656510461097, -0.1858400020910537, 0.20144477338087088, 0.20144477335123845, 1.0000000000000002];
172 | 
173 |         let mut tmp_mat = MatrixFull::from_vec([8,8],orig_a.clone()).unwrap();
174 |         println!("tmp_mat:");
175 |         print_vec(&tmp_mat.data, tmp_mat.size[0]);
176 | 
177 |         let mut inv_tmp_mat = tmp_mat.lapack_inverse().unwrap();
178 |         println!("inv_tmp_mat:");
179 |         print_vec(&inv_tmp_mat.data, inv_tmp_mat.size[0]);
180 | 
181 |         //let mut tmp_mat_2 = MatrixFull::new([8,8],0.0);
182 |         //tmp_mat_2.lapack_dgemm(&mut tmp_mat, &mut inv_tmp_mat, 'N', 'N', 1.0, 0.0);
183 |         //println!("tmp_mat * inv_tmp_mat:");
184 |         //print_vec(&tmp_mat_2.data, tmp_mat_2.size[0]);
185 | 
186 |         let mut inv_tmp_mat_2 = tmp_mat.lapack_power(-0.5, 10.0E-6).unwrap();
187 |         println!("inv_tmp_mat_2:");
188 |         print_vec(&inv_tmp_mat_2.data, inv_tmp_mat_2.size[0]);
189 | 
190 |     }
191 | }
192 | 
193 | 
194 | fn print_vec(buf: &Vec<f64>, len_per_line: usize) {
195 |     buf.chunks(len_per_line).for_each(|value| {
196 |         let mut tmp_str = String::new();
197 |         value.into_iter().enumerate().for_each(|x| {
198 |             if x.0 == 0 {
199 |                 tmp_str = format!("{:16.8}",x.1);
200 |             } else {
201 |                 tmp_str = format!("{},{:16.8}",tmp_str,x.1);
202 |             }
203 |         });
204 |         println!("{}",tmp_str);
205 |     });
206 | }
207 | 
-------------------------------------------------------------------------------- /src/matrix/einsum.rs: --------------------------------------------------------------------------------
1 | use crate::{BasicMatrix, MatrixFull, matrix_blas_lapack::_dgemm_full};
2 | 
3 | pub fn _einsum_general<'a, A, B>(mat_a: &A, mat_b: &B, opt: &str) -> MatrixFull<f64>
4 |     where A: BasicMatrix<'a,f64>,
5 |           B: BasicMatrix<'a,f64>
6 | {
7 |     match opt {
8 |         "ij,j->ij" => _einsum_01_general(mat_a, mat_b),
9 |         "ip,ip->p" => _einsum_02_general(mat_a, mat_b),
10 |         "i,j->ij" => _einsum_03_general(mat_a, mat_b),
11 |         "ij,jk->ik" => _einsum_04_general(mat_a, mat_b),
12 |         _ => panic!("Not implemented for einsum: {}", opt),
13 |     }
14 | }
15 | 
16 | #[inline]
17 | // einsum: ip,ip->p
18 | pub fn _einsum_02_general<'a, A, B>(mat_a: &A, mat_b: &B) -> MatrixFull<f64>
19 |     where A: BasicMatrix<'a,f64>,
20 |           B: BasicMatrix<'a,f64>
21 | {
22 |     let a_y = mat_a.size()[1];
23 |     let b_y = mat_b.size()[1];
24 |     let a_x = mat_a.size()[0];
25 |     let b_x = mat_b.size()[0];
26 |     if (a_x == 0 || b_x == 0) {return MatrixFull::new([a_y.min(b_y),1],0.0)};
27 |     let mut out_vec = vec![0.0;a_y.min(b_y)];
28 | 
29 |     mat_a.data_ref().unwrap().chunks_exact(a_x).zip(mat_b.data_ref().unwrap().chunks_exact(b_x))
30 |         .zip(out_vec.iter_mut())
31 |         .for_each(|((mat_a_p,mat_b_p),out_vec_p)| {
32 |             *out_vec_p = mat_a_p.iter().zip(mat_b_p.iter())
33 |                 .fold(0.0, |acc, (mat_a_ip, mat_b_ip)|
34 |                     {acc + mat_a_ip*mat_b_ip});
35 |         });
36 |     MatrixFull::from_vec([a_y.min(b_y),1],out_vec).unwrap()
37 | }
38 | #[inline]
39 | // einsum: ij,j->ij
40 | pub fn _einsum_01_general<'a, A, B>(mat_a: &A, mat_b: &B) -> MatrixFull<f64>
41 |     where A: BasicMatrix<'a,f64>,
42 |           B: BasicMatrix<'a,f64>
43 | {
44 |     let vec_b = mat_b.data_ref().unwrap();
45 |     let i_len = mat_a.size()[0];
46 |     let j_len = vec_b.len();
47 |     if (i_len == 0 || j_len == 0) {return MatrixFull::new([i_len,j_len],0.0)};
48 |     let mut om = MatrixFull::new([i_len,j_len],0.0);
49 | 
50 |     om.iter_columns_full_mut().zip(mat_a.data_ref().unwrap().chunks_exact(i_len))
51 |         .map(|(om_j,mat_a_j)| {(om_j,mat_a_j)})
52 |         .zip(vec_b.iter())
53 |         .for_each(|((om_j,mat_a_j),vec_b_j)| {
54 |             om_j.iter_mut().zip(mat_a_j.iter()).for_each(|(om_ij,mat_a_ij)| {
55 |                 *om_ij = *mat_a_ij*vec_b_j
56 |             });
57 |         });
58 |     om
59 | }
60 | 
61 | // einsum: i,j->ij
62 | // vec_a: a column vector of i rows; vec_b: a row vector of j columns;
63 | // produces a matrix of size [i,j]
64 | pub fn _einsum_03_general<'a, A, B>(mat_a: &A, mat_b: &B) -> MatrixFull<f64>
65 |     where A: BasicMatrix<'a,f64>,
66 |           B: BasicMatrix<'a,f64>
67 | {
68 | 
69 |     let i_len = mat_a.data_ref().unwrap().len();
70 |     let j_len = mat_b.data_ref().unwrap().len();
71 |     if (i_len == 0 || j_len == 0) {return MatrixFull::new([i_len,j_len],0.0)};
72 |     let mut om = MatrixFull::new([i_len,j_len],0.0);
73 | 
74 |     om.iter_columns_full_mut().zip(mat_b.data_ref().unwrap().iter())
75 |         .map(|(om_j, vec_b)| {(om_j, vec_b)})
76 |         .for_each(|(om_j, vec_b)| {
77 |             om_j.iter_mut().zip(mat_a.data_ref().unwrap().iter()).for_each(|(om_ij, vec_a)| {
78 |                 *om_ij = *vec_a*vec_b
79 |             });
80 |         });
81 | 
82 |     om
83 | }
84 | 
85 | // einsum: ij,jk->ik
86 | // simply calls a dgemm function
87 | pub fn _einsum_04_general<'a, A, B>(mat_a: &A, mat_b: &B) -> MatrixFull<f64>
88 |     where A: BasicMatrix<'a,f64>,
89 |           B: BasicMatrix<'a,f64>
90 | {
91 |     let mut mat_c = MatrixFull::new([mat_a.size()[0],mat_b.size()[1]],0.0);
92 |     _dgemm_full(mat_a,'N',mat_b,'N',&mut mat_c,1.0,0.0);
93 |     mat_c
94 | }
-------------------------------------------------------------------------------- /src/matrix/matrix_trait.rs: --------------------------------------------------------------------------------
1 | use std::ops::Range;
2 | 
3 | pub struct IncreaseStepBy<I> {
4 |     pub iter: I,
5 |     step: usize,
6 |     increase: usize,
7 |     first_take: bool,
8 | }
9 | 
10 | impl<I> IncreaseStepBy<I> {
11 |     pub fn new(iter: I, step: usize, increase: usize) -> IncreaseStepBy<I> {
12 |         assert!(step!=0);
13 |         IncreaseStepBy {iter, step, first_take: true, increase }
14 |     }
15 | }
16 | 
17 | impl<I> Iterator for IncreaseStepBy<I>
18 |     where I: Iterator,
19 | {
20 |     type Item = I::Item;
21 |     #[inline]
22 |     fn 
next(&mut self) -> Option { 23 | if self.first_take { 24 | self.first_take = false; 25 | //self.step -= self.increase; 26 | self.iter.next() 27 | } else { 28 | let cur_step = self.step; 29 | self.step += self.increase; 30 | self.iter.nth(cur_step) 31 | } 32 | } 33 | } 34 | 35 | 36 | 37 | pub struct SubMatrixStepBy { 38 | pub iter: I, 39 | rows: Range, 40 | columns: Range, 41 | size: [usize;2], 42 | step: usize, 43 | max: usize, 44 | position: usize, 45 | first_take: bool, 46 | } 47 | impl SubMatrixStepBy { 48 | pub fn new(iter: I, rows: Range, columns: Range, size:[usize;2]) -> SubMatrixStepBy { 49 | let position =columns.start*size[0] + rows.start; 50 | let step = size[0]-rows.end+rows.start; 51 | let max = (columns.end-1)*size[0] + rows.end-1; 52 | SubMatrixStepBy{iter, rows, columns, size, step, position,max,first_take: true} 53 | } 54 | } 55 | 56 | 57 | impl Iterator for SubMatrixStepBy 58 | where I:Iterator, 59 | { 60 | type Item = I::Item; 61 | #[inline] 62 | fn next(&mut self) -> Option { 63 | 64 | // MARK:: change by Igor 24-05-11, need double check 65 | let curr_row = self.position%unsafe{self.size.get_unchecked(0)}; 66 | //let curr_column = self.position/unsafe{self.size.get_unchecked(0)}; 67 | 68 | let is_in_row_range = curr_row >= self.rows.start && curr_row < self.rows.end; 69 | //let is_in_col_range = curr_column >= self.columns.start && curr_column < self.columns.end; 70 | 71 | //let is_in_range = curr_row >= self.rows.start && curr_row < self.rows.end && 72 | // curr_column >= self.columns.start && curr_column < self.columns.end; 73 | if self.position > self.max { 74 | None 75 | } else if self.first_take { 76 | self.position += 1; 77 | self.first_take = false; 78 | self.iter.nth(self.position-1) 79 | } else if is_in_row_range { 80 | //self.step -= self.increase; 81 | self.position += 1; 82 | self.iter.next() 83 | } else { 84 | self.position += self.step+1; 85 | self.iter.nth(self.step) 86 | } 87 | } 88 | } 89 | 90 | pub struct SubMatrixInUpperStepBy { 91 | pub iter: I, 92 | rows: Range, 93 | columns: Range, 94 | size: [usize;2], 95 | step: usize, 96 | max: Option, 97 | position: usize, 98 | first_take: bool, 99 | } 100 | impl SubMatrixInUpperStepBy { 101 | pub fn new(iter: I, rows: Range, columns: Range, size:[usize;2]) -> SubMatrixInUpperStepBy { 102 | //let position =columns.start*size[0] + rows.start; 103 | let step = size[0]-rows.end+rows.start; 104 | //let max = (columns.end-1)*size[0] + rows.end-1; 105 | let position =if rows.start<=columns.start { 106 | columns.start*size[0] + rows.start 107 | //columns.start*(columns.start+1)/2 + rows.start 108 | } else { 109 | rows.start*size[0] + rows.start 110 | }; 111 | let max = if rows.start>columns.end-1 { 112 | None 113 | } else if columns.end >= rows.end { 114 | Some((columns.end-1)*size[0] + rows.end-1) 115 | //Some((columns.end-1)*columns.end/2 + rows.end-1) 116 | } else { 117 | Some((columns.end-1)*size[0] + columns.end-1) 118 | //Some((columns.end-1)*columns.end/2 + columns.end-1) 119 | }; 120 | SubMatrixInUpperStepBy{iter, rows, columns, size, step, position,max,first_take: true} 121 | } 122 | } 123 | 124 | 125 | impl Iterator for SubMatrixInUpperStepBy 126 | where I:Iterator, 127 | { 128 | type Item = I::Item; 129 | #[inline] 130 | fn next(&mut self) -> Option { 131 | 132 | let curr_row = self.position%unsafe{self.size.get_unchecked(0)}; 133 | let curr_column = self.position/unsafe{self.size.get_unchecked(0)}; 134 | 135 | let is_in_row_range = curr_row >= self.rows.start && curr_row < self.rows.end; 136 | 137 | //let 
is_in_range = 138 | // curr_row >= self.rows.start && curr_row < self.rows.end && 139 | // curr_column >= self.columns.start && curr_column < self.columns.end; 140 | 141 | let is_in_upper = curr_row <= curr_column; 142 | 143 | if let Some(max) = self.max { 144 | if self.position > max { 145 | None 146 | } else if self.first_take { 147 | self.position += 1; 148 | self.first_take = false; 149 | self.iter.nth(self.position-1) 150 | } else if is_in_row_range { 151 | if is_in_upper { 152 | //self.step -= self.increase; 153 | self.position += 1; 154 | self.iter.next() 155 | } else { 156 | let step = (curr_column+1)*self.size[0] + self.rows.start - self.position; 157 | self.position += step + 1; 158 | self.iter.nth(step) 159 | } 160 | } else { 161 | self.position += self.step+1; 162 | self.iter.nth(self.step) 163 | } 164 | } else { 165 | None 166 | } 167 | } 168 | } 169 | 170 | pub struct MatrixUpperStepBy { 171 | pub iter: I, 172 | size: [usize;2], 173 | step: usize, 174 | position: usize, 175 | first_take: bool, 176 | } 177 | 178 | impl MatrixUpperStepBy { 179 | pub fn new(iter: I, size:[usize;2]) -> MatrixUpperStepBy { 180 | let position =0; 181 | let step = size[0]; 182 | MatrixUpperStepBy{iter, size, step, position,first_take: true} 183 | } 184 | pub fn new_shift(iter: I, size:[usize;2], shift: usize) -> MatrixUpperStepBy { 185 | let position =shift; 186 | let step = size[0]; 187 | MatrixUpperStepBy{iter, size, step, position,first_take: true} 188 | } 189 | } 190 | 191 | impl Iterator for MatrixUpperStepBy 192 | where I:Iterator, 193 | { 194 | type Item = I::Item; 195 | #[inline] 196 | fn next(&mut self) -> Option { 197 | 198 | let curr_row = self.position%unsafe{self.size.get_unchecked(0)}; 199 | let curr_column = self.position/unsafe{self.size.get_unchecked(0)}; 200 | 201 | let is_in_range = curr_row <= curr_column; 202 | 203 | if self.first_take { 204 | self.position += 1; 205 | self.first_take = false; 206 | self.iter.nth(self.position-1) 207 | } else if is_in_range { 208 | //self.step -= self.increase; 209 | self.position += 1; 210 | self.iter.next() 211 | } else { 212 | let step = self.size[0]-curr_column; 213 | self.position += step; 214 | self.iter.nth(step-1) 215 | } 216 | } 217 | } 218 | 219 | pub trait MatrixIterator: Iterator { 220 | type Item; 221 | fn step_by_increase(self, step:usize, increase: usize) -> IncreaseStepBy 222 | where Self:Sized { 223 | IncreaseStepBy::new(self, step, increase) 224 | } 225 | //pub fn new(iter: I, rows: Range, columns: Range, size:[usize;2]) -> SubMatrixStepBy { 226 | fn submatrix_step_by(self, rows: Range, columns: Range, size:[usize;2]) -> SubMatrixStepBy 227 | where Self:Sized { 228 | SubMatrixStepBy::new(self, rows, columns, size) 229 | } 230 | fn matrixupper_step_by(self, size:[usize;2]) -> MatrixUpperStepBy 231 | where Self:Sized { 232 | MatrixUpperStepBy::new(self, size) 233 | } 234 | fn submatrix_in_upper_step_by(self, rows: Range, columns: Range, size:[usize;2]) -> SubMatrixInUpperStepBy 235 | where Self:Sized { 236 | SubMatrixInUpperStepBy::new(self, rows, columns, size) 237 | } 238 | fn matrixupper_step_by_shift(self, size:[usize;2], shift: usize) -> MatrixUpperStepBy 239 | where Self:Sized { 240 | MatrixUpperStepBy::new_shift(self, size, shift) 241 | } 242 | } 243 | 244 | impl<'a,T> MatrixIterator for std::slice::Iter<'a,T> { 245 | type Item = T; 246 | } 247 | impl<'a,T> MatrixIterator for std::slice::IterMut<'a,T> { 248 | type Item = T; 249 | } -------------------------------------------------------------------------------- 
/src/matrix/matrixconst.rs: -------------------------------------------------------------------------------- 1 | use crate::matrix::{MatrixFull, BasicMatrix, MatFormat}; 2 | use crate::index::*; 3 | use crate::tensor_basic_operation::*; 4 | use crate::matrix::matrixfullslice::*; 5 | use crate::matrix::matrixupper::*; 6 | use crate::matrix::submatrixfull::*; 7 | 8 | pub struct DMatrix3x3 { 9 | pub size: [usize;2], 10 | pub indicing: [usize;2], 11 | pub data: [f64;9] 12 | } 13 | 14 | pub struct DMatrix5x6 { 15 | size: [usize;2], 16 | indicing: [usize;2], 17 | data: [f64;30] 18 | } 19 | 20 | pub struct DMatrix7x10 { 21 | size: [usize;2], 22 | indicing: [usize;2], 23 | data: [f64;70] 24 | } 25 | 26 | impl<'a> BasicMatrix<'a, f64> for DMatrix3x3 { 27 | fn size(&self) -> &[usize] { 28 | &self.size 29 | } 30 | 31 | fn indicing(&self) -> &[usize] { 32 | &self.indicing 33 | } 34 | 35 | fn data_ref(&self) -> Option<&[f64]> { 36 | Some(self.data.as_ref()) 37 | } 38 | 39 | fn data_ref_mut(&mut self) -> Option<&mut [f64]> { 40 | Some(&mut self.data[..]) 41 | } 42 | 43 | fn is_matr(&self) -> bool { 44 | self.size().len() == 2 && self.indicing().len() == 2 45 | } 46 | 47 | fn is_contiguous(&self) -> bool {true} 48 | } -------------------------------------------------------------------------------- /src/matrix/matrixfullslice.rs: -------------------------------------------------------------------------------- 1 | //#![warn(missing_docs)] 2 | use std::{cell::RefCell, collections::binary_heap::Iter, convert, fmt::Display, iter::{Filter,Flatten, Map, StepBy}, marker, mem::ManuallyDrop, ops::{Deref, DerefMut, Div, DivAssign, IndexMut, MulAssign, RangeFull}, slice::{self, ChunksExact, ChunksExactMut}, thread::panicking}; 3 | use std::ops::{Add, Sub, Mul, AddAssign, SubAssign, Index, Range}; 4 | use libc::{CLOSE_RANGE_CLOEXEC, SYS_userfaultfd}; 5 | use typenum::{U2, Pow}; 6 | use rayon::{prelude::*, collections::btree_map::IterMut, iter::Enumerate}; 7 | use std::vec::IntoIter; 8 | 9 | use crate::{MatrixFull, BasicMatrix, MatFormat, BasicMatrixOpt, MathMatrix, ParMathMatrix}; 10 | use crate::index::*; 11 | use crate::tensor_basic_operation::*; 12 | use crate::matrix::matrixfull::*; 13 | //use crate::matrix::matrixupper::*; 14 | 15 | use super::matrix_trait::*; 16 | 17 | 18 | #[derive(Debug,PartialEq)] 19 | pub struct MatrixFullSliceMut<'a,T> { 20 | pub size : &'a [usize], 21 | pub indicing: &'a [usize], 22 | pub data : &'a mut [T] 23 | //pub data : [&'a mut T] 24 | } 25 | 26 | impl <'a, T> BasicMatrix<'a, T> for MatrixFullSliceMut<'a,T> { 27 | #[inline] 28 | /// `matr_a.size()' return &matr_a.size; 29 | fn size(&self) -> &[usize] { 30 | &self.size 31 | } 32 | #[inline] 33 | /// `matr_a.indicing()' return &matr_a.indicing; 34 | fn indicing(&self) -> &[usize] { 35 | &self.indicing 36 | } 37 | 38 | fn data_ref(&self) -> Option<&[T]> { 39 | Some(&self.data) 40 | } 41 | fn data_ref_mut(&mut self) -> Option<&mut [T]> { 42 | Some(self.data) 43 | } 44 | } 45 | 46 | impl<'a, T> BasicMatrixOpt<'a, T> for MatrixFullSliceMut<'a, T> where T: Copy + Clone {} 47 | 48 | impl<'a, T> MathMatrix<'a, T> for MatrixFullSliceMut<'a, T> where T: Copy + Clone {} 49 | 50 | impl<'a, T> ParMathMatrix<'a, T> for MatrixFullSliceMut<'a, T> where T: Copy + Clone + Send + Sync {} 51 | 52 | impl <'a, T: Copy + Clone + Display + Send + Sync> MatrixFullSliceMut<'a, T> { 53 | //pub fn from_slice(sl: &[T]) -> MatrixFullSlice<'a, T> { 54 | // MatrixFullSliceMut { 55 | // size 56 | // } 57 | //} 58 | #[inline] 59 | pub fn get_column_mut(&mut self, j: 
usize) -> &mut [T] { 60 | let start = self.size[0]*j; 61 | let end = start + self.size[0]; 62 | &mut self.data[start..end] 63 | } 64 | #[inline] 65 | pub fn iter_column_mut(&mut self, j: usize) -> std::slice::IterMut { 66 | let start = self.size[0]*j; 67 | let end = start + self.size[0]; 68 | self.data[start..end].iter_mut() 69 | } 70 | #[inline] 71 | pub fn iter_mut_j(&mut self, j: usize) -> std::slice::IterMut { 72 | let start = self.size[0]*j; 73 | let end = start + self.size[0]; 74 | self.data[start..end].iter_mut() 75 | } 76 | #[inline] 77 | pub fn par_iter_mut_j(&mut self, j: usize) -> rayon::slice::IterMut { 78 | let start = self.indicing[1]*j; 79 | let end = start + self.indicing[1]; 80 | self.data[start..end].par_iter_mut() 81 | } 82 | #[inline] 83 | pub fn iter_columns_full(&self) -> ChunksExact{ 84 | self.data.chunks_exact(self.size[0]) 85 | } 86 | #[inline] 87 | pub fn iter_mut_columns(&mut self,range_column: Range) -> Option>{ 88 | if let Some(n_chunk) = self.size.get(0) { 89 | Some(self.data[n_chunk*range_column.start..n_chunk*range_column.end].chunks_exact_mut(*n_chunk)) 90 | } else { 91 | None 92 | } 93 | } 94 | #[inline] 95 | pub fn iter_submatrix_mut_old(& mut self, x: Range, y: Range) -> Flatten> { 96 | let mut tmp_slices: Vec<&mut [T]> = Vec::with_capacity(y.len()); 97 | let mut dd = self.data.split_at_mut(0).1; 98 | let len_slices_x = x.len(); 99 | let len_y = self.indicing[1]; 100 | y.fold((dd,0_usize),|(ee, offset), y| { 101 | let start = x.start + y*len_y; 102 | let gg = ee.split_at_mut(start-offset).1.split_at_mut(len_slices_x); 103 | tmp_slices.push(gg.0); 104 | (gg.1,start+len_slices_x) 105 | }); 106 | tmp_slices.into_iter().flatten() 107 | } 108 | 109 | #[inline] 110 | pub fn iter(&self) -> slice::Iter { 111 | self.data.iter() 112 | } 113 | #[inline] 114 | pub fn iter_submatrix(&self, x: Range, y: Range) -> SubMatrixStepBy>{ 115 | self.iter().submatrix_step_by(x, y, [self.size[0],self.size[1]]) 116 | } 117 | #[inline] 118 | pub fn iter_matrixupper_submatrix(&self, x: Range, y: Range) -> SubMatrixInUpperStepBy>{ 119 | self.iter().submatrix_in_upper_step_by(x, y, [self.size[0],self.size[1]]) 120 | } 121 | #[inline] 122 | pub fn iter_diagonal(&self) -> Option>> { 123 | //let [x,y] = self.size; 124 | let x = self.size[0]; 125 | let y = self.size[1]; 126 | if x==0 || y==0 || x!=y { 127 | return None 128 | } else { 129 | return Some(self.iter().step_by(x+1)) 130 | } 131 | } 132 | #[inline] 133 | pub fn iter_matrixupper(&self) -> Option>> { 134 | //let [x,y] = self.size; 135 | let x = self.size[0]; 136 | let y = self.size[1]; 137 | if x==0 || y==0 || x!=y { 138 | return None 139 | } else { 140 | return Some(self.iter().matrixupper_step_by([x,y])) 141 | } 142 | } 143 | 144 | #[inline] 145 | pub fn iter_mut(&mut self) -> slice::IterMut { 146 | self.data.iter_mut() 147 | } 148 | #[inline] 149 | pub fn iter_submatrix_mut(&mut self, x: Range, y: Range) -> SubMatrixStepBy>{ 150 | let size = [self.size[0],self.size[1]]; 151 | self.iter_mut().submatrix_step_by(x, y, size) 152 | } 153 | #[inline] 154 | pub fn iter_matrixupper_submatrix_mut(&mut self, x: Range, y: Range) -> SubMatrixInUpperStepBy>{ 155 | let size = [self.size[0],self.size[1]]; 156 | self.iter_mut().submatrix_in_upper_step_by(x, y, size) 157 | } 158 | pub fn iter_diagonal_mut(&mut self) -> Option>> { 159 | //let [x,y] = self.size; 160 | let x = self.size[0]; 161 | let y = self.size[1]; 162 | if x==0 || y==0 || x!=y { 163 | return None 164 | } else { 165 | return Some(self.iter_mut().step_by(x+1)) 166 | } 167 | } 
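    // Added illustration (a minimal sketch, not in the original source; the
    // struct literal below relies on the fields of `MatrixFullSliceMut` being
    // public): `iter_diagonal_mut` above is simply a `step_by(x+1)` walk over
    // the flat column-major buffer, so for a 3x3 slice it visits offsets
    // 0, 4 and 8:
    //
    //     let size = [3usize, 3];
    //     let indicing = [1usize, 3];
    //     let mut data = vec![0.0f64; 9];
    //     let mut ms = MatrixFullSliceMut { size: &size, indicing: &indicing, data: &mut data };
    //     ms.iter_diagonal_mut().unwrap().for_each(|d| *d = 1.0);
    //     // `data` now holds the flattened 3x3 identity: offsets 0, 4 and 8 are 1.0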
168 | #[inline] 169 | pub fn iter_matrixupper_mut(& mut self) -> Option>> { 170 | //let [x,y] = self.size; 171 | let x = self.size[0]; 172 | let y = self.size[1]; 173 | if x==0 || y==0 || x!=y { 174 | return None 175 | } else { 176 | return Some(self.iter_mut().matrixupper_step_by([x,y])) 177 | } 178 | } 179 | } 180 | 181 | 182 | 183 | #[derive(Clone,Debug,PartialEq)] 184 | pub struct MatrixFullSlice<'a,T> { 185 | pub size : &'a [usize], 186 | pub indicing: &'a [usize], 187 | pub data : &'a [T] 188 | } 189 | 190 | impl <'a, T> BasicMatrix<'a, T> for MatrixFullSlice<'a,T> { 191 | #[inline] 192 | /// `matr_a.size()' return &matr_a.size; 193 | fn size(&self) -> &[usize] { 194 | &self.size 195 | } 196 | #[inline] 197 | /// `matr_a.indicing()' return &matr_a.indicing; 198 | fn indicing(&self) -> &[usize] { 199 | &self.indicing 200 | } 201 | 202 | fn data_ref(&self) -> Option<&[T]> { 203 | Some(&self.data) 204 | } 205 | 206 | fn data_ref_mut(&mut self) -> Option<&mut [T]> {None} 207 | } 208 | 209 | impl<'a, T> BasicMatrixOpt<'a, T> for MatrixFullSlice<'a, T> where T: Copy + Clone {} 210 | 211 | impl<'a, T> MathMatrix<'a, T> for MatrixFullSlice<'a, T> where T: Copy + Clone {} 212 | 213 | impl<'a, T> ParMathMatrix<'a, T> for MatrixFullSlice<'a, T> where T: Copy + Clone + Send + Sync {} 214 | 215 | impl <'a, T: Copy + Clone + Display + Send + Sync> MatrixFullSlice<'a, T> { 216 | #[inline] 217 | pub fn iter(&self) -> slice::Iter { 218 | self.data.iter() 219 | } 220 | #[inline] 221 | pub fn iter_submatrix(&self, x: Range, y: Range) -> SubMatrixStepBy>{ 222 | self.iter().submatrix_step_by(x, y, [self.size[0],self.size[1]]) 223 | } 224 | #[inline] 225 | pub fn iter_matrixupper_submatrix(&self, x: Range, y: Range) -> SubMatrixInUpperStepBy>{ 226 | self.iter().submatrix_in_upper_step_by(x, y, [self.size[0],self.size[1]]) 227 | } 228 | #[inline] 229 | pub fn iter_diagonal(&self) -> Option>> { 230 | //let [x,y] = self.size; 231 | let x = self.size[0]; 232 | let y = self.size[1]; 233 | if x==0 || y==0 || x!=y { 234 | return None 235 | } else { 236 | return Some(self.iter().step_by(x+1)) 237 | } 238 | } 239 | #[inline] 240 | pub fn iter_matrixupper(&self) -> Option>> { 241 | //let [x,y] = self.size; 242 | let x = self.size[0]; 243 | let y = self.size[1]; 244 | if x==0 || y==0 || x!=y { 245 | return None 246 | } else { 247 | return Some(self.iter().matrixupper_step_by([x,y])) 248 | } 249 | } 250 | #[inline] 251 | pub fn iter_matrixupper_shift(&self, shift: usize) -> Option>> { 252 | //let [x,y] = self.size; 253 | let x = self.size[0]; 254 | let y = self.size[1]; 255 | if x==0 || y==0 || x!=y { 256 | return None 257 | } else { 258 | return Some(self.iter().matrixupper_step_by_shift([x,y], shift)) 259 | } 260 | } 261 | #[inline] 262 | pub fn iter_j(&self, j: usize) -> std::slice::Iter { 263 | let start = self.size[0]*j; 264 | let end = start + self.size[0]; 265 | self.data[start..end].iter() 266 | } 267 | #[inline] 268 | pub fn iter_columns(&self, range_column: Range) -> Option>{ 269 | if let Some(n_chunk) = self.size.get(0) { 270 | Some(self.data[n_chunk*range_column.start..n_chunk*range_column.end].chunks_exact(*n_chunk)) 271 | } else { 272 | None 273 | } 274 | } 275 | #[inline] 276 | pub fn iter_columns_full(&self) -> ChunksExact{ 277 | self.data.chunks_exact(self.size[0]) 278 | } 279 | #[inline] 280 | pub fn par_iter_columns_full(&self) -> rayon::slice::ChunksExact{ 281 | self.data.par_chunks_exact(self.size[0]) 282 | } 283 | #[inline] 284 | pub fn par_iter_columns(&self, range_column: Range) -> Option>{ 
285 | if let Some(n_chunk) = self.size.get(0) { 286 | Some(self.data[n_chunk*range_column.start..n_chunk*range_column.end].par_chunks_exact(*n_chunk)) 287 | } else { 288 | None 289 | } 290 | } 291 | #[inline] 292 | pub fn get_slice_x(&self, y: usize) -> & [T] { 293 | let start = self.indicing[1]*y; 294 | let end = self.indicing[1]*(y+1); 295 | & self.data[start..end] 296 | } 297 | #[inline] 298 | pub fn transpose(&self) -> MatrixFull { 299 | let x_len = self.size[0]; 300 | let y_len = self.size[1]; 301 | //let [x_len,y_len] = *self.size; 302 | let mut trans_mat = MatrixFull { 303 | size: [y_len,x_len], 304 | indicing: [0usize;2], 305 | data: self.data.to_vec() 306 | }; 307 | let mut len = trans_mat.size.iter() 308 | .zip(trans_mat.indicing.iter_mut()) 309 | .fold(1usize,|len,(di,ii)| { 310 | *ii = len; 311 | len * di 312 | }); 313 | self.iter_columns_full().enumerate().for_each(|(i,c)| { 314 | trans_mat.iter_submatrix_mut(i..i+1,0..x_len).zip(c) 315 | .for_each(|(to,from)| {*to = *from}) 316 | }); 317 | trans_mat 318 | } 319 | #[inline] 320 | pub fn transpose_and_drop(self) -> MatrixFull { 321 | let x_len = self.size[0]; 322 | let y_len = self.size[1]; 323 | //let [x_len,y_len] = *self.size; 324 | let mut trans_mat = MatrixFull { 325 | size: [y_len,x_len], 326 | indicing: [0usize;2], 327 | data: self.data.to_vec() 328 | }; 329 | let mut len = trans_mat.size.iter() 330 | .zip(trans_mat.indicing.iter_mut()) 331 | .fold(1usize,|len,(di,ii)| { 332 | *ii = len; 333 | len * di 334 | }); 335 | self.iter_columns_full().enumerate().for_each(|(i,c)| { 336 | trans_mat.iter_submatrix_mut(i..i+1,0..x_len).zip(c) 337 | .for_each(|(to,from)| {*to = *from}) 338 | }); 339 | trans_mat 340 | } 341 | } 342 | 343 | #[derive(Debug,PartialEq)] 344 | pub struct SubMatrixFullSlice<'a,T> { 345 | pub size : [usize;2], 346 | pub indicing: [usize;2], 347 | pub data : &'a [T] 348 | //pub data : [&'a mut T] 349 | } 350 | 351 | impl <'a, T> BasicMatrix<'a, T> for SubMatrixFullSlice<'a,T> { 352 | #[inline] 353 | /// `matr_a.size()' return &matr_a.size; 354 | fn size(&self) -> &[usize] { 355 | &self.size 356 | } 357 | #[inline] 358 | /// `matr_a.indicing()' return &matr_a.indicing; 359 | fn indicing(&self) -> &[usize] { 360 | &self.indicing 361 | } 362 | 363 | fn data_ref(&self) -> Option<&[T]> { 364 | Some(&self.data) 365 | } 366 | fn data_ref_mut(&mut self) -> Option<&mut [T]> { 367 | None 368 | } 369 | } -------------------------------------------------------------------------------- /src/matrix/mod.rs: -------------------------------------------------------------------------------- 1 | //#![warn(missing_docs)] 2 | use std::{fmt::Display, collections::binary_heap::Iter, iter::{Filter,Flatten, Map}, convert, slice::{ChunksExact,ChunksExactMut, self}, mem::ManuallyDrop, marker, cell::RefCell, ops::{IndexMut, RangeFull, MulAssign, DivAssign, Div, DerefMut, Deref}, thread::panicking}; 3 | use std::ops::{Add, Sub, Mul, AddAssign, SubAssign, Index, Range}; 4 | use libc::{CLOSE_RANGE_CLOEXEC, SYS_userfaultfd}; 5 | use typenum::{U2, Pow}; 6 | use rayon::{prelude::*, collections::btree_map::IterMut, iter::Enumerate}; 7 | use std::vec::IntoIter; 8 | 9 | use crate::{index::{TensorIndex, TensorIndexUncheck}, TensorOpt, TensorOptMut, TensorSlice, TensorSliceMut, TensorOptUncheck, TensorSliceUncheck, TensorSliceMutUncheck, TensorOptMutUncheck}; 10 | use crate::matrix; 11 | 12 | pub mod matrixfull; 13 | pub mod matrixfullslice; 14 | pub mod matrixupper; 15 | pub mod submatrixfull; 16 | pub mod matrixconst; 17 | pub mod 
matrix_blas_lapack; 18 | pub mod einsum; 19 | mod matrix_trait; 20 | 21 | 22 | use crate::matrix::matrixfull::*; 23 | use crate::matrix::matrixconst::*; 24 | use crate::matrix::matrixfullslice::*; 25 | use crate::matrix::matrixupper::*; 26 | use crate::matrix::submatrixfull::*; 27 | use crate::matrix::matrix_blas_lapack::*; 28 | 29 | 30 | /// **MatrixFull** is a `column-major` 2D array designed for quantum chemistry calculations. 31 | /// 32 | /// #### Basic Usage for General-purpose Use 33 | /// 34 | /// - [Construction](#construction) 35 | /// 36 | /// - [Indexing](#indexing) 37 | /// 38 | /// - [Mathatics](#math-operations) 39 | /// 40 | /// - [Iterators](#iterators) 41 | /// 42 | /// - [Slicing](#slicing) 43 | /// 44 | /// #### Usage for Advanced and/or Specific Uses 45 | /// 46 | /// - [Mathmatic operations `MathMatrix::scaled_add`...](#more-math-operations-for-the-rest-package) 47 | /// 48 | /// - [Matrix operations `MatrixFull::transpose`...](#more-matrix-operations-needed-by-the-rest-package) 49 | /// 50 | /// - Also provides wrappers to lapack and blas functions, including: [`matrix::matrix_blas_lapack`] 51 | /// 1) perform the matrix-matrix operation for C = alpha\*op( A )\*op( B ) + beta\*C: [`_dgemm`] 52 | /// 2) compute the eigenvalues and, optionally, eigenvectors: [`_dsyev`] 53 | /// 3) compute the Cholesky factorization of a real symmetric positive definite matrix A: [`_dpotrf`] 54 | /// 4) perform the matrix inversion: [`_dinverse`] 55 | /// 5) many others ... 56 | /// **NOTE**:: all functions in lapack and blas libraries can be imported in the similar way for any matrix struct with the [`BasicMatrix`] trait . 57 | /// 58 | /// 59 | /// # Construction 60 | /// There are several ways to construct a matrix from different sources 61 | /// 62 | /// 1. Create a new matrix filled with a given element. 63 | /// ``` 64 | /// use rest_tensors::MatrixFull; 65 | /// let matr = MatrixFull::new([3,4],1.0f64); 66 | /// //| 1.0 | 1.0 | 1.0 | 1.0 | 67 | /// //| 1.0 | 1.0 | 1.0 | 1.0 | 68 | /// //| 1.0 | 1.0 | 1.0 | 1.0 | 69 | /// ``` 70 | /// 2. Cenerate a new matrix from a vector. For example, a 3x4 matrix from a vector with 12 elements 71 | /// ``` 72 | /// use rest_tensors::MatrixFull; 73 | /// let new_vec = vec![ 74 | /// 1.0, 2.0, 3.0, 75 | /// 4.0, 5.0, 6.0, 76 | /// 7.0, 8.0, 9.0, 77 | /// 10.0, 11.0, 12.0]; 78 | /// let matr = MatrixFull::from_vec([3,4],new_vec).unwrap(); 79 | /// assert_eq!(matr[(1,2)],8.0) 80 | /// //| 1.0 | 4.0 | 7.0 |10.0 | 81 | /// //| 2.0 | 5.0 | 8.0 |11.0 | 82 | /// //| 3.0 | 6.0 | 9.0 |12.0 | 83 | /// ``` 84 | /// # Indexing 85 | /// The [`MatrixFull`](MatrixFull) struct allows to access values by index, based on the [`Index`](Index) trait. 86 | /// 1. For any matrix element, it is accessable via `[usize;2]` or `(usize, usize)` in the order of `row and column` 87 | /// ``` 88 | /// use rest_tensors::MatrixFull; 89 | /// let mut matr = MatrixFull::new([2,2],0.0); 90 | /// matr[[0,0]] = 1.0; 91 | /// matr[(1,1)] = 1.0; 92 | /// assert_eq!(matr, MatrixFull::from_vec([2,2],vec![ 93 | /// 1.0,0.0, 94 | /// 0.0,1.0]).unwrap()); 95 | /// ``` 96 | /// 2. 
It is also accessable via `get2d(_mut)` and `set2d` in the traits of [`TensorOpt`](TensorOpt) and/or [`TensorOptMut`](TensorOptMut) 97 | /// ``` 98 | /// use rest_tensors::MatrixFull; 99 | /// use rest_tensors::TensorOptMut; 100 | /// let mut matr = MatrixFull::new([2,2],0.0); 101 | /// let mut mat00 = matr.get2d_mut([0,0]).unwrap(); 102 | /// *mat00 = 1.0; 103 | /// matr.set2d([1,1],1.0); 104 | /// assert_eq!(matr, MatrixFull::from_vec([2,2],vec![ 105 | /// 1.0,0.0, 106 | /// 0.0,1.0]).unwrap()); 107 | /// ``` 108 | /// 3. For all or part of elements in the a given column, they are accessable via `(Range,usize)` or `(RangeFull, usize)`, and return as a **slice** 109 | /// ``` 110 | /// use rest_tensors::MatrixFull; 111 | /// let new_vec = vec![ 112 | /// 1.0, 2.0, 3.0, 113 | /// 4.0, 5.0, 6.0, 114 | /// 7.0, 8.0, 9.0, 115 | /// 10.0, 11.0, 12.0]; 116 | /// let mut matr = MatrixFull::from_vec([3,4],new_vec).unwrap(); 117 | /// let mut part_column_2nd = &mut matr[(0..2,2)]; 118 | /// assert_eq!(part_column_2nd, &[7.0,8.0]); 119 | /// let mut full_column_2nd = &mut matr[(..,2)]; 120 | /// assert_eq!(full_column_2nd, &[7.0,8.0,9.0]); 121 | /// // _______ 122 | /// //| 1.0 | 4.0 || 7.0 ||10.0 | 123 | /// //| 2.0 | 5.0 || 8.0 ||11.0 | 124 | /// //| 3.0 | 6.0 || 9.0 ||12.0 | 125 | /// // ------- 126 | /// ``` 127 | /// 4. For the elements in several continued columns, they are accessable via `(RangeFull, Range)`, and return as a **slice** 128 | /// ``` 129 | /// use rest_tensors::MatrixFull; 130 | /// let new_vec = vec![ 131 | /// 1.0, 2.0, 3.0, 132 | /// 4.0, 5.0, 6.0, 133 | /// 7.0, 8.0, 9.0, 134 | /// 10.0, 11.0, 12.0]; 135 | /// let mut matr = MatrixFull::from_vec([3,4],new_vec).unwrap(); 136 | /// let mut columns23 = &mut matr[(..,1..3)]; 137 | /// assert_eq!(columns23, &[4.0,5.0,6.0,7.0,8.0,9.0]); 138 | /// // _____________ 139 | /// //| 1.0 || 4.0 | 7.0 ||10.0 | 140 | /// //| 2.0 || 5.0 | 8.0 ||11.0 | 141 | /// //| 3.0 || 6.0 | 9.0 ||12.0 | 142 | /// // ------------- 143 | /// ``` 144 | /// 5. In general, a sub matrix in the area of `(Range, Range)` is accessable via `get_submatrix()` and `get_submatrix_mut()`, 145 | /// and return as a [`SubMatrixFull`] and [`SubMatrixFullMut`] 146 | /// ``` 147 | /// use rest_tensors::MatrixFull; 148 | /// use rest_tensors::SubMatrixFull; 149 | /// let new_vec = vec![ 150 | /// 1.0, 2.0, 3.0, 151 | /// 4.0, 5.0, 6.0, 152 | /// 7.0, 8.0, 9.0, 153 | /// 10.0, 11.0, 12.0]; 154 | /// let mut matr = MatrixFull::from_vec([3,4],new_vec).unwrap(); 155 | /// let mut sub_matr = matr.get_submatrix(0..2,2..4); 156 | /// assert_eq!(sub_matr.data(), vec![7.0,8.0,10.0,11.0]); 157 | /// // _____________ 158 | /// //| 1.0 | 4.0 || 7.0 |10.0 || 159 | /// //| 2.0 | 5.0 || 8.0 |11.0 || 160 | /// // ----------- 161 | /// //| 3.0 | 6.0 | 9.0 |12.0 | 162 | /// ``` 163 | /// # Math Operations 164 | /// The [`MatrixFull`](MatrixFull) struct enables the basic mathmatic operations, including `+`, `+=`, `-`, `-=`, `*`, `*=`, `/`, and `/=` 165 | /// based on the traits of [`Add`](Add), [`AddAssign`](AddAssign), [`Sub`](Sub), [`SubAssign`](SubAssign), 166 | /// [`Mul`](Mul), [`MulAssign`](MulAssign), [`Div`](Div), [`DivAssign`](DivAssign), respectively 167 | /// 1. Add or subtract for two matrices: `MatrixFull` +/- `MatrixFull`. 168 | /// **NOTE**: 1) The size of two matrices should be the same. 
Otherwise, the program stops with **panic!** 169 | /// 170 | /// ``` 171 | /// use rest_tensors::MatrixFull; 172 | /// let vec_a = vec![ 173 | /// 1.0, 2.0, 3.0, 174 | /// 4.0, 5.0, 6.0, 175 | /// 7.0, 8.0, 9.0, 176 | /// 10.0, 11.0, 12.0]; 177 | /// let matr_a = MatrixFull::from_vec([3,4],vec_a).unwrap(); 178 | /// // | 1.0 | 4.0 | 7.0 | 10.0 | 179 | /// //matr_a = | 2.0 | 5.0 | 8.0 | 11.0 | 180 | /// // | 3.0 | 6.0 | 9.0 | 12.0 | 181 | /// 182 | /// let vec_b = (13..25).map(|x| x as f64).collect::>(); 183 | /// let matr_b = MatrixFull::from_vec([3,4],vec_b).unwrap(); 184 | /// // | 13.0 | 16.0 | 19.0 | 22.0 | 185 | /// //matr_b = | 14.0 | 17.0 | 20.0 | 23.0 | 186 | /// // | 15.0 | 18.0 | 21.0 | 24.0 | 187 | /// 188 | /// // matr_c = matr_a + matr_b; 189 | /// // NOTE: both matr_a and matr_b are consumed after `+` and `-` operations, 190 | /// let mut matr_c = matr_a.clone() + matr_b.clone(); 191 | /// // | 14.0 | 20.0 | 26.0 | 32.0 | 192 | /// //matr_c = | 16.0 | 22.0 | 28.0 | 34.0 | 193 | /// // | 18.0 | 24.0 | 30.0 | 36.0 | 194 | /// assert_eq!(matr_c[(..,3)], [32.0,34.0,36.0]); 195 | /// 196 | /// // matr_c = matr_c - matr_b = matr_a 197 | /// // NOTE: matr_b is consumed after `+` and `-` operations, 198 | /// matr_c -= matr_b; 199 | /// assert_eq!(matr_c, matr_a); 200 | /// ``` 201 | /// 2. Add or subtract between two matrices with different types: `MatrixFull +/- (Sub)MatrixFull` 202 | /// NOTE: the matrix: `MatrixFull<&T>` should be used after the operators of ‘+’ and ‘-’. 203 | /// ``` 204 | /// use rest_tensors::MatrixFull; 205 | /// let vec_a = vec![ 206 | /// 1.0, 2.0, 3.0, 207 | /// 4.0, 5.0, 6.0, 208 | /// 7.0, 8.0, 9.0, 209 | /// 10.0, 11.0, 12.0]; 210 | /// let matr_a = MatrixFull::from_vec([3,4],vec_a).unwrap(); 211 | /// // | 1.0 | 4.0 | 7.0 | 10.0 | 212 | /// //matr_a = | 2.0 | 5.0 | 8.0 | 11.0 | with the type of MatrixFull 213 | /// // | 3.0 | 6.0 | 9.0 | 12.0 | 214 | /// 215 | /// let matr_b = matr_a.get_submatrix(0..2,1..3); 216 | /// //matr_b = | 4.0 | 7.0 | 217 | /// // | 5.0 | 8.0 | with the type of SubMatrixFull(MatrixFull<&f64>) 218 | /// 219 | /// let vec_c = (5..9).map(|x| x as f64).collect::>(); 220 | /// let matr_c = MatrixFull::from_vec([2,2],vec_c).unwrap(); 221 | /// //matr_c = | 5.0 | 7.0 | 222 | /// // | 6.0 | 8.0 | with the type of MatrixFull 223 | /// 224 | /// // matr_d = matr_b: `SubMatrixFull` + matr_c: `MatrixFull`; 225 | /// // NOTE: both matr_c and matr_b are dropped after the add operation. 226 | /// let mut matr_d = matr_b + matr_c.clone(); 227 | /// //matr_d = | 9.0 | 14.0 | 228 | /// // | 11.0 | 16.0 | with the type of MatrixFull 229 | /// assert_eq!(matr_d.data(), vec![9.0,11.0,14.0,16.0]); 230 | /// 231 | /// // matr_d: `MatrixFull` -= matr_c: `SubMatrixFull` = matr_c; 232 | /// // NOTE: both matr_c and matr_b are dropped after the add operation. 233 | /// let matr_b = matr_a.get_submatrix(0..2,1..3); 234 | /// //matr_b = | 4.0 | 7.0 | 235 | /// // | 5.0 | 8.0 | with the type of SubMatrixFull<&f64> 236 | /// matr_d -= matr_b; 237 | /// assert_eq!(matr_d, matr_c) 238 | /// ``` 239 | /// 3. 
Enable `SubMatrixFullMut` the operations of '+=' and '-=' with `(Sub)MatrixFull` 240 | /// 241 | /// ``` 242 | /// use rest_tensors::MatrixFull; 243 | /// let vec_a = vec![ 244 | /// 1.0, 2.0, 3.0, 245 | /// 4.0, 5.0, 6.0, 246 | /// 7.0, 8.0, 9.0, 247 | /// 10.0, 11.0, 12.0]; 248 | /// let mut matr_a = MatrixFull::from_vec([3,4],vec_a).unwrap(); 249 | /// // | 1.0 | 4.0 | 7.0 | 10.0 | 250 | /// //matr_a = | 2.0 | 5.0 | 8.0 | 11.0 | with the type of MatrixFull 251 | /// // | 3.0 | 6.0 | 9.0 | 12.0 | 252 | /// 253 | /// let mut matr_b = matr_a.get_submatrix_mut(0..2,1..3); 254 | /// //matr_b = | 4.0 | 7.0 | 255 | /// // | 5.0 | 8.0 | with the type of MatrixFull<&f64> 256 | /// 257 | /// let vec_c = (5..9).map(|x| x as f64).collect::>(); 258 | /// let matr_c = MatrixFull::from_vec([2,2],vec_c).unwrap(); 259 | /// //matr_c = | 5.0 | 7.0 | 260 | /// // | 6.0 | 8.0 | with the type of MatrixFull 261 | /// 262 | /// // matr_a[(0..2, 1..3)] += matr_c 263 | /// matr_b += matr_c; 264 | /// // | 1.0 | 9.0 | 14.0 | 10.0 | 265 | /// //matr_a = | 2.0 | 11.0 | 16.0 | 11.0 | with the type of MatrixFull 266 | /// // | 3.0 | 6.0 | 9.0 | 12.0 | 267 | /// assert_eq!(matr_a.get_submatrix(0..2,1..3).data(), vec![9.0,11.0,14.0,16.0]); 268 | /// ``` 269 | /// 270 | /// 4. Enable `(Sub)MatrixFull` +/- ``, and `(Sub)MatrixFull(Mut)` +=/-= `` 271 | /// ``` 272 | /// use rest_tensors::MatrixFull; 273 | /// let vec_a = vec![ 274 | /// 1.0, 2.0, 3.0, 275 | /// 4.0, 5.0, 6.0, 276 | /// 7.0, 8.0, 9.0, 277 | /// 10.0, 11.0, 12.0]; 278 | /// let mut matr_a = MatrixFull::from_vec([3,4],vec_a).unwrap(); 279 | /// // matr_b = matr_a + 2.0 280 | /// let mut matr_b = matr_a.clone() + 2.0; 281 | /// assert_eq!(matr_b[(..,0)], [3.0, 4.0, 5.0]); 282 | /// // matr_b = matr_b - 2.0 = matr_a 283 | /// matr_b -= 2.0; 284 | /// assert_eq!(matr_b, matr_a); 285 | /// 286 | /// let mut matr_b = matr_a.get_submatrix_mut(0..2,1..3); 287 | /// //matr_b = | 4.0 | 7.0 | 288 | /// // | 5.0 | 8.0 | with the type of MatrixFull<&f64> 289 | /// matr_b += 2.0; 290 | /// // | 1.0 | 6.0 | 9.0 | 10.0 | 291 | /// //matr_a = | 2.0 | 7.0 | 10.0 | 11.0 | with the type of MatrixFull 292 | /// // | 3.0 | 6.0 | 9.0 | 12.0 | 293 | /// assert_eq!(matr_a.get_submatrix(0..2,1..3).data(), vec![6.0,7.0,9.0,10.0]); 294 | /// ``` 295 | /// 5. Enable `(Sub)MatrixFull` *(/) ``, and `(Sub)MatrixFull(Mut)` +=(/=) `` 296 | /// ``` 297 | /// use rest_tensors::MatrixFull; 298 | /// let vec_a = vec![ 299 | /// 1.0, 2.0, 3.0, 300 | /// 4.0, 5.0, 6.0, 301 | /// 7.0, 8.0, 9.0, 302 | /// 10.0, 11.0, 12.0]; 303 | /// let matr_a = MatrixFull::from_vec([3,4],vec_a).unwrap(); 304 | /// // matr_b = matr_a * 2.0 305 | /// // NOTE: 2.0 should be located after the operator '*' and '/' 306 | /// let mut matr_b = matr_a.clone() * 2.0; 307 | /// assert_eq!(matr_b[(..,0)], [2.0, 4.0, 6.0]); 308 | /// // matr_b = matr_b / 2.0 = matr_a 309 | /// matr_b /= 2.0; 310 | /// assert_eq!(matr_b, matr_a); 311 | /// 312 | /// // | 1.0 | 4.0 | 7.0 | 10.0 | 313 | /// //matr_b = | 2.0 | 5.0 | 8.0 | 11.0 | with the type of MatrixFull 314 | /// // | 3.0 | 6.0 | 9.0 | 12.0 | 315 | /// let mut matr_c = matr_b.get_submatrix_mut(0..2,1..3); 316 | /// matr_c *= 2.0; 317 | /// // after the multiply operation 318 | /// // | 1.0 | 8.0 | 14.0 | 10.0 | 319 | /// //matr_b = | 2.0 | 10.0 | 16.0 | 11.0 | with the type of MatrixFull 320 | /// // | 3.0 | 6.0 | 9.0 | 12.0 | 321 | /// assert_eq!(matr_b.get_submatrix(0..2,1..3).data(), vec![8.0,10.0,14.0,16.0]); 322 | /// 323 | /// ``` 324 | /// # Iterators 325 | /// 1. 
The [`MatrixFull`](MatrixFull) struct implements the standard iterators `iter()`, `iter_mut()`, and `into_iter()`,
326 | /// which are nothing but wrappers around the iterators of `MatrixFull.data: Vec<T>`
327 | /// ```
328 | /// use rest_tensors::MatrixFull;
329 | /// let mut matr_a = MatrixFull::from_vec(
330 | ///     [3,4],
331 | ///     (1..13).collect::<Vec<i32>>()
332 | /// ).unwrap();
333 | ///
334 | /// let mut vec_a = (1..13).collect::<Vec<i32>>();
335 | /// matr_a.iter_mut().zip(vec_a.iter()).for_each(|(m_item, v_item)| {
336 | ///     assert_eq!(m_item, v_item)
337 | /// });
338 | /// matr_a.into_iter().zip(vec_a.iter()).for_each(|(m_item, v_item)| {
339 | ///     assert_eq!(m_item, *v_item);
340 | /// });
341 | /// ```
342 | /// As a column-major two-dimensional tensor, [`MatrixFull`](MatrixFull) also provides special iterators with
343 | /// respect to rows and/or columns:
344 | ///
345 | /// 2. `iter_column(j)` and `iter_column_mut(j)` provide the standard iterators over
346 | /// the immutable and mutable elements, respectively, in the `j`th column.
347 | /// ```
348 | /// use rest_tensors::MatrixFull;
349 | /// let matr_a = MatrixFull::from_vec(
350 | ///     [3,4],
351 | ///     (1..13).collect::<Vec<i32>>()
352 | /// ).unwrap();
353 | ///　//           _____
354 | ///　//          | 1 || 4 || 7 | 10 |
355 | /// //matr_a =  | 2 || 5 || 8 | 11 |   with the type of MatrixFull<i32>
356 | ///　//          | 3 || 6 || 9 | 12 |
357 | ///　//           -----
358 | /// let column_j = MatrixFull::from_vec([3,1],vec![4,5,6]).unwrap();
359 | /// matr_a.iter_column(1).zip(column_j.iter()).for_each(|(m_item, c_item)| {
360 | ///     assert_eq!(m_item, c_item)
361 | /// })
362 | /// ```
363 | /// 3. `iter_columns(Range<usize>)` and `iter_columns_mut(Range<usize>)` provide chunk iterators
364 | /// over a set of columns within the `Range`. The iterator yields the elements of the different columns
365 | /// chunk by chunk.
366 | ///
367 | /// `iter_columns_full()` and `iter_columns_full_mut()` are the special cases that iterate over all columns
368 | /// ```
369 | /// use rest_tensors::MatrixFull;
370 | /// let matr_a = MatrixFull::from_vec(
371 | ///     [3,4],
372 | ///     (1..13).collect::<Vec<i32>>()
373 | /// ).unwrap();
374 | ///　//           __________
375 | ///　//          | 1 || 4 | 7 || 10 |
376 | /// //matr_a =  | 2 || 5 | 8 || 11 |   with the type of MatrixFull<i32>
377 | ///　//          | 3 || 6 | 9 || 12 |
378 | ///　//           ----------
379 | /// let columns = vec![[4,5,6],[7,8,9]];
380 | /// matr_a.iter_columns(1..3).zip(columns.iter()).for_each(|(m_item, c_item)| {
381 | ///     assert_eq!(m_item, c_item)
382 | /// })
383 | /// ```
384 | /// 4. `iter_submatrix()` and `iter_submatrix_mut()` provide home-made `StepBy` iterators, in the column-major order, over
385 | /// the immutable and mutable elements in a sub-matrix, respectively.
386 | ///
387 | /// ```
388 | /// use rest_tensors::MatrixFull;
389 | /// let matr_a = MatrixFull::from_vec(
390 | ///     [3,4],
391 | ///     (1..13).collect::<Vec<i32>>()
392 | /// ).unwrap();
393 | ///　//           __________
394 | ///　//          | 1 || 4 | 7 || 10 |
395 | /// //matr_a =  | 2 || 5 | 8 || 11 |   with the type of MatrixFull<i32>
396 | ///　//           ----------
397 | ///　//          | 3 |  6 | 9 |  12 |
398 | /// let smatr_a = MatrixFull::from_vec([2,2],vec![4,5,7,8]).unwrap();
399 | /// matr_a.iter_submatrix(0..2,1..3).zip(smatr_a.iter()).for_each(|(m_item, sm_item)| {
400 | ///     assert_eq!(m_item, sm_item)
401 | /// })
402 | /// ```
403 | /// 5. Based on `iter_submatrix()` and `iter_submatrix_mut()`, [`MatrixFull`] provides flattened iterators over rows, i.e.
404 | /// `iter_row()`, `iter_row_mut()`, `iter_rows()`, `iter_rows_mut()`
405 | /// ```
406 | /// use rest_tensors::MatrixFull;
407 | /// let matr_a = MatrixFull::from_vec(
408 | ///     [3,4],
409 | ///     (1..13).collect::<Vec<i32>>()
410 | /// ).unwrap();
411 | ///　//          | 1 | 4 | 7 | 10 |
412 | ///　//           _____________________
413 | /// //matr_a =  | 2 | 5 | 8 | 11 |      with the type of MatrixFull<i32>
414 | ///　//           ---------------------
415 | ///　//          | 3 | 6 | 9 | 12 |
416 | ///
417 | /// let row_1 = vec![&2,&5,&8,&11];
418 | /// let from_iter_row = matr_a.iter_row(1).collect::<Vec<&i32>>();
419 | /// assert_eq!(row_1, from_iter_row);
420 | /// ```
421 | /// 6. Iterate over the diagonal terms using `iter_diagonal().unwrap()` and `iter_diagonal_mut().unwrap()`
422 | /// ```
423 | /// use rest_tensors::MatrixFull;
424 | /// let matr_a = MatrixFull::from_vec(
425 | ///     [4,4],
426 | ///     (1..17).collect::<Vec<i32>>()
427 | /// ).unwrap();
428 | ///　//          | 1 | 5 | 9  | 13 |
429 | /// //matr_a =  | 2 | 6 | 10 | 14 |   with the type of MatrixFull<i32>
430 | ///　//          | 3 | 7 | 11 | 15 |
431 | ///　//          | 4 | 8 | 12 | 16 |
432 | ///
433 | /// let diagonal = vec![&1,&6,&11,&16];
434 | /// let from_diagonal_iter = matr_a.iter_diagonal().unwrap().collect::<Vec<&i32>>();
435 | /// assert_eq!(from_diagonal_iter, diagonal);
436 | /// ```
437 | /// 7. Iterate over the upper triangle of the matrix using `iter_matrixupper().unwrap()` and `iter_matrixupper_mut().unwrap()`
438 | /// ```
439 | /// use rest_tensors::MatrixFull;
440 | /// let matr_a = MatrixFull::from_vec(
441 | ///     [4,4],
442 | ///     (1..17).collect::<Vec<i32>>()
443 | /// ).unwrap();
444 | ///　//          | 1 | 5 | 9  | 13 |
445 | /// //matr_a =  | 2 | 6 | 10 | 14 |   with the type of MatrixFull<i32>
446 | ///　//          | 3 | 7 | 11 | 15 |
447 | ///　//          | 4 | 8 | 12 | 16 |
448 | ///
449 | /// let upper = vec![&1,&5,&6,&9,&10,&11,&13,&14,&15,&16];
450 | /// let from_upper_iter = matr_a.iter_matrixupper().unwrap().collect::<Vec<&i32>>();
451 | /// assert_eq!(from_upper_iter, upper);
452 | /// ```
453 | /// # Slicing
454 | /// The [`MatrixFull`](MatrixFull) struct provides tools to slice the data for a given column or a set of contiguous columns:
455 | /// `slice_column(j:usize)->&[T]`, `slice_column_mut(j:usize)->&mut[T]`,
456 | /// and `slice_columns(j:Range<usize>)->&[T]`, `slice_columns_mut(j:Range<usize>)->&mut[T]`
457 | /// ```
458 | /// use rest_tensors::MatrixFull;
459 | /// let matr_a = MatrixFull::from_vec(
460 | ///     [3,4],
461 | ///     (1..13).collect::<Vec<i32>>()
462 | /// ).unwrap();
463 | ///　//          | 1 | 4 | 7 | 10 |
464 | /// //matr_a =  | 2 | 5 | 8 | 11 |   with the type of MatrixFull<i32>
465 | ///　//          | 3 | 6 | 9 | 12 |
466 | ///
467 | /// let column_1 = matr_a.slice_column(1);
468 | /// assert_eq!(column_1, &[4,5,6]);
469 | /// let column_12 = matr_a.slice_columns(1..3);
470 | /// assert_eq!(column_12, &[4,5,6,7,8,9]);
471 | /// ```
472 | #[derive(Clone,Debug, PartialEq)]
473 | pub struct MatrixFull<T> {
474 |     /// the numbers of rows and columns; the data is stored column-major
475 |     pub size : [usize;2],
476 |     /// `indicing` holds the stride of each dimension to facilitate element navigation, in particular for rank-3 tensors like `RIFull`
477 |     pub indicing: [usize;2],
478 |     /// the data stored in the [`Vec`](Vec) struct
479 |     pub data : Vec<T>,
480 | }
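Editorial sketch: the `size`/`indicing`/`data` trio above is easiest to read with a tiny example. This is not one of the crate's own doc-tests; it only assumes the column-major stride convention visible in the constructors elsewhere in this dump (for a freshly built, contiguous matrix, `indicing == [1, nrow]`):
```
use rest_tensors::MatrixFull;

fn main() {
    // Element (i, j) lives at data[i*indicing[0] + j*indicing[1]].
    let matr = MatrixFull::from_vec([3, 4], (1..13).collect::<Vec<i32>>()).unwrap();
    let (i, j) = (2, 1); // row 2 of column 1, which holds [4, 5, 6]
    let flat = i * matr.indicing[0] + j * matr.indicing[1];
    assert_eq!(flat, 5);
    assert_eq!(matr.data[flat], 6);
}
```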
481 |
482 | #[derive(Clone, Copy,Debug, PartialEq)]
483 | // MatFormat: used for matrix printing
484 | pub enum MatFormat {
485 |     Full,
486 |     Upper,
487 |     Lower
488 | }
489 |
490 | pub trait BasicMatrix<'a, T> {
491 |
492 |     fn size(&self) -> &[usize];
493 |
494 |     fn indicing(&self) -> &[usize];
495 |
496 |     fn is_matr(&self) -> bool {self.size().len() == 2 && self.indicing().len() == 2}
497 |
498 |     /// by default, the matrix should be contiguous, unless specified explicitly.
499 |     fn is_contiguous(&self) -> bool {true}
500 |
501 |     fn data_ref(&self) -> Option<&[T]>;
502 |
503 |     fn data_ref_mut(&mut self) -> Option<&mut [T]>;
504 |
505 | }
506 |
507 | pub trait BasicMatrixOpt<'a, T> where Self: BasicMatrix<'a, T>, T: Copy + Clone {
508 |     fn to_matrixfull(&self) -> Option<MatrixFull<T>>
509 |     where T: Copy + Clone {
510 |         if let Some(data) = self.data_ref() {
511 |             return Some(MatrixFull {
512 |                 size: [self.size()[0],self.size()[1]],
513 |                 indicing: [self.indicing()[0],self.indicing()[1]],
514 |                 data: data.iter().map(|x| *x).collect::<Vec<T>>(),
515 |             })
516 |         } else {
517 |             None
518 |         }
519 |     }
520 | }
521 |
522 |
523 | pub fn basic_check_shape(size_a: &[usize], size_b: &[usize]) -> bool {
524 |     size_a.iter().zip(size_b.iter()).fold(true, |check,size| {
525 |         check && size.0==size.1
526 |     })
527 | }
528 |
529 | pub fn check_shape<'a, Q,P,T>(matr_a:&'a Q, matr_b: &'a P) -> bool
530 |     where Q: BasicMatrix<'a, T>,
531 |           P: BasicMatrix<'a, T>
532 | {
533 |     matr_a.size().iter().zip(matr_b.size().iter()).fold(true, |check,size| {
534 |         check && size.0==size.1
535 |     })
536 | }
537 |
538 | pub fn general_check_shape<'a, Q,P,T>(matr_a:&'a Q, matr_b: &'a P,opa: char, opb: char) -> bool
539 |     where Q: BasicMatrix<'a, T>,
540 |           P: BasicMatrix<'a, T>
541 | {
542 |     crate::matrix::matrix_blas_lapack::general_check_shape(matr_a, matr_b, opa, opb)
543 | }
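Editorial sketch: `BasicMatrix` is what lets the shape helpers above (and the `MathMatrix` trait below) stay generic over `MatrixFull`, slices, and sub-matrix views. The helper `sum_elements` here is hypothetical (not a crate API); the example only assumes that `MatrixFull` implements `BasicMatrix`, which the trait bounds below require, and that both names are re-exported at the crate root as they are in `src/ri.rs`:
```
use rest_tensors::{BasicMatrix, MatrixFull};

// Hypothetical helper: sums the elements of any matrix type implementing
// `BasicMatrix`, falling back to 0.0 for detached (non-contiguous) views,
// where `data_ref()` returns None.
fn sum_elements<'a, M>(matr: &'a M) -> f64
where
    M: BasicMatrix<'a, f64>,
{
    matr.data_ref().map_or(0.0, |d| d.iter().sum())
}

fn main() {
    let matr = MatrixFull::from_vec([2, 2], vec![1.0, 2.0, 3.0, 4.0]).unwrap();
    assert_eq!(sum_elements(&matr), 10.0);
}
```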
544 |
545 | pub trait MathMatrix<'a, T> where Self: BasicMatrix<'a, T> + BasicMatrixOpt<'a, T>, T: Copy + Clone {
546 |     fn add<Q>(&'a self, other: &'a Q) -> Option<MatrixFull<T>>
547 |     where T: Add<Output = T> + AddAssign,
548 |           Q: BasicMatrix<'a, T>, Self: Sized
549 |     {
550 |         if check_shape(self, other) {
551 |             let mut new_tensors = self.to_matrixfull();
552 |             if let Some(out_tensors) = &mut new_tensors {
553 |                 out_tensors.data_ref_mut().unwrap().iter_mut()
554 |                     .zip(other.data_ref().unwrap().iter()).for_each(|(t,f)| {*t += *f});
555 |                 new_tensors
556 |             } else {None}
557 |         } else {
558 |             None
559 |         }
560 |     }
561 |     fn scaled_add<Q>(&'a self, other: &'a Q, scale_factor: T) -> Option<MatrixFull<T>>
562 |     where T: Add<Output = T> + AddAssign + Mul<Output = T>,
563 |           Q: BasicMatrix<'a, T>, Self: Sized
564 |     {
565 |         if check_shape(self, other) {
566 |             let mut new_tensors = self.to_matrixfull();
567 |             if let Some(out_tensors) = &mut new_tensors {
568 |                 out_tensors.data_ref_mut().unwrap().iter_mut()
569 |                     .zip(other.data_ref().unwrap().iter()).for_each(|(t,f)| {*t += scale_factor * (*f)});
570 |                 new_tensors
571 |             } else {None}
572 |         } else {
573 |             None
574 |         }
575 |     }
576 |     /// For A += B
577 |     fn self_add<Q>(&'a mut self, bm: &'a Q)
578 |     where T: Add<Output = T> + AddAssign,
579 |           Q: BasicMatrix<'a, T>, Self:Sized
580 |     {
581 |         let size_a = [self.size()[0], self.size()[1]];
582 |         // A = A + B
583 |         if ! basic_check_shape(&size_a, bm.size()) {
584 |             panic!("Error: Shape inconsistency happens when adding two matrices");
585 |         }
586 |         self.data_ref_mut().unwrap().iter_mut()
587 |             .zip(bm.data_ref().unwrap().iter())
588 |             .for_each(|(c,p)| {*c += *p});
589 |     }
590 |     /// For A += c*B, where c is a scale factor
591 |     fn self_scaled_add<Q>(&'a mut self, bm: &'a Q, b: T)
592 |     where T: Add<Output = T> + AddAssign + Mul<Output = T>,
593 |           Q: BasicMatrix<'a, T>, Self:Sized
594 |     {
595 |         let size_a = [self.size()[0], self.size()[1]];
596 |         if basic_check_shape(&size_a, bm.size()) {
597 |             self.data_ref_mut().unwrap().iter_mut()
598 |                 .zip(bm.data_ref().unwrap().iter())
599 |                 .for_each(|(c,p)| {*c += *p*b});
600 |         } else {
601 |             panic!("Error: Shape inconsistency happens when adding two matrices");
602 |         }
603 |     }
604 |     /// For a*A + b*B -> A
605 |     fn self_general_add<Q>(&'a mut self, bm: &'a Q, a: T, b: T)
606 |     where T: Add<Output = T> + AddAssign + Mul<Output = T>,
607 |           Q: BasicMatrix<'a, T>, Self:Sized
608 |     {
609 |         let size_a = [self.size()[0], self.size()[1]];
610 |         if basic_check_shape(&size_a, bm.size()) {
611 |             //let mut new_tensors: MatrixFull = self.clone();
612 |             self.data_ref_mut().unwrap().iter_mut().zip(bm.data_ref().unwrap().iter()).for_each(|(c,p)| {*c = *c*a+(*p)*b});
613 |         } else {
614 |             panic!("Error: Shape inconsistency happens when adding two matrices");
615 |         }
616 |     }
617 |     /// For A - B -> C
618 |     fn sub<Q>(&'a self, other: &'a Q) -> Option<MatrixFull<T>>
619 |     where T: Sub<Output = T> + SubAssign, Q: BasicMatrix<'a, T>, Self: Sized
620 |     {
621 |         if check_shape(self,other) {
622 |             let mut new_tensors = self.to_matrixfull();
623 |             if let Some(out_tensors) = &mut new_tensors {
624 |                 out_tensors.data_ref_mut().unwrap().iter_mut().zip(other.data_ref().unwrap().iter()).for_each(|(c,p)| {*c -= *p});
625 |                 new_tensors
626 |             } else {None}
627 |         } else {None}
628 |     }
629 |     /// For A - B -> A
630 |     fn self_sub<Q>(&'a mut self, bm: &'a Q)
631 |     where T: Sub<Output = T> + SubAssign, Q: BasicMatrix<'a, T>, Self: Sized
632 |     {
633 |         let size_a = [self.size()[0], self.size()[1]];
634 |         if basic_check_shape(&size_a, bm.size()) {
635 |             self.data_ref_mut().unwrap().iter_mut().zip(bm.data_ref().unwrap().iter()).for_each(|(c,p)| {*c -= *p});
636 |         } else {
637 |             panic!("Error: Shape inconsistency happens when subtracting two matrices");
638 |         }
639 |     }
640 |     /// For a*A -> A
641 |     #[inline]
642 |     fn self_multiple(&mut self, a: T)
643 |     where T: Mul<Output = T> + MulAssign,
644 |     {
645 |         self.data_ref_mut().unwrap().iter_mut().for_each(|c| {*c *= a});
646 |     }
647 |
648 | }
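Editorial sketch: a minimal use of the `MathMatrix` methods above. It assumes `MatrixFull` implements `MathMatrix` (the trait is blanket-style over `BasicMatrix` implementors) and that the trait is re-exported at the crate root; if not, import it from its defining module instead:
```
use rest_tensors::{MathMatrix, MatrixFull};

fn main() {
    let mut matr_a = MatrixFull::from_vec([2, 2], vec![1.0, 2.0, 3.0, 4.0]).unwrap();
    let matr_b = MatrixFull::from_vec([2, 2], vec![4.0, 3.0, 2.0, 1.0]).unwrap();

    // C = A + 2.0*B, shape-checked and returned as Option<MatrixFull<f64>>.
    let matr_c = matr_a.scaled_add(&matr_b, 2.0).unwrap();
    assert_eq!(matr_c.data, vec![9.0, 8.0, 7.0, 6.0]);

    // A += B in place; panics on a shape mismatch instead of returning None.
    matr_a.self_add(&matr_b);
    assert_eq!(matr_a.data, vec![5.0, 5.0, 5.0, 5.0]);
}
```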
649 |
650 | pub trait ParMathMatrix<'a, T>
651 | where Self: Sized + BasicMatrix<'a, T> + BasicMatrixOpt<'a, T>,
652 |       T: Copy + Clone + Send + Sync
653 | {
654 |     /// Parallel version for C = A + B,
655 |     fn par_add<Q>(&'a self, other: &'a Q) -> Option<MatrixFull<T>>
656 |     where T: Add<Output = T> + AddAssign,
657 |           Q: BasicMatrix<'a, T>, Self: Sized
658 |     {
659 |         if check_shape(self, other) {
660 |             let mut new_tensors = self.to_matrixfull();
661 |             if let Some(out_tensors) = &mut new_tensors {
662 |                 out_tensors.data_ref_mut().unwrap().par_iter_mut()
663 |                     .zip(other.data_ref().unwrap().par_iter()).for_each(|(t,f)| {*t += *f});
664 |                 new_tensors
665 |             } else {None}
666 |         } else {
667 |             None
668 |         }
669 |     }
670 |     /// Parallel version for C = A + c*B,
671 |     fn par_scaled_add<Q>(&'a self, other: &'a Q, fac: T) -> Option<MatrixFull<T>>
672 |     where T: Add<Output = T> + AddAssign + Mul<Output = T> + MulAssign,
673 |           Q: BasicMatrix<'a, T>, Self: Sized
674 |     {
675 |         if check_shape(self, other) {
676 |             let mut new_tensors = self.to_matrixfull();
677 |             if let Some(out_tensors) = &mut new_tensors {
678 |                 out_tensors.data_ref_mut().unwrap().par_iter_mut()
679 |                     .zip(other.data_ref().unwrap().par_iter()).for_each(|(t,f)| {*t += *f*fac});
680 |                 new_tensors
681 |             } else {None}
682 |         } else {
683 |             None
684 |         }
685 |     }
686 |     /// Parallel version for A + B -> A
687 |     fn par_self_add<Q>(&mut self, bm: &'a Q)
688 |     where T: Add<Output = T> + AddAssign,
689 |           Q: BasicMatrix<'a, T>,
690 |           Self: Sized
691 |     {
692 |         let size_a = [self.size()[0], self.size()[1]];
693 |         if basic_check_shape(&size_a, bm.size()) {
694 |             self.data_ref_mut().unwrap().par_iter_mut().zip(bm.data_ref().unwrap().par_iter())
695 |                 .for_each(|(c,p)| {*c += *p});
696 |         } else {
697 |             panic!("Error: Shape inconsistency happens when adding two matrices");
698 |         }
699 |     }
700 |     /// Parallel version for A + b*B -> A
701 |     fn par_self_scaled_add<Q>(&mut self, bm: &'a Q, b: T)
702 |     where T: Add<Output = T> + AddAssign + Mul<Output = T> + MulAssign,
703 |           Q: BasicMatrix<'a, T>
704 |     {
705 |         let size_a = [self.size()[0], self.size()[1]];
706 |         if basic_check_shape(&size_a, bm.size()) {
707 |             self.data_ref_mut().unwrap().par_iter_mut().zip(bm.data_ref().unwrap().par_iter()).for_each(|(c,p)| {*c += *p*b});
708 |         } else {
709 |             panic!("Error: Shape inconsistency happens when adding two matrices");
710 |         }
711 |     }
712 |     /// Parallel version for a*A + b*B -> A
713 |     fn par_self_general_add<Q>(&'a mut self, bm: &'a Q, a: T, b: T)
714 |     where T: Add<Output = T> + AddAssign + Mul<Output = T>,
715 |           Q: BasicMatrix<'a, T>
716 |     {
717 |         let size_a = [self.size()[0], self.size()[1]];
718 |         if basic_check_shape(&size_a, bm.size()) {
719 |             //let mut new_tensors: MatrixFull = self.clone();
720 |             self.data_ref_mut().unwrap().par_iter_mut().zip(bm.data_ref().unwrap().par_iter()).for_each(|(c,p)| {*c = *c*a+(*p)*b});
721 |         } else {
722 |             panic!("Error: Shape inconsistency happens when adding two matrices");
723 |         }
724 |     }
725 |     /// Parallel version for A - B -> C
726 |     fn par_sub<Q>(&'a self, other: &'a Q) -> Option<MatrixFull<T>>
727 |     where T: Sub<Output = T> + SubAssign, Q: BasicMatrix<'a, T>
728 |     {
729 |         if check_shape(self,other) {
730 |             let mut new_tensors = self.to_matrixfull();
731 |             if let Some(out_tensors) = &mut new_tensors {
732 |                 out_tensors.data_ref_mut().unwrap().par_iter_mut().zip(other.data_ref().unwrap().par_iter()).for_each(|(c,p)| {*c -= *p});
733 |                 new_tensors
734 |             } else {None}
735 |         } else {None}
736 |     }
737 |     /// Parallel version for A - B -> A
738 |     fn par_self_sub<Q>(&mut self, bm: &'a Q)
739 |     where T: Sub<Output = T> + SubAssign,
740 |           Q: BasicMatrix<'a, T>
741 |     {
742 |         let size_a = [self.size()[0], self.size()[1]];
743 |         if basic_check_shape(&size_a, bm.size()) {
744 |             self.data_ref_mut().unwrap().par_iter_mut().zip(bm.data_ref().unwrap().par_iter()).for_each(|(c,p)| {*c -= *p});
745 |         } else {
746 |             panic!("Error: Shape inconsistency happens when subtracting two matrices");
747 |         }
748 |     }
749 |     /// Parallel version for a*A -> A
750 |     fn par_self_multiple(&mut self, a: T)
751 |     where T: Mul<Output = T> + MulAssign
752 |     {
753 |         self.data_ref_mut().unwrap().par_iter_mut().for_each(|c| {*c *= a});
754 |     }
755 |
756 |
757 | }
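Editorial sketch: the `par_*` methods are Rayon-parallel, element-wise counterparts of the `MathMatrix` methods, so they pay off only for large matrices. As above, this assumes `MatrixFull` implements `ParMathMatrix` and that the trait is re-exported at the crate root:
```
use rest_tensors::{MatrixFull, ParMathMatrix};

fn main() {
    // A += 0.5*B across the Rayon thread pool: 1.0 + 0.5*2.0 == 2.0 everywhere.
    let mut matr_a = MatrixFull::new([1000, 1000], 1.0);
    let matr_b = MatrixFull::new([1000, 1000], 2.0);
    matr_a.par_self_scaled_add(&matr_b, 0.5);
    assert!(matr_a.data.iter().all(|&x| x == 2.0));
}
```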
--------------------------------------------------------------------------------
/src/matrix/submatrixfull.rs:
--------------------------------------------------------------------------------
1 | //#![warn(missing_docs)]
2 | use std::{fmt::Display, collections::binary_heap::Iter, iter::{Filter,Flatten, Map}, convert, slice::{ChunksExact,ChunksExactMut, self}, mem::ManuallyDrop, marker, cell::RefCell, ops::{IndexMut, RangeFull, MulAssign, DivAssign, Div, DerefMut, Deref}, thread::panicking};
3 | use std::ops::{Add, Sub, Mul, AddAssign, SubAssign, Index, Range};
4 | use libc::{CLOSE_RANGE_CLOEXEC, SYS_userfaultfd};
5 | use typenum::{U2, Pow};
6 | use rayon::{prelude::*, collections::btree_map::IterMut, iter::Enumerate};
7 | use std::vec::IntoIter;
8 |
9 | use crate::matrix::{MatrixFull, BasicMatrix, MatFormat, BasicMatrixOpt, MathMatrix, ParMathMatrix};
10 | use crate::index::*;
11 | use crate::tensor_basic_operation::*;
12 | use crate::matrix::matrixfullslice::*;
13 | use crate::matrix::matrixupper::*;
14 | //{Indexing,Tensors4D};
15 |
16 | pub enum SubMatrixFull<'a,T> {
17 |     Contiguous(MatrixFullSlice<'a, T>),
18 |     Detached(MatrixFull<&'a T>),
19 | }
20 | pub enum SubMatrixFullMut<'a,T> {
21 |     Contiguous(MatrixFullSliceMut<'a, T>),
22 |     Detached(MatrixFull<&'a mut T>),
23 | }
24 |
25 | impl <'a, T: Copy + Clone> BasicMatrix<'a, T> for SubMatrixFull<'a, T> {
26 |     #[inline]
27 |     /// `matr_a.size()` returns `&matr_a.size`
28 |     fn size(&self) -> &[usize] {
29 |         match &self {
30 |             Self::Contiguous(matr) => {matr.size},
31 |             Self::Detached(matr) => {&matr.size}
32 |         }
33 |     }
34 |     #[inline]
35 |     /// `matr_a.indicing()` returns `&matr_a.indicing`
36 |     fn indicing(&self) -> &[usize] {
37 |         match &self {
38 |             Self::Contiguous(matr) => {matr.indicing},
39 |             Self::Detached(matr) => {&matr.indicing}
40 |         }
41 |     }
42 |
43 |     fn is_contiguous(&self) -> bool {
44 |         match &self {
45 |             Self::Contiguous(_) => {true},
46 |             Self::Detached(_) => {false},
47 |         }
48 |     }
49 |
50 |     fn data_ref(&self) -> Option<&[T]> {
51 |         match &self {
52 |             Self::Contiguous(matr) => {Some(matr.data)},
53 |             Self::Detached(_) => {None},
54 |         }
55 |     }
56 |
57 |     fn data_ref_mut(&mut self) -> Option<&mut [T]> {None}
58 |
59 | }
60 |
61 | impl<'a, T> BasicMatrixOpt<'a, T> for SubMatrixFull<'a, T> where T: Copy + Clone {}
62 |
63 | impl<'a, T> MathMatrix<'a, T> for SubMatrixFull<'a, T> where T: Copy + Clone {}
64 |
65 | impl<'a, T> ParMathMatrix<'a, T> for SubMatrixFull<'a, T> where T: Copy + Clone + Send + Sync {}
66 |
67 |
68 | impl <'a, T> BasicMatrix<'a, T> for SubMatrixFullMut<'a, T> {
69 |     #[inline]
70 |     /// `matr_a.size()` returns `&matr_a.size`
71 |     fn size(&self) -> &[usize] {
72 |         match &self {
73 |             Self::Contiguous(matr) => {matr.size},
74 |             Self::Detached(matr) => {&matr.size}
75 |         }
76 |     }
77 |     #[inline]
78 |     /// `matr_a.indicing()` returns `&matr_a.indicing`
79 |     fn indicing(&self) -> &[usize] {
80 |         match &self {
81 |             Self::Contiguous(matr) => {matr.indicing},
82 |             Self::Detached(matr) => {&matr.indicing}
83 |         }
84 |     }
85 |
86 |     fn data_ref(&self) -> Option<&[T]> {
87 |         match &self {
88 |             Self::Contiguous(matr) => {Some(matr.data)},
89 |             Self::Detached(_) => {None},
90 |         }
91 |     }
92 |     fn data_ref_mut(&mut self) -> Option<&mut [T]> {
93 |         match self {
94 |             Self::Contiguous(matr) => {Some(matr.data)},
95 |             Self::Detached(_) => {None},
96 |         }
97 |     }
98 |     fn is_contiguous(&self) -> bool {
99 |         match &self {
100 |             Self::Contiguous(_) => {true},
101 |             Self::Detached(_) => {false},
102 |         }
103 |     }
104 | }
105 |
106 | impl<'a, T> BasicMatrixOpt<'a, T> for SubMatrixFullMut<'a, T> where T: Copy + Clone {}
107 |
108 | impl<'a, T> MathMatrix<'a, T> for SubMatrixFullMut<'a, T> where T: Copy + Clone {}
109 |
110 | impl<'a, T> ParMathMatrix<'a, T> for SubMatrixFullMut<'a, T> where T: Copy + Clone + Send + Sync {}
111 |
112 | impl <'a, T: Copy + Clone> SubMatrixFull<'a, T> {
113 |     pub fn data(&self) -> Vec<T> {
114 |         match &self {
115 |             SubMatrixFull::Contiguous(matr) => {matr.data.to_vec()},
116 |             SubMatrixFull::Detached(matr) => {
117 |                 matr.data.iter().map(|x| **x).collect::<Vec<T>>()
118 |             },
119 |         }
120 |     }
121 |     /// Converts a `Contiguous` view into the `Detached` (element-reference) representation; a `Detached` input is returned unchanged.
122 |     pub fn c2d(self) -> SubMatrixFull<'a, T> {
123 |         if let SubMatrixFull::Contiguous(matr) = self {
124 |             let size = matr.size();
125 |             let size = [size[0],size[1]];
126 |             let indc = matr.indicing();
127 |             let indicing = [indc[0],indc[1]];
128 |             SubMatrixFull::Detached(MatrixFull {
129 |                 size,
130 |                 indicing,
131 |                 data: matr.data.iter().collect::<Vec<&T>>()
132 |             })
133 |         } else {
134 |             self
135 |         }
136 |     }
137 | }
138 | impl <'a, T: Copy + Clone> SubMatrixFullMut<'a, T> {
139 |     pub fn data(&self) -> Vec<T> {
140 |         match &self {
141 |             Self::Contiguous(matr) => {matr.data.to_vec()},
142 |             Self::Detached(matr) => {
143 |                 matr.data.iter().map(|x| **x).collect::<Vec<T>>()
144 |             },
145 |         }
146 |     }
147 |     /// Converts a `Contiguous` view into the `Detached` (element-reference) representation; a `Detached` input is returned unchanged.
148 |     pub fn c2d(self) -> SubMatrixFullMut<'a, T> {
149 |         if let SubMatrixFullMut::Contiguous(matr) = self {
150 |             let size = matr.size();
151 |             let size = [size[0],size[1]];
152 |             let indc = matr.indicing();
153 |             let indicing = [indc[0],indc[1]];
154 |             SubMatrixFullMut::Detached(MatrixFull {
155 |                 size,
156 |                 indicing,
157 |                 data: matr.data.iter_mut().collect::<Vec<&mut T>>()
158 |             })
159 |         } else {
160 |             self
161 |         }
162 |     }
163 | }
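Editorial sketch: the two variants encode whether a sub-matrix view can borrow one plain slice of the column-major buffer. The doc example earlier shows that a row-truncated block such as `get_submatrix(0..2,1..3)` is `Detached`; whether a full-height column block is detected as `Contiguous` depends on dispatch logic in `matrixfull.rs` that is not shown in this dump, so that part of the sketch is an assumption:
```
use rest_tensors::{BasicMatrix, MatrixFull};

fn main() {
    let matr = MatrixFull::from_vec([3, 4], (1..13).collect::<Vec<i32>>()).unwrap();

    // Full-height columns 1..3 sit back-to-back in the column-major buffer,
    // so this view is assumed to take the Contiguous (slice-borrowing) form.
    let cols = matr.get_submatrix(0..3, 1..3);
    assert!(cols.is_contiguous());
    assert_eq!(cols.data(), vec![4, 5, 6, 7, 8, 9]);

    // Truncated rows are strided, so the view must gather element
    // references: the Detached variant wrapping a MatrixFull<&i32>.
    let block = matr.get_submatrix(0..2, 1..3);
    assert!(!block.is_contiguous());
    assert_eq!(block.data(), vec![4, 5, 7, 8]);
}
```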
--------------------------------------------------------------------------------
/src/ri.rs:
--------------------------------------------------------------------------------
1 | use std::ffi::{c_double, c_int};
2 | use std::fmt::Debug;
3 | use std::iter::Flatten;
4 | use std::slice::{ChunksExactMut,ChunksExact};
5 | use std::vec::IntoIter;
6 | use std::{fmt::Display, collections::binary_heap::Iter, iter::Filter, convert};
7 | use std::ops::{Add, Sub, Mul, AddAssign, SubAssign, Index, Range, MulAssign, DivAssign};
8 | use typenum::{U2, Pow};
9 | use rayon::{prelude::*,slice};
10 | use itertools::{iproduct, Itertools};
11 |
12 |
13 | use crate::external_libs::{ri_ao2mo_f, ri_copy_from_ri, ri_copy_from_matr};
14 | use crate::matrix_blas_lapack::{_dgemm_nn,_dgemm_tn, _dgemm_tn_v02};
15 | use crate::{MatrixFullSliceMut, MatrixFullSlice, MatrixFull, BasicMatrix, SubMatrixFullSlice};
16 | use crate::{index::{TensorIndex, TensorIndexUncheck}, Tensors4D, TensorOpt, TensorOptMut, TensorSlice, TensorSliceMut, TensorOptUncheck, TensorSliceUncheck, TensorSliceMutUncheck, TensorOptMutUncheck};
17 |
18 | #[derive(Clone,Debug,PartialEq)]
19 | pub struct RIFull<T> {
20 |     /// Column-major rank-3 tensor designed specifically for the three-center integrals of the resolution-of-identity (RI) approximation in quantum chemistry calculations.
21 |     pub size : [usize;3],
22 |     pub indicing: [usize;3],
23 |     pub data : Vec<T>
24 | }
25 |
26 | impl<T: Clone + Send + Sync> RIFull<T> {
27 |     pub fn new(size: [usize;3], new_default: T) -> RIFull<T> {
28 |         let mut indicing = [0usize;3];
29 |         let mut len = size.iter().zip(indicing.iter_mut()).fold(1usize,|len,(sizei,indicing_i)| {
30 |             *indicing_i = len;
31 |             len * sizei
32 |         });
33 |         //if let Some(value)=size.get(2) {len *= value};
34 |         //println!("{}", len);
35 |         RIFull {
36 |             size,
37 |             indicing,
38 |             data: vec![new_default; len]
39 |         }
40 |     }
41 |     pub fn empty() -> RIFull<T> {
42 |         RIFull { size: [0,0,0], indicing: [0,0,0], data: Vec::new() }
43 |     }
44 |     pub unsafe fn from_vec_unchecked(size: [usize;3], new_vec: Vec<T>) -> RIFull<T> {
45 |         let mut indicing = [0usize;3];
46 |         let mut len = size.iter().zip(indicing.iter_mut()).fold(1usize,|len,(sizei,indicing_i)| {
47 |             *indicing_i = len;
48 |             len * sizei
49 |         });
50 |         //if let Some(value)=size.get(2) {len *= value};
51 |         RIFull {
52 |             size,
53 |             indicing,
54 |             data: new_vec
55 |         }
56 |     }
57 |     pub fn from_vec(size: [usize;3], new_vec: Vec<T>) -> Option<RIFull<T>> {
58 |         unsafe{
59 |             let tmp_tensor = RIFull::from_vec_unchecked(size, new_vec);
60 |             let len = tmp_tensor.size.iter().fold(1_usize,|acc,x| {acc*x});
61 |             if len>tmp_tensor.data.len() {
62 |                 panic!("Error: inconsistency happens when formatting a tensor from a given vector, (length from size, length of new vector) = ({},{})",len,tmp_tensor.data.len());
63 |                 None
64 |             } else {
65 |                 if len<tmp_tensor.data.len() {println!("Warning: the given vector is longer than the tensor size; the data beyond the tensor size will be ignored")};
66 |                 Some(tmp_tensor)
67 |             }
68 |         }
69 |     }
70 |     #[inline]
71 |     pub fn get_reducing_matrix_mut(&mut self, i_reduced: usize) -> Option<MatrixFullSliceMut<T>> {
72 |         let p_length = if let Some(value) = self.indicing.get(2) {value} else {return None};
73 |         let p_start = p_length * i_reduced;
74 |         Some(MatrixFullSliceMut {
75 |             size: &self.size[0..2],
76 |             indicing: &self.indicing[0..2],
77 |             data : &mut self.data[p_start..p_start+p_length]}
78 |         )
79 |     }
80 |
81 |     //pub fn get_reducing_matrix_mut(&mut self, i_reduced: usize) -> Option<MatrixFullSliceMut<T>> {
82 |     //    let p_length = if let Some(value) = self.indicing.get(2) {value} else {return None};
83 |     //    let p_start = p_length * i_reduced;
84 |     //    Some(MatrixFullSliceMut {
85 |     //        size: &self.size[0..2],
86 |     //        indicing: &self.indicing[0..2],
87 |     //        data : &mut self.data[p_start..p_start+p_length]}
88 |     //    )
89 |     //}
90 |
91 |     #[inline]
92 |     pub fn get_reducing_matrix(&self, i_reduced: usize) -> Option<MatrixFullSlice<T>> {
93 |         let p_length = if let Some(value) = self.indicing.get(2) {value} else {return None};
94 |         let p_start = p_length * i_reduced;
95 |         Some(MatrixFullSlice {
96 |             size: &self.size[0..2],
97 |             indicing: &self.indicing[0..2],
98 |             data : &self.data[p_start..p_start+p_length]}
99 |         )
100 |     }
101 |     #[inline]
102 |     pub fn get_reducing_matrix_columns(&self, range_columns:Range<usize>, i_reduced: usize) -> Option<SubMatrixFullSlice<T>> {
103 |         let z_length = if let Some(value) = self.indicing.get(2) {value} else {return None};
104 |         let y_length = if let Some(value) = self.indicing.get(1) {value} else {return None};
105 |         let start = z_length * i_reduced + y_length* range_columns.start;
106 |         let end = start + y_length*range_columns.len();
107 |         let size = [self.size[0],range_columns.len()];
108 |         let indicing = [1,size[1]];
109 |         //let p_start = p_length * i_reduced;
110 |         Some(SubMatrixFullSlice {
111 |             size,
112 |             indicing,
113 |             data : &self.data[start..end]}
114 |         )
115 |     }
116 |     #[inline]
117 |     pub fn get_slices(&self, x: Range<usize>, y: Range<usize>, z: Range<usize>) -> Flatten<IntoIter<&[T]>> {
118 |         let mut tmp_slices = vec![&self.data[..]; y.len()*z.len()];
119 |         let len_slices_x = x.len();
120 |         let len_y = self.indicing[1];
121 |         let len_z = self.indicing[2];
122 |
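// Editorial note on the index arithmetic below: the buffer is column-major,
// so element [x, y, z] lives at offset x + y*indicing[1] + z*indicing[2].
// For every (z, y) pair, the x-range therefore covers one contiguous run of
// x.len() elements, and the method returns the concatenation (Flatten) of
// those runs; e.g. get_slices(0..2, 1..3, 0..1) yields the runs starting at
// offsets 1*len_y and 2*len_y.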
tmp_slices.iter_mut().zip(iproduct!(z,y)).for_each(|(t,(z,y))| { 123 | let start = x.start + y*len_y + z*len_z; 124 | //println!("start: {}, end: {}, y:{}, z:{}", start, start+x.len(), y,z); 125 | *t = &self.data[start..start + len_slices_x]; 126 | }); 127 | tmp_slices.into_iter().flatten() 128 | } 129 | #[inline] 130 | pub fn get_slices_mut(& mut self, x: Range, y: Range, z: Range) -> Flatten> { 131 | self.get_slices_mut_v01(x,y,z) 132 | } 133 | #[inline] 134 | pub fn get_slices_mut_v01(& mut self, x: Range, y: Range, z: Range) -> Flatten> { 135 | //let mut tmp_slices: Vec<&mut [T]> = vec![]; 136 | let length = y.len()*z.len(); 137 | let mut tmp_slices: Vec<&mut [T]> = Vec::with_capacity(length); 138 | let mut dd = self.data.split_at_mut(0).1; 139 | let len_slices_x = x.len(); 140 | let len_y = self.indicing[1]; 141 | let len_z = self.indicing[2]; 142 | iproduct!(z,y).fold((dd,0_usize),|(ee, offset), (z,y)| { 143 | let start = x.start + y*len_y + z*len_z; 144 | let gg = ee.split_at_mut(start-offset).1.split_at_mut(len_slices_x); 145 | tmp_slices.push(gg.0); 146 | (gg.1,start+len_slices_x) 147 | }); 148 | tmp_slices.into_iter().flatten() 149 | } 150 | #[inline] 151 | pub fn get_slices_mut_v02(& mut self, x: Range, y: Range, z: Range) -> Flatten> { 152 | let length = y.len()*z.len(); 153 | let mut tmp_slices: Vec<&mut [T]> = Vec::with_capacity(length); 154 | unsafe{tmp_slices.set_len(length)} 155 | let mut dd = self.data.split_at_mut(0).1; 156 | let len_slices_x = x.len(); 157 | let len_y = self.indicing[1]; 158 | let len_z = self.indicing[2]; 159 | iproduct!(z,y).zip(tmp_slices.iter_mut()).fold((dd,0_usize),|(ee, offset), ((z,y),to_slice)| { 160 | let start = x.start + y*len_y + z*len_z; 161 | let gg = ee.split_at_mut(start-offset).1.split_at_mut(len_slices_x); 162 | *to_slice = gg.0; 163 | (gg.1,start+len_slices_x) 164 | }); 165 | tmp_slices.into_iter().flatten() 166 | } 167 | #[inline] 168 | pub fn iter_slices_x(&self, y: usize, z: usize) -> std::slice::Iter { 169 | let start = z*self.indicing[2] + y*self.indicing[1]; 170 | let end = start + self.indicing[1]; 171 | self.data[start..end].iter() 172 | } 173 | #[inline] 174 | pub fn par_iter_slices_x(&self, y: usize, z: usize) -> rayon::slice::Iter { 175 | let start = z*self.indicing[2] + y*self.indicing[1]; 176 | let end = start + self.indicing[1]; 177 | self.data[start..end].par_iter() 178 | } 179 | #[inline] 180 | pub fn iter_mut_auxbas(&mut self, auxbas_range: Range) -> Option> { 181 | if let Some(x) = self.size.get(0) { 182 | if let Some(y) = self.size.get(1) { 183 | let chunk_size = x*y; 184 | Some(self.data[chunk_size*auxbas_range.start..chunk_size*auxbas_range.end] 185 | .chunks_exact_mut(chunk_size)) 186 | } else {None} 187 | } else {None} 188 | } 189 | #[inline] 190 | pub fn iter_auxbas(&self, auxbas_range: Range) -> Option> { 191 | if let Some(x) = self.size.get(0) { 192 | if let Some(y) = self.size.get(1) { 193 | let chunk_size = x*y; 194 | Some(self.data[chunk_size*auxbas_range.start..chunk_size*auxbas_range.end] 195 | .chunks_exact(chunk_size)) 196 | } else {None} 197 | } else {None} 198 | } 199 | #[inline] 200 | pub fn par_iter_mut_auxbas(&mut self, auxbas_range: Range) -> Option> { 201 | if let Some(x) = self.size.get(0) { 202 | if let Some(y) = self.size.get(1) { 203 | let chunk_size = x*y; 204 | Some(self.data[chunk_size*auxbas_range.start..chunk_size*auxbas_range.end] 205 | .par_chunks_exact_mut(chunk_size)) 206 | } else {None} 207 | } else {None} 208 | } 209 | #[inline] 210 | pub fn par_iter_auxbas(&self, auxbas_range: 
Range<usize>) -> Option<rayon::slice::ChunksExact<T>> {
211 |         if let Some(x) = self.size.get(0) {
212 |             if let Some(y) = self.size.get(1) {
213 |                 let chunk_size = x*y;
214 |                 Some(self.data[chunk_size*auxbas_range.start..chunk_size*auxbas_range.end]
215 |                     .par_chunks_exact(chunk_size))
216 |             } else {None}
217 |         } else {None}
218 |     }
219 |     #[inline]
220 |     pub fn check_shape(&self, other:&RIFull<T>) -> bool {
221 |         self.size.iter().zip(other.size.iter()).fold(true, |check,size| {
222 |             check && size.0==size.1
223 |         })
224 |     }
225 |     #[inline]
226 |     /// [i,j,k] -> [j,i,k]
227 |     pub fn transpose_jik(&self) -> RIFull<T>
228 |     where T:Clone+Copy
229 |     {
230 |         let i = self.size[0];
231 |         let j = self.size[1];
232 |         let k = self.size[2];
233 |         let data_new: Vec<T> = self.data.chunks_exact(i*j)
234 |             .map(|v| {let mat = MatrixFull::from_vec([i,j], v.to_vec()).unwrap().transpose();
235 |                 mat.data}).flatten().collect();
236 |         let ri = RIFull::from_vec([j,i,k], data_new).unwrap();
237 |         ri
238 |     }
239 |     #[inline]
240 |     /// [i,j,k] -> [j,k,i]
241 |     pub fn transpose_jki(&self) -> RIFull<T>
242 |     where T:Clone+Copy
243 |     {
244 |         let i = self.size[0];
245 |         let j = self.size[1];
246 |         let k = self.size[2];
247 |         let mut data = vec![];
248 |         for ii in 0..i {
249 |             let mut tmp: Vec<T> = self.data.iter().enumerate()
250 |                 .filter(|(idx,v)| ((*idx as isize - ii as isize).abs() as usize )%i == 0 )
251 |                 .map(|(idx, v)| *v).collect();
252 |             data.append(&mut tmp);
253 |         }
254 |         let ri = RIFull::from_vec([j,k,i], data).unwrap();
255 |         ri
256 |     }
257 |     #[inline]
258 |     /// [i,j,k] -> [k,j,i]
259 |     pub fn transpose_kji(&self) -> RIFull<T>
260 |     where T:Clone+Copy
261 |     {
262 |         let i = self.size[0];
263 |         let j = self.size[1];
264 |         let k = self.size[2];
265 |         let mut data = vec![];
266 |         let ri_new = self.transpose_jik();
267 |         for ij in 0..i*j {
268 |             let mut tmp: Vec<T> = ri_new.data.iter().enumerate()
269 |                 .filter(|(idx,v)| ((*idx as isize - ij as isize).abs() as usize )%(i*j) == 0 ).map(|(idx, v)| *v).collect();
270 |             data.append(&mut tmp);
271 |         }
272 |
273 |         let ri = RIFull::from_vec([k,j,i], data).unwrap();
274 |         ri
275 |     }
276 |     #[inline]
277 |     /// [i,j,k] -> [i,k,j]
278 |     pub fn transpose_ikj(&self) -> RIFull<T>
279 |     where T:Clone+Copy+Debug
280 |     {
281 |         let i = self.size[0];
282 |         let j = self.size[1];
283 |         let k = self.size[2];
284 |         let mut data = vec![];
285 |         for jj in 0..j {
286 |             let data_new: Vec<T> = self.data.chunks_exact(i*j)
287 |                 .map(|v| {let mat = MatrixFull::from_vec([i,j], v.to_vec()).unwrap();
288 |                     mat.iter_column(jj).map(|v| *v).collect_vec()}).flatten().collect();
289 |             data.push(data_new)
290 |         }
291 |         let data_new = data.into_iter().flatten().collect();
292 |         let ri = RIFull::from_vec([i,k,j], data_new).unwrap();
293 |         ri
294 |     }
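    // Editorial orientation for the transpose_* family above: the suffix names
    // the output order of the input axes [i,j,k]. A hypothetical worked example
    // on a [2,2,2] tensor with column-major data [0,1,2,3,4,5,6,7]:
    //   input element [i,j,k] sits at offset i + 2*j + 4*k, so [1,0,1] holds 5;
    //   transpose_jik sends it to output position [j,i,k] = [0,1,1], i.e. to
    //   offset 0 + 2*1 + 4*1 = 6, so the transposed buffer has data[6] == 5.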
295 |
296 |     /// Reduce the [nao,nao,naux] `RIFull` to a [nao*(nao+1)/2, naux] `MatrixFull` according to the (i,j) permutation symmetry
297 |     pub fn rifull_to_matfull_symm(&self) -> MatrixFull<T>
298 |     where T:Clone+Copy {
299 |         let nao = self.size[0];
300 |         let naux = self.size[2];
301 |         //let mut result = MatrixFull::new([nao*(nao+1)/2, naux], 0.0_f64);
302 |         let mut chunk_by_aux = self.data.chunks_exact(nao*nao);
303 |         let mut data: Vec<T> = chunk_by_aux.into_iter().map(|chunk| richunk_reduce_by_symm(chunk, nao)).flatten().collect();
304 |         let result = MatrixFull::from_vec([nao*(nao+1)/2, naux], data).unwrap();
305 |         result
306 |     }
307 |
308 |     /// Reduce the [i,j,k] `RIFull` to an [i*j, k] `MatrixFull`
309 |     pub fn rifull_to_matfull_ij_k(&self) -> MatrixFull<T>
310 |     where T:Clone+Copy {
311 |         let i = self.size[0];
312 |         let j = self.size[1];
313 |         let k = self.size[2];
314 |         let result = MatrixFull::from_vec([i*j, k], self.data.clone()).unwrap();
315 |         result
316 |     }
317 |
318 |     /// [i,j,k] -> [i,j*k]
319 |     pub fn rifull_to_matfull_i_jk(&self) -> MatrixFull<T>
320 |     where T:Clone+Copy {
321 |         let i = self.size[0];
322 |         let j = self.size[1];
323 |         let k = self.size[2];
324 |         let result = MatrixFull::from_vec([i, j*k], self.data.clone()).unwrap();
325 |         result
326 |     }
327 |
328 | }
329 |
330 | fn richunk_reduce_by_symm<T>(chunk: &[T], nao: usize) -> Vec<T>
331 | where T: Clone + Copy {
332 |     //let mut result = vec![0.0_f64; nao*(nao+1)/2];
333 |     let mut result = vec![];
334 |     let mut chunk_by_nao = chunk.chunks_exact(nao);
335 |     for i in 0..nao {
336 |         let chunk = chunk_by_nao.next().unwrap();
337 |         let slice = &chunk[0..(i+1)];
338 |         result.append(&mut slice.to_vec());
339 |     }
340 |     result
341 | }
342 |
343 | impl RIFull<f64> {
344 |     #[inline]
345 |     pub fn self_scaled_add(&mut self, bm: &RIFull<f64>, b: f64) {
346 |         // A = A + b*B
347 |         if self.check_shape(bm) {
348 |             self.data.iter_mut()
349 |                 .zip(bm.data.iter())
350 |                 .for_each(|(c,p)| {*c +=p*b});
351 |         } else {
352 |             panic!("Error: Shape inconsistency happens when adding two matrices");
353 |         }
354 |     }
355 |     #[inline]
356 |     pub fn ao2mo(&self, eigenvector: &MatrixFull<f64>) -> anyhow::Result<RIFull<f64>> {
357 |         self.ao2mo_v02(eigenvector)
358 |     }
359 |     #[inline]
360 |     /// AO(num_basis, num_basis, num_auxbas) -> MO(num_auxbas, num_state, num_state)
361 |     pub fn ao2mo_v01(&self, eigenvector: &MatrixFull<f64>) -> anyhow::Result<RIFull<f64>> {
362 |         let num_basis = eigenvector.size.get(0).unwrap().clone();
363 |         let num_state = eigenvector.size.get(1).unwrap().clone();
364 |         let num_auxbas = self.size.get(2).unwrap().clone();
365 |         let mut rimo = RIFull::new([num_auxbas,num_basis,num_state],0.0);
366 |
367 |         for i_auxbas in 0..num_auxbas {
368 |             let i_aux = &self.get_reducing_matrix(i_auxbas).unwrap();
369 |             let tmp_aux = _dgemm_nn(i_aux, &eigenvector.to_matrixfullslice());
370 |             //_dgemm_tn_v02(&eigenvector.to_matrixfullslice(),
371 |             //    &tmp_aux.to_matrixfullslice(),
372 |             //    rimo.get_slices_mut_old(i_auxbas..i_auxbas+1, 0..num_basis, 0..num_state)
373 |             //    )
374 |             let tmp_aux2 = _dgemm_tn(&eigenvector.to_matrixfullslice(), &tmp_aux.to_matrixfullslice());
375 |             rimo.get_slices_mut_v01(i_auxbas..i_auxbas+1, 0..num_basis, 0..num_state)
376 |                 .zip(tmp_aux2.data.iter()).for_each(|(to, from)| {*to = *from});
377 |         }
378 |         Ok(rimo)
379 |     }
380 |     #[inline]
381 |     /// AO(num_basis, num_basis, num_auxbas) -> MO(num_auxbas, num_state, num_state)
382 |     pub fn ao2mo_v02(&self, eigenvector: &MatrixFull<f64>) -> anyhow::Result<RIFull<f64>> {
383 |         let num_basis = eigenvector.size.get(0).unwrap().clone();
384 |         let num_states = eigenvector.size.get(1).unwrap().clone();
385 |         let num_auxbas = self.size.get(2).unwrap().clone();
386 |         let mut ri3mo = RIFull::new([num_auxbas,num_states,num_states],0.0);
387 |         //let mut buf = vec![0.0,num_basis*num_states*num_auxbas];
388 |         //let (c_buf, buf_len, buf_cap) = (buf.as_mut_ptr() as *mut f64, buf.len(), buf.capacity());
389 |
390 |         ri_ao2mo_f(&eigenvector.data_ref().unwrap(),
391 |             &self.data[..],
392 |             &mut ri3mo.data[..],
393 |             num_states, num_basis, num_auxbas
394 |         );
395 |         //unsafe{
396 |         //    let eigenvector_ptr = eigenvector.data.as_ptr();
397 |         //    let ri3fn_ptr = self.data.as_ptr();
398 |         //    let ri3mo_ptr = ri3mo.data.as_mut_ptr();
399 |         //    ri_ao2mo_f_(eigenvector_ptr,
400 |         //        ri3fn_ptr,
401 |         //        ri3mo_ptr,
402 |         //        &(num_states as i32),
403 |         //        &(num_basis as i32),
404 |         //        &(num_auxbas as i32));
405 |         //}
406 |
407 |
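        // Editorial note: in index form, the Fortran kernel called above
        // performs the two half-transformations of the RI tensor, i.e.
        // (assuming the conventions stated in the doc comment of this fn)
        //   ri3mo[p, i, j] = sum_{mu, nu} C[mu, i] * ri3fn[mu, nu, p] * C[nu, j],
        // where C is `eigenvector` (num_basis x num_states) and p runs over
        // the num_auxbas auxiliary basis functions.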
Ok(ri3mo) 408 | } 409 | #[inline] 410 | pub fn copy_from_ri(&mut self, range_x:Range, range_y:Range,range_z:Range, 411 | from_ri: &RIFull,f_range_x:Range, f_range_y:Range, f_range_z:Range) { 412 | 413 | let self_size = self.size.clone(); 414 | 415 | ri_copy_from_ri( 416 | &from_ri.data, &from_ri.size, f_range_x,f_range_y,f_range_z, 417 | &mut self.data, &self_size, range_x,range_y,range_z 418 | ) 419 | } 420 | #[inline] 421 | pub fn copy_from_matr<'a, T>(&mut self, range_x:Range, range_y:Range, i_z: usize, copy_mod:i32, 422 | from_matr: & T,f_range_x:Range, f_range_y:Range) 423 | where T: BasicMatrix<'a,f64> 424 | { 425 | 426 | let self_size = self.size.clone(); 427 | 428 | ri_copy_from_matr( 429 | from_matr.data_ref().unwrap(), from_matr.size(), 430 | f_range_x,f_range_y, 431 | &mut self.data, &self_size, range_x,range_y, i_z, copy_mod 432 | ) 433 | } 434 | } 435 | 436 | #[test] 437 | fn test_transpose_ijk(){ 438 | let data = (0..12).collect_vec(); 439 | let ri = RIFull::from_vec([3,2,2], data).unwrap(); 440 | println!("ri = {:?}", ri); 441 | let ri_t = ri.transpose_jki(); 442 | let ri_t2 = ri.transpose_jik(); 443 | let ri_t3 = ri.transpose_kji(); 444 | let ri_t4 = ri.transpose_ikj(); 445 | println!("ri_t = {:?}", ri_t); 446 | println!("ri_t2 = {:?}", ri_t2); 447 | println!("ri_t3 = {:?}", ri_t3); 448 | println!("ri_t4 = {:?}", ri_t4); 449 | } 450 | 451 | #[test] 452 | fn test_transpose(){ 453 | let data_a = (0..54).map(|v| v as f64).collect_vec(); 454 | let data_b = (0..18).map(|v| v as f64).collect_vec(); 455 | let a = RIFull::from_vec([9,3,2], data_a).unwrap(); 456 | println!("a = {:?}", a); 457 | let b = RIFull::from_vec([3,2,3], data_b).unwrap(); 458 | println!("b = {:?}", b); 459 | let data_c = (0..54).map(|v| v as f64).collect_vec(); 460 | let data_d = (0..18).map(|v| v as f64).collect_vec(); 461 | let c = MatrixFull::from_vec([9,6], data_c).unwrap().transpose(); 462 | println!("c = {:?}", c); 463 | let d = MatrixFull::from_vec([6,3], data_d).unwrap().transpose(); 464 | println!("d = {:?}", d); 465 | 466 | let a_mat = a.rifull_to_matfull_i_jk().transpose(); 467 | let b_mat = b.rifull_to_matfull_ij_k().transpose(); 468 | let x = _dgemm_nn( &b_mat.to_matrixfullslice(),&a_mat.to_matrixfullslice()); 469 | let y = _dgemm_nn(&d.to_matrixfullslice(),&c.to_matrixfullslice()); 470 | println!("x = {:?}", x); 471 | println!("y = {:?}", y); 472 | 473 | 474 | 475 | 476 | } 477 | -------------------------------------------------------------------------------- /src/tensor_basic_operation.rs: -------------------------------------------------------------------------------- 1 | use std::{fmt::Display, ops::{Index, IndexMut, Range, RangeFull}, slice::SliceIndex}; 2 | 3 | use crate::{index::{TensorIndex, TensorIndexUncheck}, 4 | ERIFull, ERIFold4, MatrixFull, MatrixFullSliceMut, 5 | MatrixFullSlice, MatrixUpperSliceMut, MatrixUpper, 6 | MatrixUpperSlice, RIFull}; 7 | 8 | 9 | /// Trait definitions for tensor basic operations, mainly including 10 | /// getting a (mutable) number, or a (mutable) slice from a defined tensor 11 | pub trait TensorOpt where Self: TensorIndex { 12 | fn get1d(&self, position:usize) -> Option<&T> {None} 13 | fn get2d(&self, position:[usize;2]) -> Option<&T> {None} 14 | fn get3d(&self, position:[usize;3]) -> Option<&T> {None} 15 | fn get4d(&self, position:[usize;4]) -> Option<&T> {None} 16 | fn get(&self, position:&[usize]) -> Option<&T> {None} 17 | } 18 | 19 | pub trait TensorOptUncheck where Self: TensorIndexUncheck { 20 | // For MatrixUpper 21 | fn get2d_uncheck(&self, 
position:[usize;2]) -> Option<&T> {None} 22 | // For ERIFold4 23 | fn get4d_uncheck(&self, position:[usize;4]) -> Option<&T> {None} 24 | } 25 | 26 | pub trait TensorOptMut<'a, T> where Self: TensorIndex { 27 | fn get1d_mut(&mut self, position:usize) -> Option<&mut T> {None} 28 | fn get2d_mut(&mut self, position:[usize;2]) -> Option<&mut T> {None} 29 | fn get3d_mut(&mut self, position:[usize;3]) -> Option<&mut T> {None} 30 | fn get4d_mut(&mut self, position:[usize;4]) -> Option<&mut T> {None} 31 | fn get_mut(&mut self, position:&[usize]) -> Option<&mut T> {None} 32 | 33 | fn set1d(&mut self, position:usize, new_data: T) {} 34 | fn set2d(&mut self, position:[usize;2], new_data: T) {} 35 | fn set3d(&mut self, position:[usize;3], new_data: T) {} 36 | fn set4d(&mut self, position:[usize;4], new_data: T) {} 37 | fn set(&mut self, position:&[usize], new_data: T) {} 38 | } 39 | 40 | pub trait TensorOptMutUncheck<'a, T> where Self: TensorIndexUncheck { 41 | // For MatrixUpperMut 42 | fn get2d_mut_uncheck(&mut self, position:[usize;2]) -> Option<&mut T> {None} 43 | // For ERIFold4 44 | fn get4d_mut_uncheck(&mut self, position:[usize;4]) -> Option<&mut T> {None} 45 | // For MatrixUpperMut 46 | fn set2d_uncheck(&mut self, position:[usize;2], new_data: T) {} 47 | // For ERIFold4 48 | fn set4d_uncheck(&mut self, position:[usize;4], new_data: T) {} 49 | } 50 | 51 | pub trait TensorSlice where Self: TensorIndex+TensorOpt { 52 | fn get1d_slice(&self, position:usize, length: usize) -> Option<&[T]> {None} 53 | fn get2d_slice(&self, position:[usize;2], length: usize) -> Option<&[T]> {None} 54 | fn get3d_slice(&self, position:[usize;3], length: usize) -> Option<&[T]> {None} 55 | fn get4d_slice(&self, position:[usize;4], length: usize) -> Option<&[T]> {None} 56 | fn get_slice(&self, position:&[usize], length: usize) -> Option<&[T]> {None} 57 | } 58 | 59 | pub trait TensorSliceUncheck where Self: TensorIndexUncheck+TensorOptUncheck { 60 | // For MatrixUpper 61 | fn get2d_slice_uncheck(&self, position:[usize;2],length: usize) -> Option<&[T]> {None} 62 | // For ERIFold4 63 | fn get4d_slice_uncheck(&self, position:[usize;4],length: usize) -> Option<&[T]> {None} 64 | } 65 | pub trait TensorSliceMut<'a, T> where Self: TensorIndex+TensorOptMut<'a,T> { 66 | fn get1d_slice_mut(&mut self, position:usize, length: usize) -> Option<&mut [T]> {None} 67 | fn get2d_slice_mut(&mut self, position:[usize;2], length: usize) -> Option<&mut [T]> {None} 68 | fn get3d_slice_mut(&mut self, position:[usize;3], length: usize) -> Option<&mut [T]> {None} 69 | fn get4d_slice_mut(&mut self, position:[usize;4], length: usize) -> Option<&mut [T]> {None} 70 | fn get_slice_mut(&mut self, position:&[usize], length: usize) -> Option<&mut [T]> {None} 71 | } 72 | 73 | /// Define for upper-formated tensors specifically, like 74 | /// ERIFold4 with the indice of `[i,j,k,l]`, and 75 | /// MatrixUpper with the indice of `[i,j]`. 
76 | /// According to the upper-formatted tensor definition, in principle, i<=j is assumed for the index pair `[i,j]`
77 | /// (and correspondingly for `[i,j,k,l]`); the `_uncheck` methods navigate the tensor without validating that ordering.
78 | pub trait TensorSliceMutUncheck<'a, T>
79 | where Self: TensorIndexUncheck+TensorOptMutUncheck<'a,T> {
80 |     // For MatrixUpperMut
81 |     fn get2d_slice_mut_uncheck(&mut self, position:[usize;2], length: usize) -> Option<&mut [T]> {None}
82 |     // For ERIFold4
83 |     fn get4d_slice_mut_uncheck(&mut self, position:[usize;4], length: usize) -> Option<&mut [T]> {None}
84 | }
85 |
86 |
87 | /// Implementation of the traits for specific tensor structures
88 | impl<T> TensorOpt<T> for ERIFull<T> {
89 |     #[inline]
90 |     fn get1d(&self, position:usize) -> Option<&T> {
91 |         self.data.get(position)
92 |     }
93 |     #[inline]
94 |     fn get4d(&self, position:[usize;4]) -> Option<&T> {
95 |         self.data.get(self.index4d(position).unwrap())
96 |     }
97 |     #[inline]
98 |     fn get(&self, position:&[usize]) -> Option<&T> {
99 |         let tp = [position[0],position[1],position[2],position[3]];
100 |         self.data.get(self.index4d(tp).unwrap())
101 |     }
102 | }
103 | impl<'a, T> TensorOptMut<'a, T> for ERIFull<T> {
104 |     #[inline]
105 |     fn get1d_mut(&mut self, position:usize) -> Option<&mut T> {
106 |         self.data.get_mut(position)
107 |     }
108 |     #[inline]
109 |     fn get4d_mut(&mut self, position:[usize;4]) -> Option<&mut T> {
110 |         let tp = self.index4d(position).unwrap();
111 |         self.data.get_mut(tp)
112 |     }
113 |     #[inline]
114 |     fn get_mut(&mut self, position:&[usize]) -> Option<&mut T> {
115 |         let tp = self.index4d([position[0],position[1],position[2],position[3]]).unwrap();
116 |         self.data.get_mut(tp)
117 |     }
118 |     #[inline]
119 |     fn set1d(&mut self, position:usize, new_data: T) {
120 |         if let Some(tmp_value) = self.data.get_mut(position) {
121 |             *tmp_value = new_data
122 |         } else {
123 |             panic!("Error in setting the tensor element located at the position of {:?}", position);
124 |         };
125 |     }
126 |     #[inline]
127 |     fn set4d(&mut self, position:[usize;4], new_data: T) {
128 |         let tp = self.index4d(position).unwrap();
129 |         if let Some(tmp_value) = self.data.get_mut(tp) {
130 |             *tmp_value = new_data
131 |         } else {
132 |             panic!("Error in setting the tensor element located at the position of {:?}", position);
133 |         };
134 |     }
135 |     #[inline]
136 |     fn set(&mut self, position:&[usize], new_data: T) {
137 |         //let tp = self.index4d([position[0],position[1],position[2],position[3]]);
138 |         self.set4d([position[0],position[1],position[2],position[3]], new_data);
139 |
140 |     }
141 | }
142 |
143 | impl<T> TensorSlice<T> for ERIFull<T> {
144 |     #[inline]
145 |     fn get1d_slice(&self, position:usize, length: usize) -> Option<&[T]> {
146 |         Some(&self.data[position..position+length])
147 |     }
148 |     #[inline]
149 |     fn get4d_slice(&self, position:[usize;4], length: usize) -> Option<&[T]> {
150 |         let tp = self.index4d(position).unwrap();
151 |         Some(&self.data[tp..tp+length])
152 |     }
153 |     #[inline]
154 |     fn get_slice(&self, position:&[usize], length: usize) -> Option<&[T]> {
155 |         self.get4d_slice([position[0],position[1],position[2],position[3]], length)
156 |     }
157 | }
158 | impl<'a, T> TensorSliceMut<'a,T> for ERIFull<T> {
159 |     #[inline]
160 |     fn get1d_slice_mut(&mut self, position:usize, length: usize) -> Option<&mut [T]> {
161 |         Some(&mut self.data[position..position+length])
162 |     }
163 |     #[inline]
164 |     fn get4d_slice_mut(&mut self, position:[usize;4], length: usize) -> Option<&mut [T]> {
165 |         let tp = self.index4d(position).unwrap();
166 |         Some(&mut self.data[tp..tp+length])
167 |     }
168 |     #[inline]
169 |     fn get_slice_mut(&mut self, position:&[usize], length: usize) -> Option<&mut [T]> {
170 |
self.get4d_slice_mut([position[0],position[1],position[2],position[3]], length) 171 | } 172 | } 173 | 174 | /// For ERIFold4 175 | impl TensorOpt for ERIFold4 { 176 | #[inline] 177 | fn get1d(&self, position:usize) -> Option<&T> { 178 | self.data.get(position) 179 | } 180 | #[inline] 181 | fn get2d(&self, position:[usize;2]) -> Option<&T> { 182 | self.data.get(self.index2d(position).unwrap()) 183 | } 184 | #[inline] 185 | fn get4d(&self, position:[usize;4]) -> Option<&T> { 186 | self.data.get(self.index4d(position).unwrap()) 187 | } 188 | #[inline] 189 | fn get(&self, position:&[usize]) -> Option<&T> { 190 | let tp = [position[0],position[1],position[2],position[3]]; 191 | self.data.get(self.index4d(tp).unwrap()) 192 | } 193 | 194 | fn get3d(&self, position:[usize;3]) -> Option<&T> {None} 195 | } 196 | impl TensorOptUncheck for ERIFold4 { 197 | #[inline] 198 | fn get4d_uncheck(&self, position:[usize;4]) -> Option<&T> { 199 | self.data.get(self.index4d_uncheck(position).unwrap()) 200 | } 201 | } 202 | impl<'a, T> TensorOptMut<'a, T> for ERIFold4 { 203 | #[inline] 204 | fn get1d_mut(&mut self, position:usize) -> Option<&mut T> { 205 | self.data.get_mut(position) 206 | } 207 | #[inline] 208 | fn get2d_mut(&mut self, position:[usize;2]) -> Option<&mut T> { 209 | let tp = self.index2d(position).unwrap(); 210 | self.data.get_mut(tp) 211 | } 212 | #[inline] 213 | fn get4d_mut(&mut self, position:[usize;4]) -> Option<&mut T> { 214 | let tp = self.index4d(position).unwrap(); 215 | self.data.get_mut(tp) 216 | } 217 | #[inline] 218 | fn get_mut(&mut self, position:&[usize]) -> Option<&mut T> { 219 | let tp = self.index4d([position[0],position[1],position[2],position[3]]).unwrap(); 220 | self.data.get_mut(tp) 221 | } 222 | #[inline] 223 | fn set1d(&mut self, position:usize, new_data: T) { 224 | if let Some(tmp_value) = self.data.get_mut(position) { 225 | *tmp_value = new_data 226 | } else { 227 | panic!("Error in setting the tensor element located at the position of {:?}", position); 228 | }; 229 | } 230 | #[inline] 231 | fn set2d(&mut self, position:[usize;2], new_data: T) { 232 | let tp = self.index2d(position).unwrap(); 233 | if let Some(tmp_value) = self.data.get_mut(tp) { 234 | *tmp_value = new_data 235 | } else { 236 | panic!("Error in setting the tensor element located at the position of {:?}", position); 237 | }; 238 | } 239 | #[inline] 240 | fn set4d(&mut self, position:[usize;4], new_data: T) { 241 | let tp = self.index4d(position).unwrap(); 242 | if let Some(tmp_value) = self.data.get_mut(tp) { 243 | *tmp_value = new_data 244 | } else { 245 | panic!("Error in setting the tensor element located at the position of {:?}", position); 246 | }; 247 | } 248 | #[inline] 249 | fn set(&mut self, position:&[usize], new_data: T) { 250 | self.set4d([position[0],position[1],position[2],position[3]], new_data); 251 | 252 | } 253 | } 254 | impl<'a, T> TensorOptMutUncheck<'a, T> for ERIFold4 { 255 | #[inline] 256 | fn set4d_uncheck(&mut self, position:[usize;4], new_data: T) { 257 | let tp = self.index4d_uncheck(position).unwrap(); 258 | if let Some(tmp_value) = self.data.get_mut(tp) { 259 | *tmp_value = new_data 260 | } else { 261 | panic!("Error in setting the tensor element located at the position of {:?}", position); 262 | }; 263 | } 264 | #[inline] 265 | fn get4d_mut_uncheck(&mut self, position:[usize;4]) -> Option<&mut T> { 266 | let tp = self.index4d_uncheck(position).unwrap(); 267 | self.data.get_mut(tp) 268 | } 269 | } 270 | 271 | impl TensorSlice for ERIFold4 { 272 | #[inline] 273 | fn 
get1d_slice(&self, position:usize, length: usize) -> Option<&[T]> { 274 | Some(&self.data[position..position+length]) 275 | } 276 | #[inline] 277 | fn get2d_slice(&self, position:[usize;2], length: usize) -> Option<&[T]> { 278 | let tp = self.index2d(position).unwrap(); 279 | Some(&self.data[tp..tp+length]) 280 | } 281 | #[inline] 282 | fn get4d_slice(&self, position:[usize;4], length: usize) -> Option<&[T]> { 283 | let tp = self.index4d(position).unwrap(); 284 | Some(&self.data[tp..tp+length]) 285 | } 286 | #[inline] 287 | fn get_slice(&self, position:&[usize], length: usize) -> Option<&[T]> { 288 | self.get4d_slice([position[0],position[1],position[2],position[3]], length) 289 | } 290 | } 291 | 292 | impl TensorSliceUncheck for ERIFold4 { 293 | #[inline] 294 | fn get4d_slice_uncheck(&self, position:[usize;4], length: usize) -> Option<&[T]> { 295 | let tp = self.index4d_uncheck(position).unwrap(); 296 | Some(&self.data[tp..tp+length]) 297 | } 298 | } 299 | 300 | impl<'a, T> TensorSliceMut<'a,T> for ERIFold4 { 301 | #[inline] 302 | fn get1d_slice_mut(&mut self, position:usize, length: usize) -> Option<&mut [T]> { 303 | Some(&mut self.data[position..position+length]) 304 | } 305 | #[inline] 306 | fn get2d_slice_mut(&mut self, position:[usize;2], length: usize) -> Option<&mut [T]> { 307 | let tp = self.index2d(position).unwrap(); 308 | Some(&mut self.data[tp..tp+length]) 309 | } 310 | #[inline] 311 | fn get4d_slice_mut(&mut self, position:[usize;4], length: usize) -> Option<&mut [T]> { 312 | let tp = self.index4d(position).unwrap(); 313 | Some(&mut self.data[tp..tp+length]) 314 | } 315 | #[inline] 316 | fn get_slice_mut(&mut self, position:&[usize], length: usize) -> Option<&mut [T]> { 317 | self.get4d_slice_mut([position[0],position[1],position[2],position[3]], length) 318 | } 319 | } 320 | 321 | impl<'a, T> TensorSliceMutUncheck<'a,T> for ERIFold4 { 322 | #[inline] 323 | fn get4d_slice_mut_uncheck(&mut self, position:[usize;4], length: usize) -> Option<&mut [T]> { 324 | let tp = self.index4d_uncheck(position).unwrap(); 325 | Some(&mut self.data[tp..tp+length]) 326 | } 327 | } 328 | 329 | // Trait implementations for MatrixFull 330 | impl<'a,T> TensorOpt for MatrixFull { 331 | #[inline] 332 | fn get1d(&self, position:usize) -> Option<&T> { 333 | self.data.get(position) 334 | } 335 | #[inline] 336 | fn get2d(&self, position:[usize;2]) -> Option<&T> { 337 | self.data.get(self.index2d(position).unwrap()) 338 | } 339 | #[inline] 340 | fn get(&self, position:&[usize]) -> Option<&T> { 341 | let tp = [position[0],position[1]]; 342 | self.data.get(self.index2d(tp).unwrap()) 343 | } 344 | } 345 | impl<'a, T> TensorSlice for MatrixFull { 346 | #[inline] 347 | fn get1d_slice(&self, position:usize, length: usize) -> Option<&[T]> { 348 | Some(&self.data[position..position+length]) 349 | } 350 | #[inline] 351 | fn get2d_slice(&self, position:[usize;2], length: usize) -> Option<&[T]> { 352 | let tp = self.index2d(position).unwrap(); 353 | Some(&self.data[tp..tp+length]) 354 | } 355 | #[inline] 356 | fn get_slice(&self, position:&[usize], length: usize) -> Option<&[T]> { 357 | self.get2d_slice([position[0],position[1]], length) 358 | } 359 | } 360 | 361 | impl<'a, T> TensorOptMut<'a, T> for MatrixFull { 362 | #[inline] 363 | fn get1d_mut(&mut self, position:usize) -> Option<&mut T> { 364 | self.data.get_mut(position) 365 | } 366 | #[inline] 367 | fn get2d_mut(&mut self, position:[usize;2]) -> Option<&mut T> { 368 | let tp = self.index2d(position).unwrap(); 369 | self.data.get_mut(tp) 370 | } 371 | 
#[inline] 372 | fn get_mut(&mut self, position:&[usize]) -> Option<&mut T> { 373 | let tp = self.index2d([position[0],position[1]]).unwrap(); 374 | self.data.get_mut(tp) 375 | } 376 | #[inline] 377 | fn set1d(&mut self, position:usize, new_data: T) { 378 | if let Some(tmp_value) = self.data.get_mut(position) { 379 | *tmp_value = new_data 380 | } else { 381 | panic!("Error in setting the tensor element located at the position of {:?}", position); 382 | }; 383 | } 384 | #[inline] 385 | fn set2d(&mut self, position:[usize;2], new_data: T) { 386 | let tp = self.index2d(position).unwrap(); 387 | if let Some(tmp_value) = self.data.get_mut(tp) { 388 | *tmp_value = new_data 389 | } else { 390 | panic!("Error in setting the tensor element located at the position of {:?}", position); 391 | }; 392 | } 393 | #[inline] 394 | fn set(&mut self, position:&[usize], new_data: T) { 395 | //let tp = self.index4d([position[0],position[1],position[2],position[3]]); 396 | self.set2d([position[0],position[1]], new_data); 397 | 398 | } 399 | 400 | fn get3d_mut(&mut self, position:[usize;3]) -> Option<&mut T> {None} 401 | 402 | fn get4d_mut(&mut self, position:[usize;4]) -> Option<&mut T> {None} 403 | 404 | fn set3d(&mut self, position:[usize;3], new_data: T) {} 405 | 406 | fn set4d(&mut self, position:[usize;4], new_data: T) {} 407 | } 408 | impl<'a, T> TensorSliceMut<'a, T> for MatrixFull { 409 | #[inline] 410 | fn get1d_slice_mut(&mut self, position:usize, length: usize) -> Option<&mut [T]> { 411 | Some(&mut self.data[position..position+length]) 412 | } 413 | #[inline] 414 | fn get2d_slice_mut(&mut self, position:[usize;2], length: usize) -> Option<&mut [T]> { 415 | let tp = self.index2d(position).unwrap(); 416 | Some(&mut self.data[tp..tp+length]) 417 | } 418 | #[inline] 419 | fn get_slice_mut(&mut self, position:&[usize], length: usize) -> Option<&mut [T]> { 420 | self.get2d_slice_mut([position[0],position[1]], length) 421 | } 422 | } 423 | 424 | // Trait implementations for MatrixFullSliceMut 425 | impl<'a, T> TensorOptMut<'a, T> for MatrixFullSliceMut<'a,T> { 426 | #[inline] 427 | fn get1d_mut(&mut self, position:usize) -> Option<&mut T> { 428 | self.data.get_mut(position) 429 | } 430 | #[inline] 431 | fn get2d_mut(&mut self, position:[usize;2]) -> Option<&mut T> { 432 | let tp = self.index2d(position).unwrap(); 433 | self.data.get_mut(tp) 434 | } 435 | #[inline] 436 | fn get_mut(&mut self, position:&[usize]) -> Option<&mut T> { 437 | let tp = self.index2d([position[0],position[1]]).unwrap(); 438 | self.data.get_mut(tp) 439 | } 440 | #[inline] 441 | fn set1d(&mut self, position:usize, new_data: T) { 442 | if let Some(tmp_value) = self.data.get_mut(position) { 443 | *tmp_value = new_data 444 | } else { 445 | panic!("Error in setting the tensor element located at the position of {:?}", position); 446 | }; 447 | } 448 | #[inline] 449 | fn set2d(&mut self, position:[usize;2], new_data: T) { 450 | let tp = self.index2d(position).unwrap(); 451 | if let Some(tmp_value) = self.data.get_mut(tp) { 452 | *tmp_value = new_data 453 | } else { 454 | panic!("Error in setting the tensor element located at the position of {:?}", position); 455 | }; 456 | } 457 | #[inline] 458 | fn set(&mut self, position:&[usize], new_data: T) { 459 | //let tp = self.index4d([position[0],position[1],position[2],position[3]]); 460 | self.set2d([position[0],position[1]], new_data); 461 | 462 | } 463 | } 464 | impl<'a, T> TensorSliceMut<'a, T> for MatrixFullSliceMut<'a, T> { 465 | #[inline] 466 | fn get1d_slice_mut(&mut self, position:usize, 
impl<'a, T> TensorSliceMut<'a, T> for MatrixFullSliceMut<'a, T> {
    #[inline]
    fn get1d_slice_mut(&mut self, position: usize, length: usize) -> Option<&mut [T]> {
        Some(&mut self.data[position..position+length])
    }
    #[inline]
    fn get2d_slice_mut(&mut self, position: [usize;2], length: usize) -> Option<&mut [T]> {
        let tp = self.index2d(position).unwrap();
        Some(&mut self.data[tp..tp+length])
    }
    #[inline]
    fn get_slice_mut(&mut self, position: &[usize], length: usize) -> Option<&mut [T]> {
        self.get2d_slice_mut([position[0],position[1]], length)
    }
}

// Trait implementations for MatrixFullSlice
impl<'a, T> TensorOpt<T> for MatrixFullSlice<'a, T> {
    #[inline]
    fn get1d(&self, position: usize) -> Option<&T> {
        self.data.get(position)
    }
    #[inline]
    fn get2d(&self, position: [usize;2]) -> Option<&T> {
        self.data.get(self.index2d(position).unwrap())
    }
    #[inline]
    fn get(&self, position: &[usize]) -> Option<&T> {
        let tp = [position[0],position[1]];
        self.data.get(self.index2d(tp).unwrap())
    }
}
impl<'a, T> TensorSlice<T> for MatrixFullSlice<'a, T> {
    #[inline]
    fn get1d_slice(&self, position: usize, length: usize) -> Option<&[T]> {
        Some(&self.data[position..position+length])
    }
    #[inline]
    fn get2d_slice(&self, position: [usize;2], length: usize) -> Option<&[T]> {
        let tp = self.index2d(position).unwrap();
        Some(&self.data[tp..tp+length])
    }
    #[inline]
    fn get_slice(&self, position: &[usize], length: usize) -> Option<&[T]> {
        self.get2d_slice([position[0],position[1]], length)
    }
}

// Trait implementations for MatrixUpperSliceMut
impl<'a, T> TensorOptMut<'a, T> for MatrixUpperSliceMut<'a, T> {
    #[inline]
    fn get1d_mut(&mut self, position: usize) -> Option<&mut T> {
        self.data.get_mut(position)
    }
    #[inline]
    fn get2d_mut(&mut self, position: [usize;2]) -> Option<&mut T> {
        let tp = self.index2d(position).unwrap();
        self.data.get_mut(tp)
    }
    #[inline]
    fn get_mut(&mut self, position: &[usize]) -> Option<&mut T> {
        let tp = self.index2d([position[0],position[1]]).unwrap();
        self.data.get_mut(tp)
    }
    #[inline]
    fn set1d(&mut self, position: usize, new_data: T) {
        if let Some(tmp_value) = self.data.get_mut(position) {
            *tmp_value = new_data
        } else {
            panic!("Error: failed to set the tensor element at position {:?}", position);
        };
    }
    #[inline]
    fn set2d(&mut self, position: [usize;2], new_data: T) {
        let tp = self.index2d(position).unwrap();
        if let Some(tmp_value) = self.data.get_mut(tp) {
            *tmp_value = new_data
        } else {
            panic!("Error: failed to set the tensor element at position {:?}", position);
        };
    }
    #[inline]
    fn set(&mut self, position: &[usize], new_data: T) {
        self.set2d([position[0],position[1]], new_data);
    }
}
impl<'a, T> TensorOptMutUncheck<'a, T> for MatrixUpperSliceMut<'a, T> {
    #[inline]
    fn get2d_mut_uncheck(&mut self, position: [usize;2]) -> Option<&mut T> {
        let tp = self.index2d_uncheck(position).unwrap();
        self.data.get_mut(tp)
    }
    #[inline]
    fn set2d_uncheck(&mut self, position: [usize;2], new_data: T) {
        let tp = self.index2d_uncheck(position).unwrap();
        if let Some(tmp_value) = self.data.get_mut(tp) {
            *tmp_value = new_data
        } else {
            panic!("Error: failed to set the tensor element at position {:?}", position);
        };
    }
}
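
// Editorial note (added): the `_uncheck` variants above and below differ from
// their checked counterparts only in the index computation: get2d/set2d go
// through index2d, while get2d_uncheck/set2d_uncheck go through
// index2d_uncheck, which skips the validation and assumes the caller already
// supplies a position inside the stored upper triangle, i.e. i <= j.
// Assuming the conventional column-major packing of an upper triangle, that
// map is offset(i, j) = i + j*(j + 1)/2, so (0,0) -> 0, (0,1) -> 1,
// (1,1) -> 2, (0,2) -> 3, and so on.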
impl<'a, T> TensorSliceMut<'a, T> for MatrixUpperSliceMut<'a, T> {
    #[inline]
    fn get1d_slice_mut(&mut self, position: usize, length: usize) -> Option<&mut [T]> {
        Some(&mut self.data[position..position+length])
    }
    #[inline]
    fn get2d_slice_mut(&mut self, position: [usize;2], length: usize) -> Option<&mut [T]> {
        let tp = self.index2d(position).unwrap();
        Some(&mut self.data[tp..tp+length])
    }
    #[inline]
    fn get_slice_mut(&mut self, position: &[usize], length: usize) -> Option<&mut [T]> {
        self.get2d_slice_mut([position[0],position[1]], length)
    }
}
impl<'a, T> TensorSliceMutUncheck<'a, T> for MatrixUpperSliceMut<'a, T> {
    #[inline]
    fn get2d_slice_mut_uncheck(&mut self, position: [usize;2], length: usize) -> Option<&mut [T]> {
        let tp = self.index2d_uncheck(position).unwrap();
        Some(&mut self.data[tp..tp+length])
    }
}

// Trait implementations for MatrixUpperSlice
impl<'a, T> TensorOpt<T> for MatrixUpperSlice<'a, T> {
    #[inline]
    fn get1d(&self, position: usize) -> Option<&T> {
        self.data.get(position)
    }
    #[inline]
    fn get2d(&self, position: [usize;2]) -> Option<&T> {
        self.data.get(self.index2d(position).unwrap())
    }
    #[inline]
    fn get(&self, position: &[usize]) -> Option<&T> {
        let tp = [position[0],position[1]];
        self.data.get(self.index2d(tp).unwrap())
    }
}
impl<'a, T> TensorOptUncheck<T> for MatrixUpperSlice<'a, T> {
    #[inline]
    fn get2d_uncheck(&self, position: [usize;2]) -> Option<&T> {
        self.data.get(self.index2d_uncheck(position).unwrap())
    }
}
impl<'a, T> TensorSlice<T> for MatrixUpperSlice<'a, T> {
    #[inline]
    fn get1d_slice(&self, position: usize, length: usize) -> Option<&[T]> {
        Some(&self.data[position..position+length])
    }
    #[inline]
    fn get2d_slice(&self, position: [usize;2], length: usize) -> Option<&[T]> {
        let tp = self.index2d(position).unwrap();
        Some(&self.data[tp..tp+length])
    }
    #[inline]
    fn get_slice(&self, position: &[usize], length: usize) -> Option<&[T]> {
        self.get2d_slice([position[0],position[1]], length)
    }
}
impl<'a, T> TensorSliceUncheck<T> for MatrixUpperSlice<'a, T> {
    #[inline]
    fn get2d_slice_uncheck(&self, position: [usize;2], length: usize) -> Option<&[T]> {
        let tp = self.index2d_uncheck(position).unwrap();
        Some(&self.data[tp..tp+length])
    }
}

// Trait implementations for MatrixUpper
impl<'a, T> TensorOptMut<'a, T> for MatrixUpper<T> {
    #[inline]
    fn get1d_mut(&mut self, position: usize) -> Option<&mut T> {
        self.data.get_mut(position)
    }
    #[inline]
    fn get2d_mut(&mut self, position: [usize;2]) -> Option<&mut T> {
        let tp = self.index2d(position).unwrap();
        self.data.get_mut(tp)
    }
    #[inline]
    fn get_mut(&mut self, position: &[usize]) -> Option<&mut T> {
        let tp = self.index2d([position[0],position[1]]).unwrap();
        self.data.get_mut(tp)
    }
    #[inline]
    fn set1d(&mut self, position: usize, new_data: T) {
        if let Some(tmp_value) = self.data.get_mut(position) {
            *tmp_value = new_data
        } else {
            panic!("Error: failed to set the tensor element at position {:?}", position);
        };
    }
    #[inline]
    fn set2d(&mut self, position: [usize;2], new_data: T) {
        let tp = self.index2d(position).unwrap();
        if let Some(tmp_value) = self.data.get_mut(tp) {
            *tmp_value = new_data
        } else {
            panic!("Error: failed to set the tensor element at position {:?}", position);
        };
    }
    #[inline]
    fn set(&mut self, position: &[usize], new_data: T) {
        self.set2d([position[0],position[1]], new_data);
    }
}
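
// Editorial sketch (not part of the original source): element access through
// the Index<[usize;3]>/IndexMut<[usize;3]> impls for RIFull defined near the
// end of this file. `RIFull::new([n1, n2, n3], fill)` is the constructor used
// in benches/bench_tensors.rs.
#[cfg(test)]
mod rifull_index_sketch {
    use super::*;

    #[test]
    fn rank3_indexing_round_trips() {
        let mut t = RIFull::new([2, 3, 4], 0.0);
        t[[1, 2, 3]] = 7.0;              // IndexMut<[usize;3]>
        assert_eq!(t[[1, 2, 3]], 7.0);   // Index<[usize;3]>
        assert_eq!(t[[0, 0, 0]], 0.0);
    }
}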
impl<'a, T> TensorOptMutUncheck<'a, T> for MatrixUpper<T> {
    #[inline]
    fn get2d_mut_uncheck(&mut self, position: [usize;2]) -> Option<&mut T> {
        let tp = self.index2d_uncheck(position).unwrap();
        self.data.get_mut(tp)
    }
    #[inline]
    fn set2d_uncheck(&mut self, position: [usize;2], new_data: T) {
        let tp = self.index2d_uncheck(position).unwrap();
        if let Some(tmp_value) = self.data.get_mut(tp) {
            *tmp_value = new_data
        } else {
            panic!("Error: failed to set the tensor element at position {:?}", position);
        };
    }
}
impl<'a, T> TensorSliceMut<'a, T> for MatrixUpper<T> {
    #[inline]
    fn get1d_slice_mut(&mut self, position: usize, length: usize) -> Option<&mut [T]> {
        Some(&mut self.data[position..position+length])
    }
    #[inline]
    fn get2d_slice_mut(&mut self, position: [usize;2], length: usize) -> Option<&mut [T]> {
        let tp = self.index2d(position).unwrap();
        Some(&mut self.data[tp..tp+length])
    }
    #[inline]
    fn get_slice_mut(&mut self, position: &[usize], length: usize) -> Option<&mut [T]> {
        self.get2d_slice_mut([position[0],position[1]], length)
    }
}
impl<'a, T> TensorSliceMutUncheck<'a, T> for MatrixUpper<T> {
    #[inline]
    fn get2d_slice_mut_uncheck(&mut self, position: [usize;2], length: usize) -> Option<&mut [T]> {
        let tp = self.index2d_uncheck(position).unwrap();
        Some(&mut self.data[tp..tp+length])
    }
}
impl<T> TensorOpt<T> for MatrixUpper<T> {
    #[inline]
    fn get1d(&self, position: usize) -> Option<&T> {
        self.data.get(position)
    }
    #[inline]
    fn get2d(&self, position: [usize;2]) -> Option<&T> {
        self.data.get(self.index2d(position).unwrap())
    }
    #[inline]
    fn get(&self, position: &[usize]) -> Option<&T> {
        let tp = [position[0],position[1]];
        self.data.get(self.index2d(tp).unwrap())
    }
}
impl<T> TensorOptUncheck<T> for MatrixUpper<T> {
    #[inline]
    fn get2d_uncheck(&self, position: [usize;2]) -> Option<&T> {
        self.data.get(self.index2d_uncheck(position).unwrap())
    }
}
impl<T> TensorSlice<T> for MatrixUpper<T> {
    #[inline]
    fn get1d_slice(&self, position: usize, length: usize) -> Option<&[T]> {
        Some(&self.data[position..position+length])
    }
    #[inline]
    fn get2d_slice(&self, position: [usize;2], length: usize) -> Option<&[T]> {
        let tp = self.index2d(position).unwrap();
        Some(&self.data[tp..tp+length])
    }
    #[inline]
    fn get_slice(&self, position: &[usize], length: usize) -> Option<&[T]> {
        self.get2d_slice([position[0],position[1]], length)
    }
}
impl<T> TensorSliceUncheck<T> for MatrixUpper<T> {
    #[inline]
    fn get2d_slice_uncheck(&self, position: [usize;2], length: usize) -> Option<&[T]> {
        let tp = self.index2d_uncheck(position).unwrap();
        Some(&self.data[tp..tp+length])
    }
}
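
// Editorial note (added): the Index/IndexMut impls below provide bare,
// stride-folding element access on top of the checked getters above. For
// ERIFold4, the [usize;2] position addresses the 4-fold-symmetric ERI tensor
// through two composite indices (each presumably a packed index pair), not
// through the four raw orbital indices; raw rank-4 access goes through
// index4d / get4d_slice instead. MatrixUpper simply forwards any
// std::slice::SliceIndex (a usize or a range) to its packed data vector.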
//==========================================================================
// Now implement the Index and IndexMut traits for the different structures
//==========================================================================

impl<T> Index<[usize;3]> for RIFull<T> {
    type Output = T;
    #[inline]
    fn index(&self, position: [usize;3]) -> &Self::Output {
        // Fold the rank-3 position into a linear offset using the strides
        // stored in `indicing`.
        let tmp_p = position.iter()
            .zip(self.indicing.iter())
            .fold(0_usize, |acc, i| acc + i.0 * i.1);
        Index::index(&self.data, tmp_p)
    }
}

impl<T> IndexMut<[usize;3]> for RIFull<T> {
    #[inline]
    fn index_mut(&mut self, position: [usize;3]) -> &mut Self::Output {
        let tmp_p = position.iter()
            .zip(self.indicing.iter())
            .fold(0_usize, |acc, i| acc + i.0 * i.1);
        IndexMut::index_mut(&mut self.data, tmp_p)
    }
}

impl<T, I: SliceIndex<[T]>> Index<I> for MatrixUpper<T> {
    type Output = I::Output;
    fn index(&self, index: I) -> &Self::Output {
        Index::index(&self.data, index)
    }
}

impl<T, I: SliceIndex<[T]>> IndexMut<I> for MatrixUpper<T> {
    fn index_mut(&mut self, index: I) -> &mut Self::Output {
        IndexMut::index_mut(&mut self.data, index)
    }
}

impl<T> Index<[usize;2]> for ERIFold4<T> {
    type Output = T;
    #[inline]
    fn index(&self, position: [usize;2]) -> &Self::Output {
        let tmp_p = position.iter()
            .zip(self.indicing.iter())
            .fold(0_usize, |acc, i| acc + i.0 * i.1);
        Index::index(&self.data, tmp_p)
    }
}

impl<T> IndexMut<[usize;2]> for ERIFold4<T> {
    #[inline]
    fn index_mut(&mut self, position: [usize;2]) -> &mut Self::Output {
        let tmp_p = position.iter()
            .zip(self.indicing.iter())
            .fold(0_usize, |acc, i| acc + i.0 * i.1);
        IndexMut::index_mut(&mut self.data, tmp_p)
    }
}
--------------------------------------------------------------------------------