├── Include.mk ├── test ├── src │ ├── Derivative.cpp │ ├── main.cpp │ ├── AntiSymRef.cpp │ ├── Valence.cpp │ ├── Identity.cpp │ ├── Symmetric.cpp │ ├── Quat.cpp │ ├── TotallyAntisymmetric.cpp │ ├── Antisymmetric.cpp │ ├── Matrix.cpp │ ├── TotallySymmetric.cpp │ ├── Math.cpp │ ├── Index.cpp │ ├── Vector.cpp │ └── TensorRank4.cpp ├── Makefile ├── buildinfo └── include │ └── Test │ └── Test.h ├── include └── Tensor │ ├── Tensor.h │ ├── Meta.h │ ├── clamp.h │ ├── Inverse.h.h │ ├── Index.h.h │ ├── Range.h.h │ ├── Matrix.h │ ├── Math.h.h │ ├── Derivative.h │ ├── Grid.h │ ├── Range.h │ ├── AntiSymRef.h │ ├── Quat.h │ ├── Valence.h │ ├── Inverse.h │ └── Vector.h.h ├── .github └── FUNDING.yml └── LICENSE /Include.mk: -------------------------------------------------------------------------------- 1 | TENSOR_PATH:=$(dir $(lastword $(MAKEFILE_LIST))) 2 | INCLUDE+=$(TENSOR_PATH)include 3 | -------------------------------------------------------------------------------- /test/src/Derivative.cpp: -------------------------------------------------------------------------------- 1 | #include "Tensor/Tensor.h" 2 | #include "Tensor/Derivative.h" 3 | 4 | void test_Derivative() { 5 | 6 | 7 | } 8 | -------------------------------------------------------------------------------- /test/Makefile: -------------------------------------------------------------------------------- 1 | DIST_FILENAME=test 2 | DIST_TYPE=app 3 | include ../../Common/Base.mk 4 | include ../../Common/Include.mk 5 | include ../Include.mk 6 | -------------------------------------------------------------------------------- /test/buildinfo: -------------------------------------------------------------------------------- 1 | distName='test' 2 | distType='app' 3 | depends = {'../../Common', '..'} 4 | 5 | --[[ 6 | compiler = 'clang++' 7 | linker = 'clang++' 8 | --compileFlags=compileFlags..' 
-ftemplate-backtrace-limit=0 ' 9 | --]] 10 | -------------------------------------------------------------------------------- /include/Tensor/Tensor.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | // TODO the dif between Tensor.h and Vector.h gets smaller and smaller ... 4 | // but now that I'm putting methods into vec that are dependent on vec ... 5 | // make this the file to include to put things in order 6 | 7 | #include "Tensor/Vector.h" 8 | #include "Tensor/Quat.h" 9 | #include "Tensor/Matrix.h" 10 | #include "Tensor/Valence.h" 11 | -------------------------------------------------------------------------------- /test/src/main.cpp: -------------------------------------------------------------------------------- 1 | #include "Test/Test.h" 2 | 3 | int main() { 4 | test_AntiSymRef(); 5 | test_Vector(); 6 | test_Identity(); 7 | test_Symmetric(); 8 | test_Antisymmetric(); 9 | test_Matrix(); 10 | test_TensorRank3(); 11 | test_TensorRank4(); 12 | test_TotallySymmetric(); 13 | test_TotallyAntisymmetric(); 14 | test_Index(); 15 | test_Derivative(); 16 | test_Math(); 17 | test_Quat(); 18 | test_Valence(); 19 | } 20 | -------------------------------------------------------------------------------- /include/Tensor/Meta.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include "Common/Meta.h" 4 | 5 | namespace Tensor { 6 | 7 | /* 8 | Detects if a class is a "tensor". 9 | These include vec sym asym and subclasses (like quat). 10 | It's defined in the class in the TENSOR_HEADER, as a static constexpr field. 11 | 12 | TODO should this decay_t T or should I rely on the invoker to is_tensor_v> ? 
13 | */ 14 | template 15 | concept is_tensor_v = T::isTensorFlag; 16 | 17 | } 18 | -------------------------------------------------------------------------------- /include/Tensor/clamp.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include "Tensor/Tensor.h" 4 | #include //min, max 5 | 6 | namespace Tensor { 7 | 8 | template 9 | T clamp(T x, T xmin, T xmax) { 10 | if constexpr(Tensor::is_tensor_v){ 11 | return T([&](typename T::intN i) -> typename T::Scalar { 12 | return clamp(x(i), xmin(i), xmax(i)); 13 | }); 14 | } else { 15 | return std::min(xmax, std::max(xmin, x)); 16 | } 17 | } 18 | 19 | } 20 | -------------------------------------------------------------------------------- /include/Tensor/Inverse.h.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include "Tensor/Meta.h" //is_tensor_v 4 | 5 | namespace Tensor { 6 | 7 | // these are in Inverse.h: 8 | 9 | template 10 | requires is_tensor_v 11 | typename T::Scalar determinant(T const & a); 12 | 13 | 14 | template 15 | requires is_tensor_v 16 | T inverse(T const & a, typename T::Scalar const & det); 17 | 18 | template 19 | requires is_tensor_v 20 | T inverse(T const & a); 21 | 22 | } 23 | -------------------------------------------------------------------------------- /include/Tensor/Index.h.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include "Tensor/Meta.h" //is_tensor_v 4 | 5 | namespace Tensor { 6 | 7 | //index-access classes 8 | struct IndexBase; 9 | 10 | template 11 | struct Index; 12 | 13 | //forward-declare for tensors's operator() used for index-notation 14 | template 15 | requires is_tensor_v 16 | struct IndexAccess; 17 | 18 | //forward-declare cuz operator() IndexAccess needs to know if its making a scalar or tensor 19 | template 20 | struct IndexAccessDetails; 21 | 22 | } 23 | 
-------------------------------------------------------------------------------- /include/Tensor/Range.h.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | namespace Tensor { 4 | 5 | template< 6 | int rankFirst, 7 | int rankLast, 8 | int rankStep, 9 | int rank, 10 | typename Owner 11 | > 12 | struct RangeIterator; 13 | // this one is in Tensor/Range.h 14 | 15 | template< 16 | int rank, 17 | typename Owner 18 | > using RangeIteratorInner = RangeIterator<0, rank-1, 1, rank, Owner>; 19 | 20 | template< 21 | int rank, 22 | typename Owner 23 | > using RangeIteratorOuter = RangeIterator; 24 | 25 | template< 26 | int rank, 27 | bool innerFirst, 28 | typename Owner 29 | > using RangeIteratorInnerVsOuter = std::conditional_t< 30 | innerFirst, 31 | RangeIteratorInner, 32 | RangeIteratorOuter 33 | >; 34 | 35 | template 36 | struct RangeObj; 37 | 38 | } 39 | -------------------------------------------------------------------------------- /test/src/AntiSymRef.cpp: -------------------------------------------------------------------------------- 1 | #include "Tensor/AntiSymRef.h" 2 | #include "Common/Test.h" 3 | 4 | static_assert(Common::is_instance_v, Tensor::AntiSymRef>); 5 | static_assert(Common::is_instance_v>, Tensor::AntiSymRef>); 6 | 7 | void test_AntiSymRef() { 8 | float f; 9 | 10 | f = 123; 11 | auto n = Tensor::AntiSymRef(f, Tensor::Sign::NEGATIVE); 12 | TEST_EQ(f, 123); 13 | TEST_EQ(n, -123); 14 | n = 456; 15 | TEST_EQ(n, 456); 16 | TEST_EQ(f, -456); 17 | 18 | f = 123; 19 | auto p = Tensor::AntiSymRef(f, Tensor::Sign::POSITIVE); 20 | TEST_EQ(f, 123); 21 | TEST_EQ(p, 123); 22 | p = 456; 23 | TEST_EQ(p, 456); 24 | TEST_EQ(f, 456); 25 | 26 | //auto p = Tensor::AntiSymRef(f, Tensor::AntiSymRef::POSITIVE); 27 | auto z = Tensor::AntiSymRef(); 28 | TEST_EQ(z, 0); 29 | z = 1; 30 | TEST_EQ(z, 0); 31 | } 32 | -------------------------------------------------------------------------------- /.github/FUNDING.yml: 
-------------------------------------------------------------------------------- 1 | # These are supported funding model platforms 2 | 3 | github: thenumbernine # Replace with up to 4 GitHub Sponsors-enabled usernames e.g., [user1, user2] 4 | patreon: # Replace with a single Patreon username 5 | open_collective: # Replace with a single Open Collective username 6 | ko_fi: # Replace with a single Ko-fi username 7 | tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel 8 | community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry 9 | liberapay: # Replace with a single Liberapay username 10 | issuehunt: # Replace with a single IssueHunt username 11 | lfx_crowdfunding: # Replace with a single LFX Crowdfunding project-name e.g., cloud-foundry 12 | polar: # Replace with a single Polar username 13 | buy_me_a_coffee: thenumbernine # Replace with a single Buy Me a Coffee username 14 | thanks_dev: # Replace with a single thanks.dev username 15 | custom: # Replace with up to 4 custom sponsorship URLs e.g., ['link1', 'link2'] 16 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2022 Christopher E. 
Moore ( christopher.e.moore@gmail.com / https://github.com/thenumbernine/Tensor ) 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy of 6 | this software and associated documentation files (the "Software"), to deal in 7 | the Software without restriction, including without limitation the rights to 8 | use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of 9 | the Software, and to permit persons to whom the Software is furnished to do so, 10 | subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS 17 | FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR 18 | COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER 19 | IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 20 | CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 21 | -------------------------------------------------------------------------------- /test/src/Valence.cpp: -------------------------------------------------------------------------------- 1 | #include "Test/Test.h" 2 | #include "Tensor/Valence.h" 3 | 4 | // static assert failure ... from creating the template argument 5 | // if there was a static_assert failure within the body of the type_identity dependent on the template parameter ... that's deferred ... 6 | 7 | void test_Valence() { 8 | using real = double; 9 | using namespace Tensor; 10 | 11 | /* 12 | TODO valence indicators ... 
13 | right now I have 'u' vs 'd' 14 | but it'd be nice to specify 15 | */ 16 | 17 | // a^i_j 18 | /* // constructor with valence as child class of tensor: 19 | auto a = valence< 20 | tensor, 21 | 'u', 'd' 22 | >{{1,2,3},{4,5,6},{7,8,9}}; 23 | */ 24 | // constructor for valence as wrapper class of tensor: 25 | auto a = valence<'u', 'd'>( 26 | tensor{{1,2,3},{4,5,6},{7,8,9}} 27 | ); 28 | ECHO(a); 29 | 30 | // g_ij 31 | auto g = valence<'d', 'd'>( 32 | tensor{{-1,0,0},{0,1,0},{0,0,1}} 33 | ); 34 | ECHO(g); 35 | 36 | // g^ij 37 | // if I make the valence wrapper a subclass of the tensor then I still have to explicit-cast this 38 | //auto tmp = inverse((tensor)g); 39 | // so maybe I should just use a member, and use -> and * with it? 40 | auto tmp = inverse(*g); 41 | auto gU = valence<'u', 'u'>( 42 | tensor( 43 | [&](int i, int j) -> real { return tmp(i,j); } 44 | ) 45 | ); 46 | ECHO(gU); 47 | 48 | #if 0 49 | // static_assert failure of a * g; 50 | auto aerr = a * g; 51 | #endif 52 | 53 | // a_ij 54 | auto aL = g * a; 55 | auto aLcheck = (valence<'d', 'd'>(tensor{{-1,-2,-3},{4,5,6},{7,8,9}})); 56 | TEST_EQ(aL, aLcheck); 57 | 58 | #if 0 59 | // static_assert failure of gU * a 60 | auto aerr = gU * a; 61 | #endif 62 | 63 | // a^ij 64 | auto aU = a * gU; 65 | TEST_EQ(aU, (valence<'u', 'u'>(tensor{{-1,2,3},{-4,5,6},{-7,8,9}}))); 66 | 67 | #if 0 68 | //static assert failure - valence mismatch 69 | auto bL = aL + aU; 70 | ECHO(bL) 71 | #endif 72 | } 73 | -------------------------------------------------------------------------------- /test/src/Identity.cpp: -------------------------------------------------------------------------------- 1 | #include "Test/Test.h" 2 | 3 | void test_Identity() { 4 | using float3i3 = Tensor::float3i3; 5 | 6 | { 7 | auto I = float3i3(1); 8 | // ident == ident works 9 | TEST_EQ(I, float3i3(1)); 10 | // ident == matrix works 11 | TEST_EQ(I, (Tensor::float3x3{ 12 | {1,0,0}, 13 | {0,1,0}, 14 | {0,0,1}, 15 | })); 16 | // ident == sym works 17 | 
TEST_EQ(I, (Tensor::float3s3)(Tensor::float3x3{ 18 | {1,0,0}, 19 | {0,1,0}, 20 | {0,0,1}, 21 | })); 22 | 23 | // ident != ident works 24 | TEST_NE(I, float3i3(2)); 25 | // ident != matrix works 26 | // off-diagonal 27 | TEST_NE(I, (Tensor::float3x3{ 28 | {1,2,3}, 29 | {0,1,0}, 30 | {0,0,1}, 31 | })); 32 | // on-diagonal 33 | TEST_NE(I, (Tensor::float3x3{ 34 | {1,0,0}, 35 | {0,2,0}, 36 | {0,0,3}, 37 | })); 38 | // ident != sym works 39 | // off-diagonal 40 | TEST_NE(I, (Tensor::float3s3)(Tensor::float3x3{ 41 | {1,2,3}, 42 | {0,1,0}, 43 | {0,0,1}, 44 | })); 45 | // on-diagonal 46 | TEST_NE(I, (Tensor::float3s3)(Tensor::float3x3{ 47 | {1,0,0}, 48 | {0,2,0}, 49 | {0,0,3}, 50 | })); 51 | // ident != asym works 52 | TEST_NE(I, Tensor::float3a3()); 53 | TEST_NE(I, Tensor::float3a3(1,2,3)); 54 | } 55 | 56 | // ident + scalar => sym 57 | { 58 | auto I = float3i3(1); 59 | auto Iplus1 = I + 1; 60 | static_assert(std::is_same_v); 61 | TEST_EQ(Iplus1, (Tensor::float3x3{ 62 | {2,1,1}, 63 | {1,2,1}, 64 | {1,1,2}, 65 | })); 66 | } 67 | { 68 | auto I = float3i3(1); 69 | auto Iplus1 = 1 + I; 70 | static_assert(std::is_same_v); 71 | TEST_EQ(Iplus1, (Tensor::float3x3{ 72 | {2,1,1}, 73 | {1,2,1}, 74 | {1,1,2}, 75 | })); 76 | } 77 | 78 | // TODO should this be ident? or maybe TensorRank4? or maybe a separate test for correct operator results? 
79 | using float3z3 = Tensor::tensori, Tensor::storage_zero<3>>; 80 | 81 | // zero + zero = zero 82 | { 83 | auto Z = float3z3(); 84 | auto Z2 = float3z3(); 85 | auto R = Z + Z2; 86 | static_assert(std::is_same_v); 87 | TEST_EQ(R, (Tensor::float3x3{{0,0,0},{0,0,0},{0,0,0}})); 88 | } 89 | 90 | // ident + zero = ident 91 | { 92 | auto I = float3i3(1); 93 | auto Z = float3z3(); 94 | auto R = I + Z; 95 | static_assert(std::is_same_v); 96 | TEST_EQ(R, (Tensor::float3x3{{1,0,0},{0,1,0},{0,0,1}})); 97 | } 98 | 99 | // ident + ident = ident 100 | { 101 | auto I1 = float3i3(1); 102 | auto I2 = float3i3(2); 103 | auto R = I1 + I2; 104 | static_assert(std::is_same_v); 105 | TEST_EQ(R, (Tensor::float3x3{{3,0,0},{0,3,0},{0,0,3}})); 106 | } 107 | 108 | // sym + ident => ident 109 | { 110 | auto I = float3i3(1); 111 | auto S = Tensor::float3s3(2); 112 | auto R = I + S; 113 | static_assert(std::is_same_v); 114 | } 115 | { 116 | auto I = float3i3(1); 117 | auto S = Tensor::float3s3(2); 118 | auto R = S + I; 119 | static_assert(std::is_same_v); 120 | } 121 | 122 | // ident-ident + ident-ident = ident-ident 123 | { 124 | using namespace Tensor; 125 | auto a = tensori, storage_ident<3>>(); 126 | auto b = tensori, storage_ident<3>>(); 127 | auto c = a + b; 128 | static_assert(std::is_same_v, storage_ident<3>>>); 129 | } 130 | 131 | // ident-ident + ident-sym = ident-sym 132 | { 133 | using namespace Tensor; 134 | auto a = tensori, storage_ident<3>>(); 135 | auto b = tensori, storage_sym<3>>(); 136 | auto c = a + b; 137 | static_assert(std::is_same_v, storage_sym<3>>>); 138 | } 139 | 140 | // ident-ident + ident-asym = ident-mat 141 | { 142 | using namespace Tensor; 143 | auto a = tensori, storage_ident<3>>(); 144 | auto b = tensori, storage_asym<3>>(); 145 | auto c = a + b; 146 | static_assert(std::is_same_v, storage_vec<3>, storage_vec<3>>>); 147 | } 148 | 149 | { 150 | using float4i4 = Tensor::float4i4; 151 | using float4x4 = Tensor::float4x4; 152 | float4i4 id(1); 153 | ECHO(id); 
154 | float4x4 f; 155 | f = id; 156 | ECHO(f); 157 | for (int i = 0; i < 4; ++i) { 158 | for (int j = 0; j < 4; ++j) { 159 | float x = i == j ? 1 : 0; 160 | TEST_EQ(f[i][j], x); 161 | } 162 | } 163 | } 164 | } 165 | -------------------------------------------------------------------------------- /include/Tensor/Matrix.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include "Tensor/Vector.h" 4 | #include "Tensor/Quat.h" 5 | 6 | /* 7 | Here's some common OpenGL / 3D matrix operations 8 | */ 9 | 10 | namespace Tensor { 11 | 12 | //glTranslate 13 | // TODO optimize for storage ... dense upper-right corner, rest is identity 14 | template 15 | mat translate( 16 | vec t 17 | ) { 18 | return mat{ 19 | {1, 0, 0, t.x}, 20 | {0, 1, 0, t.y}, 21 | {0, 0, 1, t.z}, 22 | {0, 0, 0, 1} 23 | }; 24 | } 25 | 26 | //glScale 27 | // TODO optimized storage ... diagonal scale-only ... 28 | template 29 | mat scale( 30 | vec s 31 | ) { 32 | return mat{ 33 | {s.x, 0, 0, 0}, 34 | {0, s.y, 0, 0}, 35 | {0, 0, s.z, 0}, 36 | {0, 0, 0, 1} 37 | }; 38 | } 39 | 40 | //glRotate 41 | template 42 | mat rotate( 43 | real rad, 44 | vec axis 45 | ) { 46 | auto q = quat(axis.x, axis.y, axis.z, rad) 47 | .fromAngleAxis(); 48 | auto x = q.xAxis(); 49 | auto y = q.yAxis(); 50 | auto z = q.zAxis(); 51 | /* 52 | which is faster? 53 | this 4x4 mat mul? 54 | or quat-rotate the col vectors of mq? 55 | 56 | TODO how about a sub-matrix specilized storage? dense upper-left, rest is diagonal. 57 | where you can set a cutoff dimension -- and every index beyond that dimension is filled with identity. 58 | So that this could just be stored as a 3x3's worth (9 reals) in memory, even though it's a 4x4 59 | Likewise for asymmetric storage, 3x4 translations could be just 12 reals in memory. 
60 | */ 61 | return mat { 62 | {x.x, y.x, z.x, 0}, 63 | {x.y, y.y, z.y, 0}, 64 | {x.z, y.z, z.z, 0}, 65 | {0, 0, 0, 1} 66 | }; 67 | } 68 | 69 | //gluLookAt 70 | //https://stackoverflow.com/questions/21830340/understanding-glmlookat 71 | template 72 | mat lookAt( 73 | vec eye, 74 | vec center, 75 | vec up 76 | ) { 77 | auto Z = (eye - center).normalize(); 78 | auto Y = up; 79 | auto X = Y.cross(Z).normalize(); 80 | Y = Z.cross(X); 81 | // could exploit a submatrix-storage, rest-is-identity optimization ... 82 | return mat{ 83 | {X.x, X.y, X.z, -eye.dot(X)}, 84 | {Y.x, Y.y, Y.z, -eye.dot(Y)}, 85 | {Z.x, Z.y, Z.z, -eye.dot(Z)}, 86 | {0, 0, 0, 1}, 87 | }; 88 | } 89 | 90 | /* 91 | glFrustum 92 | https://www.khronos.org/opengl/wiki/GluPerspective_code 93 | https://www.scratchapixel.com/lessons/3d-basic-rendering/perspective-and-orthographic-projection-matrix/opengl-perspective-projection-matrix.html 94 | */ 95 | template 96 | mat frustum( 97 | real left, 98 | real right, 99 | real bottom, 100 | real top, 101 | real near, 102 | real far 103 | ) { 104 | auto near2 = 2 * near; 105 | auto diff = vec(right, top, far) - vec(left, bottom, near); 106 | return mat{ 107 | {near2 / diff.x, 0, (right + left) / diff.x, 0}, 108 | {0, near2 / diff.y, (top + bottom) / diff.y, 0}, 109 | {0, 0, -(near + far) / diff.z, -(near2 * far) / diff.z}, 110 | {0, 0, -1, 0}, 111 | }; 112 | } 113 | 114 | /* 115 | gluPerspective 116 | http://www.songho.ca/opengl/gl_transform.html 117 | https://www.khronos.org/opengl/wiki/GluPerspective_code 118 | TODO tempted to make an optimized storage class for this as well. 119 | It's *almost* a diagonal scale class (min(m,n) reals for m x n matrix) 120 | though this would have to be specialized for perspective since it has that one off-diagonal element... 121 | I see this as the library slowly creeping into sparse-matrix storage. 
122 | */ 123 | template 124 | mat perspective( 125 | real fovY, // in radians 126 | real aspectRatio, 127 | real near, 128 | real far 129 | ) { 130 | auto ymax = near * tan(fovY * (real).5); 131 | auto xmax = aspectRatio * ymax; 132 | return frustum(-xmax, xmax, -ymax, ymax, near, far); 133 | } 134 | 135 | // glOrtho 136 | // https://www.scratchapixel.com/lessons/3d-basic-rendering/perspective-and-orthographic-projection-matrix/orthographic-projection-matrix.html 137 | template 138 | mat ortho( 139 | real left, 140 | real right, 141 | real bottom, 142 | real top, 143 | real near, 144 | real far 145 | ) { 146 | auto diff = vec(right, top, far) - vec(left, bottom, near); 147 | return mat{ 148 | {2 / diff.x, 0, 0, -(right + left) / diff.x}, 149 | {0, 2 / diff.y, 0, -(top + bottom) / diff.y}, 150 | {0, 0, -2 / diff.z, -(near + far) / diff.z}, 151 | {0, 0, 0, 1}, 152 | }; 153 | } 154 | 155 | // gluOrtho2D 156 | template 157 | mat ortho2D( 158 | real left, 159 | real right, 160 | real bottom, 161 | real top 162 | ) { 163 | return ortho(left, right, bottom, top, -1, 1); 164 | } 165 | 166 | } 167 | -------------------------------------------------------------------------------- /include/Tensor/Math.h.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include "Tensor/Vector.h.h" 4 | 5 | namespace Tensor { 6 | 7 | //forward-declare, body is below all tensor classes. 8 | 9 | template 10 | requires is_tensor_v 11 | T elemMul(T const & a, T const & b); 12 | 13 | template 14 | auto matrixCompMult(T&&... args); 15 | 16 | template 17 | auto hadamard(T&&... args); 18 | 19 | template 20 | requires ( 21 | !is_tensor_v || !is_tensor_v || // ... or two scalars 22 | ( 23 | IsBinaryTensorOp && // ... with matching rank 24 | std::is_same_v 25 | ) 26 | ) 27 | auto inner(A const & a, B const & b); 28 | 29 | template 30 | auto dot(T&&... 
args); 31 | 32 | template requires is_tensor_v 33 | typename T::Scalar lenSq(T const & v); 34 | 35 | template requires is_tensor_v 36 | typename T::Scalar normSq(T const & v); 37 | 38 | template requires (is_tensor_v) 39 | typename T::Scalar length(T const & v); 40 | 41 | template requires (is_tensor_v) 42 | typename T::Scalar norm(T const & v); 43 | 44 | template 45 | requires (IsBinaryTensorOp && std::is_same_v) 46 | typename A::Scalar distance(A const & a, B const & b); 47 | 48 | template 49 | requires (is_tensor_v) 50 | T normalize(T const & v); 51 | 52 | template 53 | requires IsBinaryTensorR3xR3Op 54 | auto cross(A const & a, B const & b); 55 | 56 | template 57 | auto outer(A const & a, B const & b); 58 | 59 | template 60 | auto outerProduct(T&&... args); 61 | 62 | template 63 | requires ( 64 | is_tensor_v 65 | && T::rank >= 2 66 | ) 67 | auto transpose(T const & t); 68 | 69 | template 70 | requires (is_tensor_v 71 | && m < T::rank 72 | && n < T::rank 73 | && T::template dim == T::template dim 74 | ) 75 | auto contract(T const & t); 76 | 77 | template 78 | auto trace(T const & o); 79 | 80 | template 81 | requires (is_tensor_v) 82 | auto contractN(A const & a); 83 | 84 | template 85 | requires IsInteriorOp 86 | auto interior(A const & a, B const & b); 87 | 88 | template 89 | requires (is_tensor_v) 90 | auto diagonal(T const & t); 91 | 92 | template 93 | requires IsSquareTensor 94 | auto makeSym(T const & t); 95 | 96 | template 97 | requires IsSquareTensor 98 | auto makeAsym(T const & t); 99 | 100 | template 101 | auto wedge(A const & a, B const & b); 102 | 103 | template 104 | requires IsSquareTensor 105 | auto hodgeDual(T const & a); 106 | 107 | //name 108 | template 109 | auto dual(T&&... 
args); 110 | 111 | //wedge all rows of a m x n matrix 112 | template 113 | auto wedgeAll(auto const & v); 114 | 115 | template 116 | requires (IsBinaryTensorOp && std::is_same_v && A::isSquare && B::isSquare) 117 | auto innerExt(A const & a, B const & b); 118 | 119 | template requires (is_tensor_v) 120 | typename T::Scalar normExtSq(T const & v); 121 | 122 | template requires (is_tensor_v) 123 | typename T::Scalar normExt(T const & v); 124 | 125 | template requires (is_tensor_v && T::rank == 2) 126 | typename T::Scalar measure(T const & v); 127 | 128 | template requires (is_tensor_v && T::rank == 2) 129 | typename T::Scalar measureSimplex(T const & v); 130 | 131 | template 132 | requires IsBinaryTensorOpWithMatchingNeighborDims 133 | auto operator*(A const & a, B const & b); 134 | 135 | //funny, 'if constexpr' causes this to lose constexpr-ness, but ternary is ok. 136 | constexpr int constexpr_isqrt_r(int inc, int limit) { 137 | return inc * inc > limit ? inc-1 : constexpr_isqrt_r(inc+1, limit); 138 | } 139 | constexpr int constexpr_isqrt(int i) { 140 | return constexpr_isqrt_r(0, i); 141 | } 142 | 143 | //https://en.cppreference.com/w/cpp/language/constexpr 144 | constexpr int constexpr_factorial(int n) { 145 | return n <= 1 ? 1 : (n * constexpr_factorial(n-1)); 146 | } 147 | constexpr int consteval_nChooseR(int m, int n) { 148 | return constexpr_factorial(n) / constexpr_factorial(m) / constexpr_factorial(n - m); 149 | } 150 | 151 | //https://stackoverflow.com/a/9331125 152 | constexpr int nChooseR(int n, int k) { 153 | if (k > n) return 0; 154 | if (k << 1 > n) k = n - k; 155 | if (k == 0) return 1; 156 | int result = n; 157 | // TODO can you guarantee that /=i will always have 'i' as a divisor? or do we need two loops? 
158 | for (int i = 2; i <= k; ++i) { 159 | result *= n - i + 1; 160 | result /= i; 161 | } 162 | return result; 163 | } 164 | 165 | } 166 | -------------------------------------------------------------------------------- /include/Tensor/Derivative.h: -------------------------------------------------------------------------------- 1 | #include "Tensor/Vector.h" 2 | #include "Common/Macros.h" 3 | #include 4 | 5 | namespace Tensor { 6 | 7 | /* 8 | partial derivative index operator 9 | (partial derivative along one coordinate) 10 | 11 | finite difference coefficients for center-space finite-difference partial derivatives found at 12 | http://en.wikipedia.org/wiki/Finite_difference_coefficients 13 | */ 14 | 15 | template 16 | struct PartialDerivativeCoeffs; 17 | 18 | template 19 | struct PartialDerivativeCoeffs { 20 | static constexpr std::array coeffs = { 1./2. }; 21 | }; 22 | 23 | template 24 | struct PartialDerivativeCoeffs { 25 | static constexpr std::array coeffs = { 2./3., -1./12. }; 26 | }; 27 | 28 | template 29 | struct PartialDerivativeCoeffs { 30 | static constexpr std::array coeffs = { 3./4., -3./20., 1./60. }; 31 | }; 32 | 33 | template 34 | struct PartialDerivativeCoeffs { 35 | static constexpr std::array coeffs = { 4./5., -1./5., 4./105., -1./280. 
}; 36 | }; 37 | 38 | // continuous derivative 39 | 40 | template 41 | auto partialDerivative( 42 | auto f, 43 | auto x, 44 | typename decltype(f(x))::Scalar h = .01 45 | ) { 46 | using T = decltype(f(x)); 47 | using S = typename T::Scalar; 48 | using C = PartialDerivativeCoeffs; 49 | static_assert(T::isSquare); // all dimensions match 50 | constexpr int dim = T::template dim<0>; 51 | auto xofs = []( 52 | auto x, 53 | S h, 54 | int k, // which dimension 55 | S offset // how far 56 | ) { 57 | auto x2 = x; 58 | x2[k] += h * offset; 59 | return x2; 60 | }; 61 | vec result; //first index is derivative 62 | for (int k = 0; k < dim; ++k) { 63 | result[k] = [&](std::integer_sequence) constexpr -> T { 64 | return ((( 65 | f(xofs(x,h,k,i+1)) 66 | - f(xofs(x,h,k,-i-1)) 67 | ) * C::coeffs[i] / h) + ... + (0)); 68 | }(Common::make_integer_range{}); 69 | } 70 | return result; 71 | } 72 | 73 | // grid derivatives 74 | // TODO redo the whole Grid class 75 | 76 | template 77 | Type getOffset( 78 | std::function)> f, 79 | intN index, 80 | int dim, 81 | int offset) 82 | { 83 | index(dim) += offset; 84 | return f(index); 85 | } 86 | 87 | /* 88 | partial derivative operator 89 | for now let's use 2-point everywhere: d/dx^i f(x) ~= (f(x + dx^i) - f(x - dx^i)) / (2 * |dx^i|) 90 | index = index in grid of cell to pull the specified field 91 | k = dimension to differentiate across 92 | */ 93 | template 94 | struct PartialDerivativeGridImpl; 95 | 96 | template 97 | requires is_tensor_v && std::is_same_v 98 | struct PartialDerivativeGridImpl { 99 | static constexpr auto rank = InputType::rank; 100 | static constexpr decltype(auto) exec( 101 | intN const & gridIndex, 102 | vec const & dx, 103 | std::function index)> f 104 | ) { 105 | using Coeffs = PartialDerivativeCoeffs; 106 | return vec([&](intN dstIndex) -> Real { 107 | int gradIndex = dstIndex(0); 108 | #if 0 109 | intN srcIndex = [&](std::integer_sequence) constexpr -> intN { 110 | // this is calling (Scalar, ...) 
ctors, which have a fixed limit 111 | // how do I get this to call the initializer_list ctor? 112 | //return intN{((int)dstIndex(i+1))...}; 113 | return intN(std::make_tuple(((int)dstIndex(i+1))...)); 114 | //return intN(std::array{((int)dstIndex(i+1))...}); 115 | }(std::make_integer_sequence{}); 116 | #else 117 | intN srcIndex([&](int i) -> int { return dstIndex(i+1); }); 118 | #endif 119 | return [&](std::integer_sequence) constexpr -> Real { 120 | return ((( 121 | getOffset(f, gridIndex, gradIndex, i)(srcIndex) 122 | - getOffset(f, gridIndex, gradIndex, -i)(srcIndex) 123 | ) * Coeffs::coeffs[i-1]) + ... + (0)) / dx(gradIndex); 124 | }(Common::make_integer_range{}); 125 | }); 126 | } 127 | }; 128 | 129 | template 130 | struct PartialDerivativeGridImpl { 131 | using InputType = Real; 132 | static constexpr decltype(auto) exec( 133 | intN const &gridIndex, 134 | vec const &dx, 135 | std::function index)> f 136 | ) { 137 | using Coeffs = PartialDerivativeCoeffs; 138 | return vec([&](int gradIndex) -> Real { 139 | return [&](std::integer_sequence) constexpr -> Real { 140 | return ((( 141 | getOffset(f, gridIndex, gradIndex, i) 142 | - getOffset(f, gridIndex, gradIndex, -i) 143 | ) * Coeffs::coeffs[i-1]) + ... 
+ (0)) / dx(gradIndex); 144 | }(Common::make_integer_range{}); 145 | }); 146 | } 147 | }; 148 | 149 | template 150 | auto partialDerivativeGrid( 151 | intN const & index, 152 | vec const & dx, 153 | std::function)> f 154 | ) { 155 | return PartialDerivativeGridImpl::exec(index, dx, f); 156 | } 157 | 158 | } 159 | -------------------------------------------------------------------------------- /test/src/Symmetric.cpp: -------------------------------------------------------------------------------- 1 | #include "Test/Test.h" 2 | 3 | //#define STORAGE_LOWER //lower-triangular 4 | #define STORAGE_UPPER //upper-triangular 5 | 6 | template 7 | void verifyAccessSym(T & a){ 8 | // testing fields 9 | TEST_EQ(a.x_x, 0); 10 | TEST_EQ(a.x_y, 1); 11 | TEST_EQ(a.x_z, 4); 12 | TEST_EQ(a.y_x, 1); 13 | TEST_EQ(a.y_y, 2); 14 | TEST_EQ(a.y_z, 5); 15 | TEST_EQ(a.z_x, 4); 16 | TEST_EQ(a.z_y, 5); 17 | TEST_EQ(a.z_z, 8); 18 | } 19 | 20 | void test_Symmetric() { 21 | //symmetric 22 | 23 | auto a = Tensor::float3s3(); // default 24 | static_assert(a.rank == 2); 25 | static_assert(a.dim<0> == 3); 26 | static_assert(a.dim<1> == 3); 27 | // default ctor 28 | for (int i = 0; i < a.count<0>; ++i) { 29 | TEST_EQ(a.s[i], 0); 30 | } 31 | for (int i = 0; i < a.dim<0>; ++i) { 32 | for (int j = 0; j < a.dim<1>; ++j) { 33 | TEST_EQ(a(i,j), 0); 34 | TEST_EQ(a(j,i), 0); 35 | } 36 | } 37 | /* 38 | use a symmetric procedural matrix with distinct values , esp for verifying .x_x fields 39 | don't use a_ij = i+j, because a_02 == a_11 40 | so here's a procedural symmetric matrix with all distinct symmetric components: 41 | a_ij = i*i + j*j 42 | {{0, 1, 4}, 43 | {1, 2, 5}, 44 | {4, 5, 8}} 45 | so a.s == {0,1,2,4,5,8}; 46 | */ 47 | a = Tensor::float3s3(0,1,2,4,5,8); 48 | 49 | // verify index access works 50 | 51 | verifyAccessSym(a); 52 | verifyAccessSym(a); 53 | 54 | auto f = [](int i, int j) -> float { return i*i + j*j; }; 55 | verifyAccessRank2(a, f); 56 | verifyAccessRank2(a, f); 57 | 58 | // lambda ctor 
using int,int... 59 | TEST_EQ(a, Tensor::float3s3([](int i, int j) -> float { return i*i + j*j; })); 60 | 61 | // lambda ctor using int2 62 | TEST_EQ(a, Tensor::float3s3([](Tensor::int2 ij) -> float { return ij(0)*ij(0) + ij(1)*ij(1); })); 63 | 64 | // verify only 6 writes take place during ctor 65 | { 66 | int k = 0; 67 | // verifies lambda-of-ref too 68 | auto c = Tensor::float3s3([&](int i, int j) -> float { 69 | ++k; 70 | return (float)(i*i+j*j); 71 | }); 72 | TEST_EQ(k, 6); //for write iterators in lambda ctor ... 73 | TEST_EQ(c, a); 74 | } 75 | 76 | // lambda ctor using int2 77 | TEST_EQ(a, Tensor::float3s3([](Tensor::int2 ij) -> float { 78 | return (float)(ij.x*ij.x+ij.y*ij.y); 79 | })); 80 | 81 | 82 | #ifdef STORAGE_LOWER // lower-triangular 83 | TEST_EQ(Tensor::float3s3::getLocalReadForWriteIndex(0), Tensor::int2(0,0)); 84 | TEST_EQ(Tensor::float3s3::getLocalReadForWriteIndex(1), Tensor::int2(1,0)); 85 | TEST_EQ(Tensor::float3s3::getLocalReadForWriteIndex(2), Tensor::int2(1,1)); 86 | TEST_EQ(Tensor::float3s3::getLocalReadForWriteIndex(3), Tensor::int2(2,0)); 87 | TEST_EQ(Tensor::float3s3::getLocalReadForWriteIndex(4), Tensor::int2(2,1)); 88 | TEST_EQ(Tensor::float3s3::getLocalReadForWriteIndex(5), Tensor::int2(2,2)); 89 | #endif 90 | #ifdef STORAGE_UPPER // upper-triangular 91 | TEST_EQ(Tensor::float3s3::getLocalReadForWriteIndex(0), Tensor::int2(0,0)); 92 | TEST_EQ(Tensor::float3s3::getLocalReadForWriteIndex(1), Tensor::int2(0,1)); 93 | TEST_EQ(Tensor::float3s3::getLocalReadForWriteIndex(2), Tensor::int2(1,1)); 94 | TEST_EQ(Tensor::float3s3::getLocalReadForWriteIndex(3), Tensor::int2(0,2)); 95 | TEST_EQ(Tensor::float3s3::getLocalReadForWriteIndex(4), Tensor::int2(1,2)); 96 | TEST_EQ(Tensor::float3s3::getLocalReadForWriteIndex(5), Tensor::int2(2,2)); 97 | #endif 98 | 99 | for (int i = 0; i < Tensor::float3s3::localCount; ++i) { 100 | std::cout << i << "\t" << Tensor::float3s3::getLocalReadForWriteIndex(i) << std::endl; 101 | } 102 | 103 | // this is 
symmetric, it shouldn't matter 104 | std::cout << "getLocalWriteForReadIndex" << std::endl; 105 | for (int i = 0; i < 3; ++i) { 106 | for (int j = 0; j < 3; ++j) { 107 | std::cout << "\t" << Tensor::float3s3::getLocalWriteForReadIndex(i,j); 108 | } 109 | std::cout << std::endl; 110 | } 111 | 112 | /* 113 | test storing matrix 114 | for this test, construct from an asymmetric matrix 115 | 0 1 2 116 | 3 4 5 117 | 6 7 8 118 | ... in a symmetric tensor 119 | if storage / write iterate is lower-triangular then this will be 120 | 0 3 6 121 | 3 4 7 122 | 6 7 8 123 | if it is upper-triangular: 124 | 0 1 2 125 | 1 4 5 126 | 2 5 8 127 | */ 128 | auto b = Tensor::float3s3([](int i, int j) -> float { 129 | return 3 * i + j; 130 | }); 131 | #ifdef STORAGE_LOWER // lower triangular 132 | // test storage order 133 | // this order is for sym/asym getLocalReadForWriteIndex incrementing iread(0) first 134 | // it also means for asym that i POSITIVE, j NEGATIVE 135 | TEST_EQ(b.s[0], 0); // xx 136 | TEST_EQ(b.s[1], 3); // xy 137 | TEST_EQ(b.s[2], 4); // yy 138 | TEST_EQ(b.s[3], 6); // xz 139 | TEST_EQ(b.s[4], 7); // yz 140 | TEST_EQ(b.s[5], 8); // zz 141 | // test arg ctor 142 | TEST_EQ(b, Tensor::float3s3(0,3,4,6,7,8)); 143 | #endif // upper triangular 144 | #ifdef STORAGE_UPPER 145 | // test storage order 146 | // this order is for sym/asym getLocalReadForWriteIndex incrementing iread(1) first 147 | // it also means for asym that i NEGATIVE, j POSITIVE 148 | TEST_EQ(b.s[0], 0); // xx 149 | TEST_EQ(b.s[1], 1); // xy 150 | TEST_EQ(b.s[2], 4); // yy 151 | TEST_EQ(b.s[3], 2); // xz 152 | TEST_EQ(b.s[4], 5); // yz 153 | TEST_EQ(b.s[5], 8); // zz 154 | // test arg ctor 155 | TEST_EQ(b, Tensor::float3s3(0,1,4,2,5,8)); 156 | #endif 157 | 158 | // test symmetry read/write 159 | b(0,2) = 7; 160 | TEST_EQ(b(2,0), 7); 161 | b.x_y = -1; 162 | TEST_EQ(b.y_x, -1); 163 | 164 | // partial index 165 | for (int i = 0; i < b.dim<0>; ++i) { 166 | for (int j = 0; j < b.dim<1>; ++j) { 167 | 
TEST_EQ(b[i][j], b(i,j)); 168 | } 169 | } 170 | 171 | // operators 172 | operatorScalarTest(a); 173 | operatorMatrixTest(); 174 | 175 | TEST_EQ(Tensor::trace(Tensor::float3s3({1,2,3,4,5,6})), 10); 176 | } 177 | -------------------------------------------------------------------------------- /test/src/Quat.cpp: -------------------------------------------------------------------------------- 1 | #include "Test/Test.h" 2 | 3 | // quaternions are not tensors ... right? 4 | static_assert(!Tensor::is_tensor_v); 5 | 6 | constexpr float epsilon = 1e-6; 7 | 8 | //#define TEST_QUAT_EQ(a,b) TEST_EQ_EPS(Tensor::distance(a,b), 0, epsilon) 9 | #define TEST_QUAT_EQ(a,b)\ 10 | {\ 11 | std::ostringstream ss;\ 12 | float dist = (a).distance(b);\ 13 | ss << __FILE__ << ":" << __LINE__ << ": " << #a << " == " << #b << " :: " << (a) << " == " << (b);\ 14 | ss << " (distance=" << dist << ", epsilon=" << epsilon << ")";\ 15 | std::string msg = ss.str();\ 16 | if (dist > epsilon) {\ 17 | msg += " FAILED!";\ 18 | std::cout << msg << std::endl;\ 19 | throw Common::Exception() << msg;\ 20 | }\ 21 | std::cout << msg << std::endl;\ 22 | } 23 | 24 | void test_Quaternions() { 25 | // me messing around ... putting quaterniong basis elements into a 4x4 matrix 26 | using Q = Tensor::quatf; 27 | using Q4 = Tensor::tensor; 28 | using Q44 = Tensor::tensor; 29 | // TODO despite convenience of casting-to-vectors ... I should make quat real be q(0) ... 
30 | //auto z = Q{0,0,0,0}; 31 | auto e0 = Q{0,0,0,1}; 32 | auto e1 = Q{1,0,0,0}; 33 | auto e2 = Q{0,1,0,0}; 34 | auto e3 = Q{0,0,1,0}; 35 | auto e = Q4{e0,e1,e2,e3}; 36 | ECHO(e); 37 | auto g = e.outer(e); 38 | static_assert(std::is_same_v); 39 | ECHO(g); 40 | 41 | /* 42 | g_ab = e_a * e_b 43 | g^ab = 1/4 * ~e_a * ~e_b 44 | then g_ac * g^cb = e_a * e_c * 1/4 * ~e_c * ~e_b 45 | Sum_c ( 1/4 * e_c * ~e_c) = 1/4 * 4 = 1 46 | so g_ac * g^cb = e_a ~e_b 47 | so 1/2 (g_ac * g^cb + g_bc * g^ca) = delta_a^b 48 | and 1/2 ((g_ac * g^cb) + ~(g_ac * g^cb)) = delta_a^b 49 | and 1/2 ((g_ac * g^cb) + ~g^cb * ~g_ac) = delta_a^b 50 | 51 | 52 | */ 53 | auto conj = [](auto q) { return q.conjugate(); }; 54 | auto econj = e.map(conj); 55 | auto ginv = econj.outer(econj) * .25f; 56 | ECHO(ginv); 57 | ECHO(g * ginv); 58 | ECHO(ginv * g); 59 | // well at least the diagonal is ident ... and the off-diagonal is imaginary and skew-symmetric ... 60 | ECHO(.5f * ((g * ginv) + (g * ginv).transpose())); 61 | ECHO(.5f * ((g * ginv) + (g * ginv).map(conj))); 62 | } 63 | 64 | 65 | void test_Quat() { 66 | { 67 | //verify identity 68 | Tensor::quatf q; 69 | TEST_EQ(q.x, 0); 70 | TEST_EQ(q.y, 0); 71 | TEST_EQ(q.z, 0); 72 | TEST_EQ(q.w, 0); //on the fence about this ... default to 1 or to 0? 1 for rotations, 0 for summing (which yes sometimes you do sum quaternions, esp if you put them in a matrix.) 
73 | } 74 | 75 | { 76 | //real quaternion 77 | Tensor::quatf q = {0,0,0,2}; 78 | 79 | //normal = unit 80 | TEST_EQ(normalize(q), Tensor::quatf(1)); 81 | 82 | //real quaternion, conjugate = self 83 | TEST_EQ(q.conjugate(), q); 84 | } 85 | 86 | { 87 | //90' rotation 88 | float const sqrt_1_2 = sqrt(.5); 89 | auto rx = Tensor::quatf(1,0,0,.5*M_PI).fromAngleAxis(); 90 | auto ry = Tensor::quatf(0,1,0,.5*M_PI).fromAngleAxis(); 91 | auto rz = Tensor::quatf(0,0,1,.5*M_PI).fromAngleAxis(); 92 | TEST_QUAT_EQ(rx, Tensor::quatf(sqrt_1_2, 0, 0, sqrt_1_2)); 93 | TEST_QUAT_EQ(ry, Tensor::quatf(0, sqrt_1_2, 0, sqrt_1_2)); 94 | TEST_QUAT_EQ(rz, Tensor::quatf(0, 0, sqrt_1_2, sqrt_1_2)); 95 | 96 | //verify conjugate by components 97 | TEST_QUAT_EQ(rx.conjugate(), Tensor::quatf(-sqrt_1_2, 0, 0, sqrt_1_2)); 98 | TEST_QUAT_EQ(ry.conjugate(), Tensor::quatf(0, -sqrt_1_2, 0, sqrt_1_2)); 99 | TEST_QUAT_EQ(rz.conjugate(), Tensor::quatf(0, 0, -sqrt_1_2, sqrt_1_2)); 100 | 101 | //verify negative by components 102 | TEST_QUAT_EQ(-rx, Tensor::quatf(-sqrt_1_2, 0, 0, -sqrt_1_2)); 103 | TEST_QUAT_EQ(-ry, Tensor::quatf(0, -sqrt_1_2, 0, -sqrt_1_2)); 104 | TEST_QUAT_EQ(-rz, Tensor::quatf(0, 0, -sqrt_1_2, -sqrt_1_2)); 105 | 106 | // 180 rotation around each axis 107 | //180' rotation = w==0 = "pure quaternion" 108 | auto r2x = Tensor::quatf(1,0,0,M_PI).fromAngleAxis(); 109 | auto r2y = Tensor::quatf(0,1,0,M_PI).fromAngleAxis(); 110 | auto r2z = Tensor::quatf(0,0,1,M_PI).fromAngleAxis(); 111 | TEST_QUAT_EQ(r2x, Tensor::quatf(1,0,0,0)); 112 | TEST_QUAT_EQ(r2y, Tensor::quatf(0,1,0,0)); 113 | TEST_QUAT_EQ(r2z, Tensor::quatf(0,0,1,0)); 114 | 115 | //verify conjugate 116 | TEST_QUAT_EQ(r2x.conjugate(), Tensor::quatf(-1,0,0,0)); 117 | TEST_QUAT_EQ(r2y.conjugate(), Tensor::quatf(0,-1,0,0)); 118 | TEST_QUAT_EQ(r2z.conjugate(), Tensor::quatf(0,0,-1,0)); 119 | 120 | //pure quaternion = negative equal to its conjugate 121 | TEST_QUAT_EQ(r2x.conjugate(), -r2x); 122 | TEST_QUAT_EQ(r2y.conjugate(), -r2y); 123 | 
TEST_QUAT_EQ(r2z.conjugate(), -r2z); 124 | 125 | //verify two 90' rotation around an axis == 180' rotation around the axis 126 | TEST_QUAT_EQ(rx*rx, r2x); 127 | TEST_QUAT_EQ(ry*ry, r2y); 128 | TEST_QUAT_EQ(rz*rz, r2z); 129 | 130 | //verify rx*ry, ry*rz, rz*rx 131 | TEST_QUAT_EQ(rx*ry, Tensor::quatf(.5f, .5f, .5f, .5f)); 132 | TEST_QUAT_EQ(ry*rz, Tensor::quatf(.5f, .5f, .5f, .5f)); 133 | TEST_QUAT_EQ(rz*rx, Tensor::quatf(.5f, .5f, .5f, .5f)); 134 | 135 | // three rotations around each axis 136 | auto r3x = r2x*rx; 137 | auto r3y = r2y*ry; 138 | auto r3z = r2z*rz; 139 | 140 | //test associativity 141 | TEST_QUAT_EQ(r3x, rx*r2x); 142 | TEST_QUAT_EQ(r3y, ry*r2y); 143 | TEST_QUAT_EQ(r3z, rz*r2z); 144 | 145 | //verify three rotations is rotationally equivalent (up to negative) of a conjugate (equals inverse when normalized) rotation 146 | TEST_QUAT_EQ(-rx, r3x.conjugate()); 147 | TEST_QUAT_EQ(-ry, r3y.conjugate()); 148 | TEST_QUAT_EQ(-rz, r3z.conjugate()); 149 | 150 | //quaterion question: how come this one has a sign flip on the axis while the reverse order doesn't? 
151 | // hmm because the reverse-order , all the same, demonstrates the 2nd transform not affecting the 1st transform, while in this order the transforms do affect one another 152 | TEST_QUAT_EQ(rz*ry, Tensor::quatf(-.5f, .5f, .5f, .5f)); 153 | TEST_QUAT_EQ(ry*rx, Tensor::quatf(.5f, .5f, -.5f, .5f)); 154 | TEST_QUAT_EQ(rx*rz, Tensor::quatf(.5f, -.5f, .5f, .5f)); 155 | 156 | TEST_QUAT_EQ(ry*rx*rz, rx); 157 | TEST_QUAT_EQ(rz*ry*rx, ry); 158 | TEST_QUAT_EQ(rx*rz*ry, rz); 159 | 160 | TEST_QUAT_EQ(rz*rx*ry, Tensor::quatf(0, sqrt_1_2, sqrt_1_2, 0)); 161 | TEST_QUAT_EQ(rx*ry*rz, Tensor::quatf(sqrt_1_2, 0, sqrt_1_2, 0)); 162 | TEST_QUAT_EQ(ry*rz*rx, Tensor::quatf(sqrt_1_2, sqrt_1_2, 0, 0)); 163 | 164 | { 165 | Tensor::quatf::vec3 v = {1,2,3}; 166 | auto vx = rx.rotate(v); 167 | TEST_QUAT_EQ(vx, Tensor::quatf::vec3(1, -3, 2)); 168 | auto vy = ry.rotate(v); 169 | TEST_QUAT_EQ(vy, Tensor::quatf::vec3(3, 2, -1)); 170 | auto vz = rz.rotate(v); 171 | TEST_QUAT_EQ(vz, Tensor::quatf::vec3(-2, 1, 3)); 172 | } 173 | } 174 | 175 | test_Quaternions(); 176 | } 177 | -------------------------------------------------------------------------------- /include/Tensor/Grid.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include "Tensor/Vector.h" // new tensor struct 4 | #include "Tensor/Range.h" 5 | #include "Common/Exception.h" 6 | #include 7 | 8 | #if PLATFORM_MSVC 9 | #undef min 10 | #undef max 11 | #endif 12 | 13 | 14 | namespace Tensor { 15 | 16 | template 17 | inline intN stepForSize(intN const size) { 18 | intN step; 19 | step[0] = 1; 20 | for (int i = 1; i < rank; ++i) { 21 | step[i] = step[i-1] * size[i-1]; 22 | } 23 | return step; 24 | } 25 | 26 | //rank is templated, but dim is not as it varies per-rank 27 | //so this is dynamically-sized tensor 28 | template 29 | struct Grid { 30 | using Type = Type_; 31 | using value_type = Type; 32 | static constexpr auto rank = rank_; 33 | using intN = Tensor::intN; 34 | 35 | intN 
size; 36 | Type * v = {}; 37 | bool own = {}; //or just use a shared_ptr to v? 38 | 39 | //cached for quick access by dot with index vector 40 | //step[0] = 1, step[1] = size[0], step[j] = product(i=1,j-1) size[i] 41 | intN step; 42 | 43 | Grid() { 44 | // TODO but in my ptr ctor I say v cannot be null ... ? 45 | } 46 | 47 | // deep copy 48 | Grid(Grid const & src) 49 | : size(src.size), 50 | v(new Type[src.size.product()]()), 51 | own(true), 52 | step(stepForSize(src.size)) 53 | { 54 | for (auto i : range()) { 55 | (*this)(i) = src(i); 56 | } 57 | } 58 | 59 | Grid(Grid && src) 60 | : size(src.size), 61 | v(src.v), 62 | own(src.own), // fixed: was own(true) -- moving from a shallow (non-owning) Grid must not claim ownership, or ~Grid would delete[] memory owned elsewhere 63 | step(stepForSize(src.size)) 64 | { 65 | src.v = nullptr; 66 | src.own = false; // don't free 67 | } 68 | 69 | Grid(intN const & size_) 70 | : size(size_), 71 | v(new Type[size.product()]()), 72 | own(true), 73 | step(stepForSize(size_)) 74 | {} 75 | 76 | // shallow copy by default ... when passed a pointer ... 77 | // ... is this a bad idea? 78 | Grid(intN const & size_, Type * v_) 79 | : size(size_), 80 | v(v_), 81 | own(false), 82 | step(stepForSize(size_)) 83 | { 84 | if (!v) throw Common::Exception() << "v cannot be null. use the (intN) constructor."; 85 | } 86 | 87 | Grid(intN const & size_, std::function f) 88 | : size(size_), 89 | v(new Type[size_.product()]()), 90 | own(true), 91 | step(stepForSize(size_)) 92 | { 93 | for (auto i : range()) { 94 | (*this)(i) = f(i); 95 | } 96 | } 97 | 98 | ~Grid() { 99 | if (own) { 100 | delete[] v; 101 | } 102 | } 103 | 104 | // dereference by vararg ints 105 | 106 | template 107 | struct BuildDeref; 108 | 109 | template 110 | struct BuildDeref { 111 | static intN exec(int next, Rest ...
rest) { 112 | intN index = BuildDeref::exec(rest...); 113 | index(offset) = next; 114 | return index; 115 | } 116 | }; 117 | 118 | template 119 | struct BuildDeref { 120 | static intN exec(int last) { 121 | static_assert(offset == rank-1, "didn't provide enough arguments for dereference"); 122 | intN index; 123 | index(offset) = last; 124 | return index; 125 | } 126 | }; 127 | 128 | template 129 | Type & operator()(int first, Rest... rest) { 130 | return getValue(BuildDeref<0, int, Rest...>::exec(first, rest...)); 131 | } 132 | 133 | template 134 | Type const & operator()(int first, Rest... rest) const { 135 | return getValue(BuildDeref<0, int, Rest...>::exec(first, rest...)); 136 | } 137 | 138 | //dereference by a vector of ints 139 | 140 | //typical access will be only for the Type's sake 141 | Type & operator()(intN const & deref) { return getValue(deref); } 142 | Type const & operator()(intN const & deref) const { return getValue(deref); } 143 | 144 | //but other folks (currently only our initialization of our indexes) will want the whole value 145 | Type & getValue(intN const & deref) { 146 | #ifdef DEBUG 147 | for (int i = 0; i < rank; ++i) { 148 | if (deref(i) < 0 || deref(i) >= size(i)) { 149 | throw Common::Exception() << "size is " << size << " but dereference is " << deref; 150 | } 151 | } 152 | #endif 153 | int flat_deref = deref.dot(step); 154 | assert(flat_deref >= 0 && flat_deref < size.product()); 155 | return v[flat_deref]; 156 | } 157 | Type const &getValue(intN const &deref) const { 158 | #ifdef DEBUG 159 | for (int i = 0; i < rank; ++i) { 160 | if (deref(i) < 0 || deref(i) >= size(i)) { 161 | throw Common::Exception() << "size is " << size << " but dereference is " << deref; 162 | } 163 | } 164 | #endif 165 | int flat_deref = deref.dot(step); 166 | assert(flat_deref >= 0 && flat_deref < size.product()); 167 | return v[flat_deref]; 168 | } 169 | 170 | using iterator = Type*; 171 | using const_iterator = Type const*; 172 | Type *begin() { return 
v; } 173 | Type *end() { return v + size.product(); } 174 | Type const *begin() const { return v; } 175 | Type const *end() const { return v + size.product(); } 176 | 177 | RangeObj range() const { 178 | return RangeObj(intN(), size); 179 | } 180 | 181 | //dereference by vararg ints 182 | 183 | template 184 | void resize(int first, Rest... rest) { 185 | resize(BuildDeref<0, int, Rest...>::exec(first, rest...)); 186 | } 187 | 188 | //dereference by a vector of ints 189 | 190 | void resize(intN const& newSize) { 191 | if (size == newSize) return; 192 | 193 | intN oldSize = size; 194 | intN oldStep = step; 195 | Type* oldV = v; 196 | 197 | size = newSize; 198 | v = new Type[newSize.product()]; 199 | step(0) = 1; 200 | for (int i = 1; i < rank; ++i) { 201 | step(i) = step(i-1) * size(i-1); 202 | } 203 | 204 | intN minSize; 205 | for (int i = 0; i < rank; ++i) { 206 | minSize(i) = size(i) < oldSize(i) ? size(i) : oldSize(i); 207 | } 208 | 209 | RangeObj range(intN(), minSize); 210 | for (typename RangeObj::iterator iter = range.begin(); iter != range.end(); ++iter) { 211 | intN index = *iter; 212 | int oldOffset = oldStep.dot(index); 213 | (*this)(index) = oldV[oldOffset]; 214 | } 215 | 216 | if (own) { delete[] oldV; } own = true; // fixed: was unconditional delete[] oldV -- must not free a borrowed (own==false) buffer; and after resize we always own the freshly allocated one, so record that to avoid leaking it 217 | } 218 | 219 | Grid & operator=(Grid const & src) { 220 | resize(src.size); 221 | 222 | Type* srcv = src.v; 223 | Type* dstv = v; 224 | for (int i = size.product()-1; i >= 0; --i) { 225 | *(dstv++) = *(srcv++); 226 | } 227 | return *this; 228 | } 229 | 230 | Grid & operator=(Grid && src) { 231 | if (own) { 232 | delete[] v; 233 | } 234 | v = src.v; 235 | own = src.own; 236 | size = src.size; 237 | step = src.step; 238 | src.v = nullptr; 239 | src.own = false; 240 | return *this; 241 | } 242 | 243 | template 244 | decltype(auto) map(std::function f) const { 245 | return Grid(size, [&](intN i) -> Return { 246 | return f((*this)(i)); 247 | }); 248 | } 249 | }; 250 | 251 | } 252 | --------------------------------------------------------------------------------
/include/Tensor/Range.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include "Common/ForLoop.h" 4 | #include "Tensor/Range.h.h" 5 | #include 6 | #include 7 | 8 | namespace Tensor { 9 | 10 | /* 11 | inc index 0 first (and flattening adding order 0 last) is memory-in-order for row-major images (since the 1st coordinate is the column) 12 | but inc index rank-1 first (and flattening adding it last) is not memory-in-order for row-major matrices (since the 1st coordinate is the row) 13 | so for memory-in-order matrix iteration, use OuterOrderIterator 14 | but for memory-in-order image iteration, use InnerOrderIterator 15 | or ofc you can switch matrixes to be col-major, like GLSL, and then InnerOrderIterator works for both, but then A_ij in math = A.j.i in code 16 | 17 | TODO con... template int Owner::getRangeMin() and getRangeMax 18 | */ 19 | template< 20 | int rankFirst, 21 | int rankLast, 22 | int rankStep, 23 | int rank, 24 | typename Owner // should include the constness 25 | > 26 | struct RangeIterator { 27 | Owner & owner; 28 | using intN = Tensor::intN; 29 | intN index; 30 | 31 | constexpr RangeIterator(Owner & owner_) : owner(owner_) { 32 | [](RangeIterator & it, std::integer_sequence) constexpr { 33 | it.index = {it.owner.template getRangeMin()...}; 34 | }(*this, std::make_integer_sequence{}); 35 | } 36 | 37 | constexpr RangeIterator(Owner & owner_, intN index_) : owner(owner_), index(index_) {} 38 | constexpr RangeIterator(RangeIterator const & iter) : owner(iter.owner), index(iter.index) {} 39 | constexpr RangeIterator(RangeIterator && iter) : owner(iter.owner), index(iter.index) {} 40 | 41 | RangeIterator & operator=(RangeIterator const & o) { 42 | owner = o.owner; 43 | index = o.index; 44 | return *this; 45 | } 46 | RangeIterator & operator=(RangeIterator && o) { 47 | owner = o.owner; 48 | index = o.index; 49 | return *this; 50 | } 51 | 52 | template 53 | struct FlattenLoop { 54 | static bool 
exec(RangeIterator const & it, int & flatIndex) { 55 | flatIndex *= it.owner.template getRangeMax() - it.owner.template getRangeMin(); 56 | flatIndex += it.index[i] - it.owner.template getRangeMin(); 57 | return false; 58 | } 59 | }; 60 | //converts index to int 61 | int flatten() const { 62 | int flatIndex = 0; 63 | Common::ForLoop::exec(*this, flatIndex); 64 | return flatIndex; 65 | } 66 | 67 | template 68 | struct UnFlattenLoop { 69 | static bool exec(RangeIterator & it, int & flatIndex) { 70 | int s = it.owner.template getRangeMax() - it.owner.template getRangeMin(); 71 | int n = i == rankLast ? flatIndex : flatIndex % s; 72 | it.index[i] = n + it.owner.template getRangeMin(); 73 | flatIndex = (flatIndex - n) / s; 74 | return false; 75 | } 76 | }; 77 | //converts int to index 78 | void unflatten(int flatIndex) { 79 | Common::ForLoop::exec(*this, flatIndex); 80 | } 81 | 82 | constexpr bool operator==(RangeIterator const & b) const { return index == b.index; } 83 | constexpr bool operator!=(RangeIterator const & b) const { return index != b.index; } 84 | constexpr RangeIterator & operator+=(int offset) { unflatten(flatten() + offset); return *this; } 85 | constexpr RangeIterator & operator-=(int offset) { unflatten(flatten() - offset); return *this; } 86 | constexpr RangeIterator operator+(int offset) const { return RangeIterator(*this) += offset; } 87 | constexpr RangeIterator operator-(int offset) const { return RangeIterator(*this) -= offset; } 88 | constexpr int operator-(RangeIterator const &i) const { return flatten() - i.flatten(); } 89 | 90 | template 91 | struct Inc { 92 | static constexpr bool exec(RangeIterator & it) { 93 | ++it.index[i]; 94 | if (it.index[i] < it.owner.template getRangeMax()) return true; 95 | if constexpr (i != rankLast) it.index[i] = it.owner.template getRangeMin(); 96 | return false; 97 | } 98 | }; 99 | constexpr void inc() { 100 | #if 1 // works but risks runaway template compiling 101 | Common::ForLoop::exec(*this); 102 | #elif 0 
// works but still requires a helper class in global namespace ... still haven't got that constexpr lambda for template arg like I want ... 103 | Common::for_seq< 104 | Common::make_integer_range, 105 | Inc 106 | >(*this); 107 | #endif 108 | } 109 | constexpr RangeIterator & operator++() { inc(); return *this; } 110 | constexpr RangeIterator & operator++(int) { inc(); return *this; } 111 | 112 | constexpr decltype(auto) operator*() const { return owner.getIterValue(index); } 113 | constexpr decltype(auto) operator->() const { return &owner.getIterValue(index); } 114 | 115 | static constexpr RangeIterator begin(Owner & owner) { 116 | return RangeIterator(owner); 117 | } 118 | 119 | static constexpr RangeIterator end(Owner & owner) { 120 | auto i = RangeIterator(owner); 121 | i.index[rankLast] = owner.template getRangeMax(); 122 | return i; 123 | } 124 | 125 | // weird that this works with Common::has_to_ostream_v operator<< ... since nothing else does 126 | std::ostream & to_ostream(std::ostream & o) const { 127 | return o << "Iterator(owner=" << &owner << ", index=" << index << ")"; 128 | } 129 | }; 130 | 131 | // TODO intermediate class with templated getMin and getMax or something 132 | // so I can use RangeObj iteration functionality but without having to store the min and max? 
133 | 134 | // iterate over an arbitrary range 135 | template 136 | struct RangeObj { 137 | using This = RangeObj; 138 | static constexpr auto rank = rank_; 139 | using intN = Tensor::intN; 140 | intN min, max; 141 | 142 | constexpr RangeObj(intN min_, intN max_) : min(min_), max(max_) {} 143 | 144 | // implementation for RangeIterator's Owner: 145 | template constexpr int getRangeMin() const { return min[i]; } 146 | template constexpr int getRangeMax() const { return max[i]; } 147 | intN const & getIterValue(intN const & i) const { return i; } 148 | // 149 | 150 | using InnerOrderIterator = RangeIteratorInner; // inc 0 first 151 | using OuterOrderIterator = RangeIteratorOuter; // inc n-1 first 152 | 153 | using iterator = std::conditional_t; 154 | constexpr iterator begin() { return iterator::begin(*this); } 155 | constexpr iterator end() { return iterator::end(*this); } 156 | 157 | using const_iterator = iterator; 158 | constexpr const_iterator begin() const { return const_iterator::begin(*this); } 159 | constexpr const_iterator end() const { return const_iterator::end(*this); } 160 | constexpr const_iterator cbegin() const { return const_iterator::begin(*this); } 161 | constexpr const_iterator cend() const { return const_iterator::end(*this); } 162 | }; 163 | 164 | } 165 | -------------------------------------------------------------------------------- /include/Tensor/AntiSymRef.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | // wrapper of a ref of a type that reads and writes the negative of the value its wrapping 4 | 5 | #include "Tensor/Meta.h" //is_tensor_v, is_instance_v 6 | #include "Common/Meta.h" //is_instance_v 7 | #include //std::reference_wrapper 8 | #include 9 | #include //std::ostream 10 | 11 | namespace Tensor { 12 | 13 | struct Sign { 14 | typedef enum { 15 | POSITIVE, // == 0 // so that how1 ^ how2 produces the sign for AntiSymRef1-of-AntiSymRef2 16 | NEGATIVE, // == 1 17 | ZERO, 18 | } 
Value; 19 | Value value = POSITIVE; 20 | Sign() {} 21 | Sign(Value const & value_) : value(value_) {} 22 | Sign(Value && value_) : value(value_) {} 23 | Sign & operator=(Value const & value_) { 24 | value = value_; 25 | return *this; 26 | } 27 | bool operator==(Value const & value_) const { return value == value_; } 28 | bool operator!=(Value const & value_) const { return !operator==(value_); } 29 | }; 30 | 31 | inline std::ostream& operator<<(std::ostream & o, Sign const & s) { 32 | return o << "Sign(" << s.value << ")"; 33 | } 34 | 35 | inline Sign operator*(Sign a, Sign b) { 36 | if ((a == Sign::POSITIVE || a == Sign::NEGATIVE) && 37 | (b == Sign::POSITIVE || b == Sign::NEGATIVE)) 38 | { 39 | return (Sign::Value)((int)a.value ^ (int)b.value); 40 | } 41 | return Sign::ZERO; 42 | } 43 | 44 | inline Sign operator!(Sign a) { 45 | return a * Sign::NEGATIVE; 46 | } 47 | 48 | template 49 | struct AntiSymRef { 50 | using This = AntiSymRef; 51 | using Type = T; 52 | 53 | static_assert(!Common::is_instance_v); //never wrap AntiSymRef>, instead just use 1 wrapper, flip 'how' if necessary. 
54 | 55 | std::optional> x; 56 | 57 | Sign how = Sign::ZERO; 58 | 59 | AntiSymRef() {} 60 | AntiSymRef(std::reference_wrapper const & x_, Sign how_) : x(x_), how(how_) {} 61 | AntiSymRef(AntiSymRef const & r) : x(r.x), how(r.how) {} 62 | AntiSymRef(AntiSymRef && r) : x(r.x), how(r.how) {} 63 | 64 | This & operator=(T const & y) { 65 | if (how == Sign::POSITIVE) { 66 | (*x).get() = y; 67 | } else if (how == Sign::NEGATIVE) { 68 | (*x).get() = -y; 69 | } else {//if (how == ZERO) { 70 | //ZERO should only be wrapping temp elements outside the antisymmetric matrix 71 | } 72 | return *this; 73 | } 74 | 75 | This & operator=(T && y) { 76 | if (how == Sign::POSITIVE) { 77 | (*x).get() = y; 78 | } else if (how == Sign::NEGATIVE) { 79 | (*x).get() = -y; 80 | } else {//if (how == ZERO) { 81 | // no write 82 | } 83 | return *this; 84 | } 85 | 86 | operator T() const { 87 | if (how == Sign::POSITIVE) { 88 | return (*x).get(); 89 | } else if (how == Sign::NEGATIVE) { 90 | return -(*x).get(); 91 | } else {//if (how == ZERO) { 92 | return {}; 93 | } 94 | } 95 | 96 | template 97 | auto operator()(Args&&... args) const 98 | //requires (std::is_invocable_v) 99 | requires (is_tensor_v) 100 | { 101 | using R = std::decay_t(args)...))>; 102 | if constexpr (Common::is_instance_v) { 103 | using RT = typename R::Type; // TODO nested-most? verified for two-nestings deep, what about 3? 104 | using RRT = std::conditional_t, const RT, RT>; 105 | if (how != Sign::POSITIVE && how != Sign::NEGATIVE) { 106 | return AntiSymRef(); 107 | } else { 108 | R && r = (*x).get()(std::forward(args)...); 109 | if (r.how != Sign::POSITIVE && r.how != Sign::NEGATIVE) { 110 | return AntiSymRef(); 111 | } else { 112 | return AntiSymRef(*r.x, how * r.how); 113 | } 114 | } 115 | } else { 116 | using RRT = std::conditional_t, const R, R>; 117 | if (how != Sign::POSITIVE && how != Sign::NEGATIVE) { 118 | return AntiSymRef(); 119 | } else { 120 | return AntiSymRef(std::ref( 121 | (*x).get()(std::forward(args)...) 
122 | ), how); 123 | } 124 | } 125 | } 126 | 127 | // return ref or no? 128 | constexpr This flip() const { 129 | if (how == Sign::POSITIVE || how == Sign::NEGATIVE) { 130 | return AntiSymRef(*x, !how); 131 | } 132 | return *this; 133 | } 134 | 135 | This & operator+=(T const & y) { 136 | if (how == Sign::POSITIVE) { 137 | (*x).get() += y; 138 | } else if (how == Sign::NEGATIVE) { 139 | (*x).get() -= y; 140 | } 141 | return *this; 142 | } 143 | 144 | This & operator-=(T const & y) { 145 | if (how == Sign::POSITIVE) { 146 | (*x).get() -= y; 147 | } else if (how == Sign::NEGATIVE) { 148 | (*x).get() += y; // fixed: was '*= y' (copy-paste from operator*=); subtracting y through a negated view must add y to the stored value, mirroring operator+= above 149 | } 150 | return *this; 151 | } 152 | 153 | This & operator*=(T const & y) { 154 | if (how == Sign::POSITIVE || how == Sign::NEGATIVE) { 155 | (*x).get() *= y; 156 | } 157 | return *this; 158 | } 159 | 160 | This & operator/=(T const & y) { 161 | if (how == Sign::POSITIVE || how == Sign::NEGATIVE) { 162 | (*x).get() /= y; 163 | } 164 | return *this; 165 | } 166 | 167 | This & operator%=(T const & y) { 168 | if (how == Sign::POSITIVE || how == Sign::NEGATIVE) { 169 | (*x).get() %= y; 170 | } 171 | return *this; 172 | } 173 | 174 | This & operator++() { 175 | if (how == Sign::POSITIVE) { 176 | (*x).get()++; 177 | } else if (how == Sign::NEGATIVE) { 178 | (*x).get()--; 179 | } 180 | return *this; 181 | } 182 | 183 | This & operator++(int) { 184 | if (how == Sign::POSITIVE) { 185 | ++(*x).get(); 186 | } else if (how == Sign::NEGATIVE) { 187 | --(*x).get(); 188 | } 189 | return *this; 190 | } 191 | 192 | This & operator--() { 193 | if (how == Sign::POSITIVE) { 194 | (*x).get()--; 195 | } else if (how == Sign::NEGATIVE) { 196 | (*x).get()++; 197 | } 198 | return *this; 199 | } 200 | 201 | This & operator--(int) { 202 | if (how == Sign::POSITIVE) { 203 | --(*x).get(); 204 | } else if (how == Sign::NEGATIVE) { 205 | ++(*x).get(); 206 | } 207 | return *this; 208 | } 209 | 210 | T operator-() const { 211 | if (how == Sign::POSITIVE) { 212 | return -(*x).get(); 213 | }
else if (how == Sign::NEGATIVE) { 214 | return (*x).get(); 215 | } 216 | return 0; 217 | } 218 | }; 219 | 220 | #define TENSOR_ANTISYMREF_OP(op)\ 221 | template\ 222 | decltype(auto) operator op(AntiSymRef const & a, T const & b) {\ 223 | return a.operator T() op b;\ 224 | }\ 225 | template\ 226 | decltype(auto) operator op(T const & a, AntiSymRef const & b) {\ 227 | return a op b.operator T();\ 228 | }\ 229 | template\ 230 | decltype(auto) operator op(AntiSymRef const & a, AntiSymRef const & b) {\ 231 | return a.operator T() op b.operator T();\ 232 | } 233 | 234 | TENSOR_ANTISYMREF_OP(==) 235 | TENSOR_ANTISYMREF_OP(!=) 236 | TENSOR_ANTISYMREF_OP(<) 237 | TENSOR_ANTISYMREF_OP(<=) 238 | TENSOR_ANTISYMREF_OP(>) 239 | TENSOR_ANTISYMREF_OP(>=) 240 | 241 | template 242 | std::ostream & operator<<(std::ostream & o, AntiSymRef const & t) { 243 | return o << t.operator T(); 244 | } 245 | 246 | } 247 | -------------------------------------------------------------------------------- /test/src/TotallyAntisymmetric.cpp: -------------------------------------------------------------------------------- 1 | #include "Test/Test.h" 2 | 3 | void test_TotallyAntisymmetric() { 4 | using float3a3a3 = Tensor::float3a3a3; 5 | static_assert(sizeof(float3a3a3) == sizeof(float)); 6 | 7 | using float3x3x3 = Tensor::tensorr; 8 | 9 | { 10 | auto L = float3a3a3(); 11 | L(0,0,0) = 1; 12 | TEST_EQ(L(0,0,0), 0); 13 | L(0,0,1) = 1; 14 | TEST_EQ(L(0,0,1), 0); 15 | L(0,1,0) = 1; 16 | TEST_EQ(L(0,1,0), 0); 17 | L(1,0,0) = 1; 18 | TEST_EQ(L(1,0,0), 0); 19 | 20 | L(0,1,2) = 1; 21 | TEST_EQ(L(0,1,2), 1); 22 | TEST_EQ(L(1,2,0), 1); 23 | TEST_EQ(L(2,0,1), 1); 24 | TEST_EQ(L(2,1,0), -1); 25 | TEST_EQ(L(1,0,2), -1); 26 | TEST_EQ(L(0,2,1), -1); 27 | } 28 | 29 | { 30 | // do iteration bounds match between rank-3 and asym-3? 
31 | auto x = float3x3x3(); 32 | auto a = float3a3a3(); 33 | auto ix = x.begin(); 34 | auto ia = a.begin(); 35 | for (;;) { 36 | TEST_EQ(ix.index, ia.index); 37 | TEST_EQ(*ix, *ia); //mind you *ia will be an AntiSymRef, but the value will evaluate to Scalar(0) 38 | ++ix; 39 | ++ia; 40 | if (ix == x.end()) { 41 | TEST_EQ(ia, a.end()); 42 | break; 43 | } 44 | if (ia == a.end()) throw Common::Exception() << "looks like iterator doesn't end in the right place"; 45 | } 46 | } 47 | 48 | //iterator test inline 49 | { 50 | auto a = float3a3a3(); 51 | auto b = float3x3x3(); 52 | for (auto i = a.begin(); i != a.end(); ++i) { 53 | ECHO(i.index); 54 | Tensor::int3 sortedi = i.index; 55 | auto sign = antisymSortAndCountFlips(sortedi); 56 | ECHO(sortedi); 57 | ECHO(sign); 58 | auto writeIndex = float3a3a3::getLocalWriteForReadIndex(sortedi); 59 | ECHO(writeIndex); 60 | TEST_BOOL( 61 | sign == Tensor::Sign::ZERO || 62 | writeIndex == 0 // requires input to be sorted 63 | ); 64 | TEST_EQ(a(i.index), b(i.index)); 65 | } 66 | } 67 | { 68 | auto a = float3x3x3(); 69 | auto b = float3a3a3(); 70 | for (auto i = a.begin(); i != a.end(); ++i) { 71 | ECHO(i.index); 72 | Tensor::int3 sortedi = i.index; 73 | auto sign = antisymSortAndCountFlips(sortedi); 74 | ECHO(sortedi); 75 | ECHO(sign); 76 | auto writeIndex = float3a3a3::getLocalWriteForReadIndex(sortedi); 77 | ECHO(writeIndex); 78 | TEST_BOOL( 79 | sign == Tensor::Sign::ZERO || 80 | writeIndex == 0 // requires input to be sorted 81 | ); 82 | TEST_EQ(a(i.index), b(i.index)); 83 | } 84 | } 85 | 86 | { 87 | // does = work between two tensor types of matching dims? 
88 | TEST_EQ(float3a3a3(), float3x3x3()); // works 89 | TEST_EQ(float3x3x3(), float3a3a3()); // fails 90 | } 91 | 92 | // parity flipping chokes the decltype(auto) 93 | { 94 | // lambda for Levi-Civita permutation tensor 95 | auto f = [](int i, int j, int k) -> float { 96 | return sign(i-j) * sign(j-k) * sign(k-i); 97 | }; 98 | auto t = float3a3a3(f); 99 | verifyAccessRank3(t, f); 100 | verifyAccessRank3(t, f); 101 | operatorScalarTest(t); 102 | } 103 | 104 | { 105 | using float3 = Tensor::float3; 106 | auto L = float3a3a3(1); //Levi-Civita permutation tensor 107 | ECHO(L); 108 | auto x = float3(1,0,0); 109 | ECHO(x); 110 | auto dualx = x * L; 111 | ECHO(dualx); 112 | static_assert(std::is_same_v); 113 | auto y = float3(0,1,0); 114 | auto z = y * dualx; 115 | TEST_EQ(z, float3(0,0,1)); 116 | } 117 | 118 | { 119 | auto n = Tensor::float3(3,-4,8); // 3 floats :: 3-vector 120 | constexpr auto L = Tensor::float3a3a3(1); // 1 real ... value 1 121 | auto dualn = n * L; // 3 floats :: 3x3 antisymmetric matrix 122 | static_assert(std::is_same_v); 123 | ECHO((Tensor::float3x3)dualn); // {{0, 8, 4}, {-8, 0, 3}, {-4, -3, 0}} 124 | // TODO this returns an Accessor. Would be nice if we had ctors that could handle them. 125 | // would be nice if Accessors had other tensor operations (like lenSq()) 126 | #if 0 127 | Tensor::float3 nx = dualn(0); 128 | ECHO(nx); 129 | Tensor::float3 ny = dualn(1); 130 | ECHO(ny); 131 | Tensor::float3 nz = dualn(2); 132 | ECHO(nz); 133 | #else //until then ... 
you still have to expand the basii before doing operations on it 134 | Tensor::float3x3 dualnm = dualn; 135 | Tensor::float3 nx = dualnm(0); 136 | ECHO(nx); 137 | Tensor::float3 ny = dualnm(1); 138 | ECHO(ny); 139 | Tensor::float3 nz = dualnm(2); 140 | ECHO(nz); 141 | #endif 142 | } 143 | 144 | { 145 | auto n = Tensor::float3(3,-4,8); 146 | auto ns = Tensor::hodgeDual(n); 147 | static_assert(std::is_same_v); 148 | ECHO(ns); 149 | TEST_EQ(ns, n * Tensor::float3a3a3(1)); 150 | TEST_EQ((Tensor::float3x3)ns, (Tensor::float3x3)(n * Tensor::float3a3a3(1))); 151 | } 152 | // rank-2 works 153 | { 154 | Tensor::Index<'i'> i; 155 | Tensor::Index<'j'> j; 156 | Tensor::Index<'k'> k; 157 | Tensor::Index<'l'> l; 158 | auto L2 = Tensor::float2a2(1); 159 | STATIC_ASSERT_EQ(sizeof(L2), sizeof(float)); 160 | auto gkd2_4 = L2.outer(L2); 161 | STATIC_ASSERT_EQ(sizeof(gkd2_4), sizeof(float)); 162 | auto gkd2_4_index = (L2(i,j) * L2(k,l)).assign(i,j,k,l); 163 | TEST_EQ( 164 | (Tensor::tensorr(gkd2_4)), 165 | gkd2_4_index 166 | ); 167 | auto gkd2_2 = gkd2_4.trace<0,2>(); 168 | TEST_EQ(gkd2_2, Tensor::float2i2(1)); 169 | auto gkd2_0 = gkd2_2.trace<0,1>(); 170 | TEST_EQ(gkd2_0, 2); 171 | } 172 | //rank-3 works ... 173 | { 174 | Tensor::Index<'i'> i; 175 | Tensor::Index<'j'> j; 176 | Tensor::Index<'k'> k; 177 | Tensor::Index<'l'> l; 178 | Tensor::Index<'m'> m; 179 | Tensor::Index<'n'> n; 180 | auto L3 = float3a3a3(1); // Levi-Civita tensor 181 | STATIC_ASSERT_EQ(sizeof(L3), sizeof(float)); 182 | ECHO((Tensor::tensorr)L3); // looks correct 183 | // ε^ijk ε_lmn = δ^[ijk]_[lmn] = 6! 
δ^[i_l δ^j_m δ^k]_n 184 | // this is zero: 185 | auto gkd3_6 = L3.outer(L3); 186 | auto gkd3_6_expand_then_outer = L3.expand().outer(L3.expand()); 187 | STATIC_ASSERT_EQ(sizeof(gkd3_6), sizeof(float)); 188 | ECHO((Tensor::tensorr)gkd3_6); 189 | // this is correct: 190 | auto gkd3_6_index = (L3(i,j,k) * L3(l,m,n)).assign(i,j,k,l,m,n); 191 | //works 192 | TEST_EQ( 193 | gkd3_6_expand_then_outer, 194 | gkd3_6_index 195 | ); 196 | //works... 197 | TEST_EQ( 198 | (Tensor::tensorr(gkd3_6)), 199 | gkd3_6_index 200 | ); 201 | // ε^ijk ε_lmk = δ^ijk_lmk = δ^ij_lm 202 | auto gkd3_4 = gkd3_6.trace<0, 3>(); 203 | static_assert(std::is_same_v>); 204 | STATIC_ASSERT_EQ(sizeof(gkd3_4), 3 * 3 * sizeof(float)); 205 | // ε^ijk ε_ljk = δ^ijk_ljk = 2 δ^i_l 206 | auto gkd3_2 = gkd3_4.trace<0,2>(); 207 | // this would evaluate to 3 x 3 == 9 tho it could be opimized to 3 s 3 == 6 208 | static_assert(std::is_same_v); 209 | STATIC_ASSERT_EQ(sizeof(gkd3_2), 3 * 3 * sizeof(float)); 210 | TEST_EQ(gkd3_2, 2. * Tensor::float3i3(1)); 211 | auto gkd3_0 = gkd3_2.trace<0,1>(); 212 | TEST_EQ(gkd3_0, 6.); 213 | } 214 | } 215 | -------------------------------------------------------------------------------- /test/include/Test/Test.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include "Tensor/Tensor.h" 4 | #include "Common/Test.h" 5 | 6 | void test_AntiSymRef(); 7 | void test_Vector(); 8 | void test_Quat(); 9 | void test_Identity(); 10 | void test_Matrix(); 11 | void test_Symmetric(); 12 | void test_Antisymmetric(); 13 | void test_TensorRank3(); 14 | void test_TensorRank4(); 15 | void test_TotallySymmetric(); 16 | void test_TotallyAntisymmetric(); 17 | void test_Math(); 18 | void test_Index(); 19 | void test_Derivative(); 20 | void test_Valence(); 21 | 22 | template 23 | T sign (T x) { 24 | return x == T{} ? T{} : (x < T{} ? 
(T)-1 : (T)1); 25 | }; 26 | 27 | template 28 | void operatorScalarTest(T const & t) { 29 | using S = typename T::Scalar; 30 | constexpr bool sumTypeMatches = std::is_same_v< 31 | T, 32 | typename T::ScalarSumResult 33 | >; 34 | TEST_EQ(t + (S)0, t); 35 | // if the sum types don't match then constructoring a T from the scalar 1 might not give us the same as t + 1 36 | if constexpr (sumTypeMatches) { 37 | TEST_EQ((S)1 + t, t + T((S)1)); 38 | } 39 | TEST_EQ(t + T(), t); 40 | TEST_EQ(t - T(), t); 41 | TEST_EQ((S)0 - t, -t); 42 | TEST_EQ(t - (S)0, t); 43 | if constexpr (sumTypeMatches) { 44 | TEST_EQ(t - (S)1, t - T((S)1)); 45 | } 46 | TEST_EQ(t - t, T()); 47 | TEST_EQ(t - t * (S)2, -t); 48 | TEST_EQ(t - (S)2 * t, -t); 49 | TEST_EQ(t * (S)1, t); 50 | TEST_EQ(t * (S)-1, -t); 51 | TEST_EQ((S)-1 * t, -t); 52 | TEST_EQ(t * (S)2, t + t); 53 | TEST_EQ(t * (S)0, T()); 54 | TEST_EQ(t / (S)1, t); 55 | // hmm, why is this failing 56 | TEST_EQ(t / (S).5, (S)2 * t); 57 | //TEST_EQ(t / t, T((S)1)); // if t(I) == 0 then this gives nan ... so ... 58 | TEST_EQ(t / T((S)1), t); 59 | } 60 | 61 | template 62 | void operatorMatrixTest() { 63 | static_assert(T::rank >= 2); 64 | } 65 | 66 | template 67 | void verifyAccessRank1(T & t, F f) { 68 | for (int i = 0; i < T::template dim<0>; ++i) { 69 | typename T::Scalar x = f(i); 70 | // various [] and (int...) 
and (intN) 71 | TEST_EQ(t(i), x); 72 | TEST_EQ(t(Tensor::intN<1>(i)), x); 73 | TEST_EQ(t[i], x); 74 | TEST_EQ(t.s[i], x); 75 | if constexpr (!std::is_const_v) { 76 | t(i) = x; 77 | t(Tensor::intN<1>(i)) = x; 78 | t[i] = x; 79 | t.s[i] = x; 80 | } 81 | } 82 | } 83 | 84 | template 85 | void verifyAccessRank2(T & t, F f) { 86 | for (int i = 0; i < T::template dim<0>; ++i) { 87 | for (int j = 0; j < T::template dim<1>; ++j) { 88 | typename T::Scalar x = f(i,j); 89 | TEST_EQ(t(i)(j), x); 90 | TEST_EQ(t(i,j), x); 91 | TEST_EQ(t(Tensor::int2(i,j)), x); 92 | TEST_EQ(t[i](j), x); 93 | TEST_EQ(t(i)[j], x); 94 | TEST_EQ(t[i][j], x); 95 | if constexpr (!std::is_const_v) { 96 | t(i)(j) = x; 97 | t(i,j) = x; 98 | t(Tensor::int2(i,j)) = x; 99 | t[i](j) = x; 100 | t(i)[j] = x; 101 | t[i][j] = x; 102 | } 103 | } 104 | } 105 | } 106 | 107 | template 108 | void verifyAccessRank3(T & t, F f) { 109 | for (int i = 0; i < T::template dim<0>; ++i) { 110 | for (int j = 0; j < T::template dim<1>; ++j) { 111 | for (int k = 0; k < T::template dim<2>; ++k) { 112 | float x = f(i,j,k); 113 | 114 | // for vec interchangeability , do by grouping first then () then [] instead of by () then [] then grouping 115 | //()()() and any possible merged ()'s 116 | TEST_EQ(t(i)(j)(k), x); 117 | TEST_EQ(t[i](j)(k), x); 118 | TEST_EQ(t(i)[j](k), x); 119 | TEST_EQ(t(i)(j)[k], x); 120 | TEST_EQ(t[i][j](k), x); 121 | TEST_EQ(t[i](j)[k], x); 122 | TEST_EQ(t(i)[j][k], x); 123 | TEST_EQ(t[i][j][k], x); 124 | TEST_EQ(t(i,j)(k), x); 125 | TEST_EQ(t(i,j)[k], x); 126 | TEST_EQ(t(Tensor::int2(i,j))(k), x); 127 | TEST_EQ(t(Tensor::int2(i,j))[k], x); 128 | TEST_EQ(t(i)(j,k), x); 129 | TEST_EQ(t[i](j,k), x); 130 | TEST_EQ(t(i)(Tensor::int2(j,k)), x); 131 | TEST_EQ(t[i](Tensor::int2(j,k)), x); 132 | TEST_EQ(t(i,j,k), x); 133 | TEST_EQ(t(Tensor::int3(i,j,k)), x); 134 | 135 | if constexpr (!std::is_const_v) { 136 | t(i)(j)(k) = x; 137 | t[i](j)(k) = x; 138 | t(i)[j](k) = x; 139 | t(i)(j)[k] = x; 140 | t[i][j](k) = x; 141 | 
t[i](j)[k] = x; 142 | t(i)[j][k] = x; 143 | t[i][j][k] = x; 144 | t(i,j)(k) = x; 145 | t(i,j)[k] = x; 146 | t(Tensor::int2(i,j))(k) = x; 147 | t(Tensor::int2(i,j))[k] = x; 148 | t(i)(j,k) = x; 149 | t[i](j,k) = x; 150 | t(i)(Tensor::int2(j,k)) = x; 151 | t[i](Tensor::int2(j,k)) = x; 152 | t(i,j,k) = x; 153 | t(Tensor::int3(i,j,k)) = x; 154 | } 155 | } 156 | } 157 | } 158 | } 159 | 160 | template 161 | void verifyAccessRank4(T & t, F f){ 162 | for (int i = 0; i < T::template dim<0>; ++i) { 163 | for (int j = 0; j < T::template dim<1>; ++j) { 164 | for (int k = 0; k < T::template dim<2>; ++k) { 165 | for (int l = 0; l < T::template dim<3>; ++l) { 166 | float x = f(i,j,k,l); 167 | TEST_EQ(t(i)(j)(k)(l), x); 168 | TEST_EQ(t[i](j)(k)(l), x); 169 | TEST_EQ(t(i)[j](k)(l), x); 170 | TEST_EQ(t(i)(j)[k](l), x); 171 | TEST_EQ(t(i)(j)(k)[l], x); 172 | TEST_EQ(t[i][j](k)(l), x); 173 | TEST_EQ(t[i](j)[k](l), x); 174 | TEST_EQ(t[i](j)(k)[l], x); 175 | TEST_EQ(t(i)[j][k](l), x); 176 | TEST_EQ(t(i)[j](k)[l], x); 177 | TEST_EQ(t(i)(j)[k][l], x); 178 | TEST_EQ(t[i][j][k](l), x); 179 | TEST_EQ(t[i][j](k)[l], x); 180 | TEST_EQ(t[i](j)[k][l], x); 181 | TEST_EQ(t(i)[j][k][l], x); 182 | TEST_EQ(t[i][j][k][l], x); 183 | 184 | TEST_EQ(t(i,j)(k)(l), x); 185 | TEST_EQ(t(i,j)[k](l), x); 186 | TEST_EQ(t(i,j)(k)[l], x); 187 | TEST_EQ(t(i,j)(k)[l], x); 188 | TEST_EQ(t(i,j)[k][l], x); 189 | TEST_EQ(t(Tensor::int2(i,j))(k)(l), x); 190 | TEST_EQ(t(Tensor::int2(i,j))[k](l), x); 191 | TEST_EQ(t(Tensor::int2(i,j))(k)[l], x); 192 | TEST_EQ(t(Tensor::int2(i,j))(k)[l], x); 193 | TEST_EQ(t(Tensor::int2(i,j))[k][l], x); 194 | 195 | TEST_EQ(t(i,j)(k,l), x); 196 | TEST_EQ(t(Tensor::int2(i,j))(k,l), x); 197 | TEST_EQ(t(i,j)(Tensor::int2(k,l)), x); 198 | TEST_EQ(t(Tensor::int2(i,j))(Tensor::int2(k,l)), x); 199 | 200 | TEST_EQ(t(i)(j,k)(l), x); 201 | TEST_EQ(t[i](j,k)(l), x); 202 | TEST_EQ(t(i)(j,k)[l], x); 203 | TEST_EQ(t[i](j,k)[l], x); 204 | TEST_EQ(t(i)(Tensor::int2(j,k))(l), x); 205 | 
TEST_EQ(t[i](Tensor::int2(j,k))(l), x); 206 | TEST_EQ(t(i)(Tensor::int2(j,k))[l], x); 207 | TEST_EQ(t[i](Tensor::int2(j,k))[l], x); 208 | 209 | TEST_EQ(t(i)(j)(k,l), x); 210 | TEST_EQ(t[i](j)(k,l), x); 211 | TEST_EQ(t(i)[j](k,l), x); 212 | TEST_EQ(t[i][j](k,l), x); 213 | TEST_EQ(t(i)(j)(Tensor::int2(k,l)), x); 214 | TEST_EQ(t[i](j)(Tensor::int2(k,l)), x); 215 | TEST_EQ(t(i)[j](Tensor::int2(k,l)), x); 216 | TEST_EQ(t[i][j](Tensor::int2(k,l)), x); 217 | 218 | TEST_EQ(t(i)(j,k,l), x); 219 | TEST_EQ(t[i](j,k,l), x); 220 | TEST_EQ(t(i)(Tensor::int3(j,k,l)), x); 221 | TEST_EQ(t[i](Tensor::int3(j,k,l)), x); 222 | 223 | TEST_EQ(t(i,j,k)(l), x); 224 | TEST_EQ(t(Tensor::int3(i,j,k))(l), x); 225 | 226 | TEST_EQ(t(i,j,k,l), x); 227 | TEST_EQ(t(Tensor::int4(i,j,k,l)), x); 228 | } 229 | } 230 | } 231 | } 232 | } 233 | -------------------------------------------------------------------------------- /test/src/Antisymmetric.cpp: -------------------------------------------------------------------------------- 1 | #include "Test/Test.h" 2 | 3 | //#define STORAGE_LOWER //lower-triangular 4 | #define STORAGE_UPPER //upper-triangular 5 | 6 | template 7 | void verifyAccessAntisym(T & t, F f) { 8 | // "field" method access 9 | TEST_EQ(t.x_x(), f(0,0)); 10 | TEST_EQ(t.x_y(), f(0,1)); 11 | TEST_EQ(t.x_z(), f(0,2)); 12 | TEST_EQ(t.y_x(), f(1,0)); 13 | TEST_EQ(t.y_y(), f(1,1)); 14 | TEST_EQ(t.y_z(), f(1,2)); 15 | TEST_EQ(t.z_x(), f(2,0)); 16 | TEST_EQ(t.z_y(), f(2,1)); 17 | TEST_EQ(t.z_z(), f(2,2)); 18 | } 19 | 20 | void test_Antisymmetric() { 21 | #ifdef STORAGE_LOWER // lower-triangular 22 | TEST_EQ(Tensor::float3a3::getLocalReadForWriteIndex(0), Tensor::int2(1,0)); 23 | TEST_EQ(Tensor::float3a3::getLocalReadForWriteIndex(1), Tensor::int2(2,0)); 24 | TEST_EQ(Tensor::float3a3::getLocalReadForWriteIndex(2), Tensor::int2(2,1)); 25 | #endif 26 | #ifdef STORAGE_UPPER // upper-triangular 27 | TEST_EQ(Tensor::float3a3::getLocalReadForWriteIndex(0), Tensor::int2(0,1)); 28 | 
TEST_EQ(Tensor::float3a3::getLocalReadForWriteIndex(1), Tensor::int2(0,2)); 29 | TEST_EQ(Tensor::float3a3::getLocalReadForWriteIndex(2), Tensor::int2(1,2)); 30 | #endif 31 | 32 | // antisymmetric matrix 33 | /* 34 | [ 0 1 2] 35 | [-1 0 3] 36 | [-2 -3 0] 37 | */ 38 | auto f = [](int i, int j) -> float { return float(sign(j-i)*(i+j)); }; 39 | auto t = Tensor::float3a3{ 40 | /*x_y=*/f(0,1), 41 | /*x_z=*/f(0,2), 42 | /*y_z=*/f(1,2) 43 | }; 44 | #ifdef STORAGE_UPPER 45 | TEST_EQ(t.s[0], 1); 46 | TEST_EQ(t.s[1], 2); 47 | TEST_EQ(t.s[2], 3); 48 | #endif 49 | #ifdef STORAGE_LOWER 50 | TEST_EQ(t.s[0], -1); 51 | TEST_EQ(t.s[1], -2); 52 | TEST_EQ(t.s[2], -3); 53 | #endif 54 | ECHO(t); 55 | TEST_EQ(t(0,0), 0); 56 | TEST_EQ(t(0,1), 1); 57 | TEST_EQ(t(0,2), 2); 58 | TEST_EQ(t(1,0), -1); 59 | TEST_EQ(t(1,1), 0); 60 | TEST_EQ(t(1,2), 3); 61 | TEST_EQ(t(2,0), -2); 62 | TEST_EQ(t(2,1), -3); 63 | TEST_EQ(t(2,2), 0); 64 | for (int i = 0; i < 3; ++i) { 65 | for (int j = 0; j < 3; ++j) { 66 | std::cout << "\t" << t(i,j); 67 | } 68 | std::cout << std::endl; 69 | } 70 | 71 | // (int,int) access 72 | t(0,0) = 1; //cannot write to diagonals 73 | TEST_EQ(t(0,0),0); 74 | t(1,1) = 2; 75 | TEST_EQ(t(1,1),0); 76 | t(2,2) = 3; 77 | TEST_EQ(t(2,2),0); 78 | 79 | auto a = Tensor::float3a3(f); 80 | ECHO(a); 81 | TEST_EQ(a, t); 82 | 83 | //verify ctor from lambda for int2 84 | auto f2 = [](Tensor::int2 ij) -> float { return float(ij.x + ij.y); }; 85 | auto b = Tensor::float3a3(f2); //MS internal compiler error on this line 86 | ECHO(b); 87 | TEST_EQ(b, t); 88 | 89 | verifyAccessAntisym(t, f); 90 | verifyAccessAntisym(t, f); 91 | 92 | verifyAccessRank2(t, f); 93 | verifyAccessRank2(t, f); 94 | 95 | // verify antisymmetric writes work 96 | for (int i = 0; i < t.dim<0>; ++i) { 97 | for (int j = 0; j < t.dim<1>; ++j) { 98 | float k = float(1 + i + j); 99 | t(i,j) = k; 100 | if (i != j) { 101 | TEST_EQ(t(i,j), k); 102 | TEST_EQ(t(j,i), -k); 103 | } else { 104 | TEST_EQ(t(i,j), 0); 105 | } 106 | } 107 | } 
108 | 109 | // TODO verify that 'float3a3::ExpandStorage<0> == float3x3' & same with <1> 110 | 111 | // verify assignment to expanded type 112 | // TODO won't work until you get intN dereference in asym 113 | Tensor::float3x3 c = t; 114 | TEST_EQ(c, (Tensor::float3x3{ 115 | {0, -2, -3}, 116 | {2, 0, -4}, 117 | {3, 4, 0} 118 | })); 119 | 120 | //can't do yet until I fix asym access 121 | //operatorScalarTest(t); 122 | operatorMatrixTest(); 123 | 124 | { 125 | Tensor::float3x3 m = Tensor::float3x3{ 126 | {0,1,2}, 127 | {-1,0,3}, 128 | {-2,-3,0} 129 | }; 130 | ECHO(m); 131 | for (int i = 0; i < 3; ++i) { 132 | for (int j = 0; j < 3; ++j) { 133 | std::cout << "\t" << m(i,j); 134 | } 135 | std::cout << std::endl; 136 | } 137 | Tensor::float3a3 as = m; 138 | ECHO(as); 139 | for (int i = 0; i < 3; ++i) { 140 | for (int j = 0; j < 3; ++j) { 141 | std::cout << "\t" << as(i,j); 142 | } 143 | std::cout << std::endl; 144 | } 145 | //does assigning to mat work? 146 | Tensor::float3x3 mas = as; 147 | ECHO(mas); 148 | for (int i = 0; i < 3; ++i) { 149 | for (int j = 0; j < 3; ++j) { 150 | std::cout << "\t" << mas(i,j); 151 | } 152 | std::cout << std::endl; 153 | } 154 | // does equality between asym and assigned mat work? 
155 | for (int i = 0; i < 3; ++i) { 156 | for (int j = 0; j < 3; ++j) { 157 | TEST_EQ(as(i,j), m(i,j)); 158 | } 159 | } 160 | TEST_EQ(as,m); 161 | } 162 | 163 | { 164 | Tensor::float3x3 m = { 165 | {1,2,3}, 166 | {4,5,6}, 167 | {7,8,9}, 168 | }; 169 | Tensor::float3a3 a([](int i, int j) -> float { return sign(j-i)*(i+j); }); 170 | 171 | // works with unoptimized matrix-mul 172 | // crashing with the optimized version of matrix-mul 173 | auto ma = m * a; 174 | static_assert(std::is_same_v); 175 | TEST_EQ(ma, (Tensor::float3x3{ 176 | {-8, -8, 8}, 177 | {-17, -14, 23}, 178 | {-26, -20, 38}, 179 | })); 180 | 181 | static_assert(std::is_same_v); 182 | TEST_EQ( .5f * (ma + transpose(ma)), (Tensor::float3x3)makeSym(ma)); 183 | static_assert(std::is_same_v); 184 | TEST_EQ( .5f * (ma - transpose(ma)), (Tensor::float3x3)ma.makeAsym()); 185 | 186 | // TODO outer of antisym and ident is failing ... 187 | auto I = Tensor::ident(1); 188 | auto aOuterI = outer(a, I); 189 | static_assert(std::is_same_v, Tensor::storage_ident<3>>>); 190 | static_assert(std::is_same_v, Tensor::storage_asym<3>>>); 191 | static_assert(sizeof(aOuterI) == sizeof(float) * 3); // expanded storage would be 3^4 = 81 floats, but this is just 3 ... 192 | 193 | auto aTimesI = a * I; 194 | // the matrix-mul will expand the antisymmetric matrix storage to a matrix 195 | static_assert(std::is_same_v); 196 | // crashing 197 | TEST_EQ(aTimesI, a); 198 | } 199 | 200 | { 201 | auto a = Tensor::float3a3(1,2,3); 202 | static_assert(sizeof(a) == 3 * sizeof(float)); 203 | static_assert(a.localCount == 3); 204 | auto ax = a[0]; 205 | //ECHO(ax); // TODO operator<< for Accessor? 206 | // works 207 | auto axt = Tensor::float3(ax); 208 | ECHO(axt); 209 | // but I thought .rank and .dims needed to match? 210 | //ECHO(ax.rank); 211 | //ECHO(ax.dims()); 212 | /* yeah, ax.rank doesn't exist, 213 | so what kind of ctor is being used for the conversion? 
214 | oh wait, the Accessor might be usings its containing class' rank and dims for iterator as well 215 | that would mean ... meh 216 | looks like rank-2 Accessors only use 217 | TENSOR_ADD_RANK1_CALL_INDEX_AUX()\ 218 | TENSOR_ADD_INT_VEC_CALL_INDEX()\ 219 | ... and neither uses 'This' 220 | ... then rank-N Accessors use just 221 | TENSOR_ADD_RANK1_CALL_INDEX_AUX()\ 222 | so that means overall 'This' is not used by any Accessor 223 | and that means I can use 'This' within the Accessor to make it more compat with things. 224 | In fact I should move Accessors outside of the classes and just 'using' them in... 225 | ok now they're added 226 | */ 227 | ECHO(ax.rank); 228 | ECHO(ax.dims()); 229 | //weird warnings: 230 | ECHO(ax.lenSq()); 231 | } 232 | { 233 | auto a = Tensor::float3a3(1,2,3); 234 | auto ax = a[0]; 235 | ECHO(ax.lenSq()); 236 | auto ay = a[1]; 237 | ECHO(ay.lenSq()); 238 | auto az = a[2]; 239 | ECHO(az.lenSq()); 240 | } 241 | } 242 | -------------------------------------------------------------------------------- /test/src/Matrix.cpp: -------------------------------------------------------------------------------- 1 | #include "Test/Test.h" 2 | 3 | namespace Test { 4 | using namespace Common; 5 | using namespace Tensor; 6 | using namespace std; 7 | static_assert(tuple_size_v == 2); 8 | static_assert(is_same_v< 9 | tuple_element_t<0, float3x3::InnerPtrTensorTuple>, 10 | float3x3* 11 | >); 12 | static_assert(is_same_v< 13 | tuple_element_t<1, float3x3::InnerPtrTensorTuple>, 14 | float3* 15 | >); 16 | static_assert(tuple_size_v == 3); 17 | static_assert(is_same_v< 18 | tuple_element_t<0, float3x3::InnerPtrTuple>, 19 | float3x3* 20 | >); 21 | static_assert(is_same_v< 22 | tuple_element_t<1, float3x3::InnerPtrTuple>, 23 | float3* 24 | >); 25 | static_assert(is_same_v< 26 | tuple_element_t<2, float3x3::InnerPtrTuple>, 27 | float* 28 | >); 29 | static_assert(float3x3::dimseq::size() == std::tuple_size_v); 30 | static_assert(float3x3::dimseq::size() == 
float3x3::rank); 31 | STATIC_ASSERT_EQ((seq_get_v<0, float3x3::dimseq>), 3); 32 | STATIC_ASSERT_EQ((seq_get_v<1, float3x3::dimseq>), 3); 33 | } 34 | 35 | void test_Matrix() { 36 | // matrix 37 | 38 | { // 1x1 39 | using float1x1 = Tensor::tensor; 40 | //list ctor 41 | auto x = float1x1{{3}}; 42 | TEST_EQ(x(0,0), 3); 43 | using float1 = Tensor::floatN<1>; 44 | auto x1 = float1(2); 45 | // lambdas work 46 | { 47 | auto x2 = float1x1([&](int, int) -> float { return x1(0); }); 48 | TEST_EQ(x2(0,0), 2); 49 | } 50 | { 51 | auto x2 = float1x1([&](Tensor::int2) -> float { return x1(0); }); 52 | TEST_EQ(x2(0,0), 2); 53 | } 54 | { 55 | auto x2 = float1x1{x1}; 56 | TEST_EQ(x2(0,0), 2); 57 | } 58 | #if 0 59 | { // failing to compile - ambiguous constructor 60 | auto x2 = float1x1(x1); 61 | TEST_EQ(x2(0,0), 2); 62 | } 63 | #endif 64 | } 65 | 66 | { //1x2 67 | // in fact, all list ctors of dim=(1,1,1.., N), for N != 1 are failing to compile 68 | using namespace Tensor; 69 | using float1x2 = tensor; 70 | 71 | auto x1 = float2(3,4); 72 | auto x = float1x2{x1}; 73 | { 74 | ECHO(x); 75 | TEST_EQ(x, float1x2{x1}); 76 | } 77 | { 78 | auto y = float1x2{{3,4}}; 79 | TEST_EQ(x,y); 80 | TEST_EQ(x, (float1x2{{3,4}})); 81 | } 82 | #if 0 83 | { 84 | auto y = float1x2({3,4}); 85 | TEST_EQ(x,y); // errors ... initializes y to {{3,3}} 86 | } 87 | #endif 88 | #if 0 89 | TEST_EQ(x,(float1x2({3,4}))); // errors ... initializes y to {{3,3}} 90 | #endif 91 | #if 0 92 | TEST_EQ(x,(float1x2{3,4})); // should fail to compile? 
it doesn't - and does scalar ctor 93 | #endif 94 | #if 0 95 | { 96 | auto y = float1x2(x1); // won't compile 97 | ECHO(y); 98 | } 99 | #endif 100 | #if 0 101 | ECHO(float1x2(x1)); // won't compile - ambiguous conversion 102 | #endif 103 | #if 0 104 | ECHO(float1x2(float2(3,4))); // won't compile - ambiguous conversion 105 | #endif 106 | #if 0 107 | auto y = float1x2(float2(3,4)); // won't compile - ambiguous conversion 108 | #endif 109 | } 110 | 111 | { //2x1 112 | using namespace Tensor; 113 | using float1 = floatN<1>; 114 | using float2x1 = tensor; 115 | 116 | ECHO(float2x1(float1(1),float1(2))); 117 | 118 | ECHO(float2x1({1},{2})); 119 | 120 | ECHO((float2x1{{1},{2}})); 121 | 122 | auto a1 = float1(1); 123 | auto a2 = float1(2); 124 | ECHO(float2x1(a1,a2)); 125 | ECHO((float2x1{a1,a2})); 126 | 127 | auto a = float2x1{{1},{2}}; 128 | TEST_EQ(a(0,0), 1); 129 | TEST_EQ(a(1,0), 2); 130 | 131 | auto b = float2x1({1},{2}); 132 | TEST_EQ(a,b); 133 | 134 | auto c = float2x1(float1(1), float1(2)); 135 | TEST_EQ(a,c); 136 | } 137 | 138 | { //2x2 ctors 139 | using namespace Tensor; 140 | 141 | ECHO(float2x2(float2(1,2),float2(3,4))); 142 | 143 | ECHO(float2x2({1,2},{3,4})); 144 | 145 | ECHO((float2x2{{1,2},{3,4}})); 146 | 147 | auto a1 = float2(1,2); 148 | auto a2 = float2(3,4); 149 | ECHO(float2x2(a1,a2)); 150 | ECHO((float2x2{a1,a2})); 151 | 152 | auto a = float2x2{{1,2},{3,4}}; 153 | ECHO(a); 154 | 155 | auto b = float2x2({1,2},{3,4}); 156 | ECHO(b); 157 | 158 | auto c = float2x2(float2(1,2),float2(3,4)); 159 | ECHO(c); 160 | } 161 | 162 | //bracket ctor 163 | Tensor::float3x3 m = { 164 | {1,2,3}, 165 | {4,5,6}, 166 | {7,8,9}, 167 | }; 168 | 169 | //dims and rank. really these are static_assert's, except dims, but it could be, but I'd have to constexpr some things ... 
170 | STATIC_ASSERT_EQ(m.rank, 2); 171 | STATIC_ASSERT_EQ((m.dim<0>), 3); 172 | STATIC_ASSERT_EQ((m.dim<1>), 3); 173 | TEST_EQ(m.dims(), Tensor::int2(3,3)); 174 | STATIC_ASSERT_EQ(m.numNestings, 2); 175 | STATIC_ASSERT_EQ((m.count<0>), 3); 176 | STATIC_ASSERT_EQ((m.count<1>), 3); 177 | 178 | // .x .y .z indexing 179 | TEST_EQ(m.x.x, 1); 180 | TEST_EQ(m.x.y, 2); 181 | TEST_EQ(m.x.z, 3); 182 | TEST_EQ(m.y.x, 4); 183 | TEST_EQ(m.y.y, 5); 184 | TEST_EQ(m.y.z, 6); 185 | TEST_EQ(m.z.x, 7); 186 | TEST_EQ(m.z.y, 8); 187 | TEST_EQ(m.z.z, 9); 188 | 189 | // .s0 .s1 .s2 indexing 190 | TEST_EQ(m.s0.s0, 1); 191 | TEST_EQ(m.s0.s1, 2); 192 | TEST_EQ(m.s0.s2, 3); 193 | TEST_EQ(m.s1.s0, 4); 194 | TEST_EQ(m.s1.s1, 5); 195 | TEST_EQ(m.s1.s2, 6); 196 | TEST_EQ(m.s2.s0, 7); 197 | TEST_EQ(m.s2.s1, 8); 198 | TEST_EQ(m.s2.s2, 9); 199 | 200 | // indexing - various [] and (int...) and (intN) 201 | auto f = [](int i, int j) -> float { return 1 + j + 3 * i; }; 202 | verifyAccessRank2(m, f); 203 | verifyAccessRank2(m, f); 204 | 205 | //matrix-specific access , doesn't work for sym or asym 206 | auto verifyAccessMat = [](T & t, F f) { 207 | for (int i = 0; i < T::template dim<0>; ++i) { 208 | for (int j = 0; j < T::template dim<1>; ++j) { 209 | typename T::Scalar e = f(i,j); 210 | TEST_EQ(t.s[i](j), e); 211 | TEST_EQ(t.s[i][j], e); 212 | TEST_EQ(t(i).s[j], e); 213 | TEST_EQ(t[i].s[j], e); 214 | TEST_EQ(t.s[i].s[j], e); 215 | } 216 | } 217 | }; 218 | verifyAccessMat.template operator()(m, f); 219 | verifyAccessMat.template operator()(m, f); 220 | 221 | // scalar ctor 222 | // TODO how do GLSL matrix ctor from scalars work? 223 | // do they initialize to full scalars like vecs do? 224 | // do they initialize to ident times scalar like math do? 
225 | TEST_EQ(Tensor::float3x3(3), (Tensor::float3x3{{3,3,3},{3,3,3},{3,3,3}})); 226 | TEST_EQ(Tensor::float3x3(3), (Tensor::float3x3({3,3,3},{3,3,3},{3,3,3}))); 227 | 228 | // lambda constructor 229 | // row-major, sequential in memory: 230 | TEST_EQ(m, Tensor::float3x3([](Tensor::int2 i) -> float { return 1 + i(1) + 3 * i(0); })); 231 | // col-major, sequential in memory: 232 | // why I don't do col-major? because then it's transposed of C construction, and that means m[i][j] == m.s[j].s[i] so your indexes are transposed the storage indexes 233 | //TEST_EQ(m, Tensor::float3x3([](Tensor::int2 i) -> float { return 1 + i(0) + 3 * i(1); })); 234 | 235 | // TODO casting ctor 236 | 237 | // read iterator 238 | { 239 | auto i = m.begin(); 240 | if constexpr (Tensor::int2::useReadIteratorOuter) { 241 | // iterating in memory order for row-major 242 | // also in left-right top-bottom order when read 243 | // but you have to increment the last index first and first index last 244 | // TODO really? 245 | // but lambda init is now m(i,j) == 1 + i(1) + 3 * i(0) ... transposed of typical memory indexing 246 | TEST_EQ(*i, 1); ++i; 247 | TEST_EQ(*i, 2); ++i; 248 | TEST_EQ(*i, 3); ++i; 249 | TEST_EQ(*i, 4); ++i; 250 | TEST_EQ(*i, 5); ++i; 251 | TEST_EQ(*i, 6); ++i; 252 | TEST_EQ(*i, 7); ++i; 253 | TEST_EQ(*i, 8); ++i; 254 | TEST_EQ(*i, 9); ++i; 255 | TEST_EQ(i, m.end()); 256 | } else { 257 | //iterating transpose to memory order for row-major 258 | // - inc first index first, last index last 259 | // but lambda init is now m(i,j) == 1 + i(0) + 3 * i(1) ... typical memory indexing 260 | // I could fulfill both at the same time by making my matrices column-major, like OpenGL does ... tempting ... 
261 | TEST_EQ(*i, 1); ++i; 262 | TEST_EQ(*i, 4); ++i; 263 | TEST_EQ(*i, 7); ++i; 264 | TEST_EQ(*i, 2); ++i; 265 | TEST_EQ(*i, 5); ++i; 266 | TEST_EQ(*i, 8); ++i; 267 | TEST_EQ(*i, 3); ++i; 268 | TEST_EQ(*i, 6); ++i; 269 | TEST_EQ(*i, 9); ++i; 270 | TEST_EQ(i, m.end()); 271 | } 272 | } 273 | 274 | // write iterator (should match read iterator except for symmetric members) 275 | 276 | // sub-vectors (row) 277 | TEST_EQ(m[0], Tensor::float3(1,2,3)); 278 | TEST_EQ(m[1], Tensor::float3(4,5,6)); 279 | TEST_EQ(m[2], Tensor::float3(7,8,9)); 280 | 281 | // TODO matrix subset access 282 | 283 | // TODO matrix swizzle 284 | 285 | // operators 286 | operatorScalarTest(m); 287 | } 288 | -------------------------------------------------------------------------------- /include/Tensor/Quat.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include "Tensor/Vector.h" 4 | #include "Tensor/clamp.h" 5 | #include 6 | 7 | // TODO any vec& member return types, like operator+=, will have to be overloaded 8 | // and it looks like I can't crtp the operator+= members due to vec having unions? 
9 | 10 | namespace Tensor { 11 | 12 | template 13 | struct quat; 14 | 15 | template struct is_quat : public std::false_type {}; 16 | template struct is_quat> : public std::true_type {}; 17 | template constexpr bool is_quat_v = is_quat::value; 18 | 19 | template 20 | quat operator*(quat a, quat b); 21 | 22 | template 23 | struct quat : public vec4 { 24 | using Super = vec4; 25 | // disable the is_tensor_v flag for quaternion so tensor-mul doesn't try indexing into it, so that a matrix-of-quats times a matrix-of-quats produces a matrix-of-quats (and not a rank-5 object) 26 | //TENSOR_THIS(quat) 27 | using This = quat; 28 | //static constexpr bool isTensorFlag = true; 29 | static constexpr bool isTensorFlag = false; 30 | static constexpr bool dontUseFieldsOStream = true; 31 | 32 | TENSOR_SET_INNER_LOCALDIM_LOCALRANK(Inner_, 4, 1) 33 | TENSOR_TEMPLATE_T(quat) 34 | TENSOR_HEADER_VECTOR_SPECIFIC() // defines localCount=localDim, matching for vec and quat 35 | TENSOR_HEADER() 36 | 37 | using vec3 = ::Tensor::vec3; 38 | using mat3x3 = ::Tensor::mat3x3; 39 | 40 | // ok here's a dilemma ... 41 | // default ident quat would be useful for rotations 42 | // but for sums-of-quats, and consistency of equating quats with reals, it is useful to default this to zero 43 | // constexpr quat() : Super(0,0,0,1) {} 44 | constexpr quat() : Super(0,0,0,0) {} 45 | 46 | // mathematically, a real is equivalent to a quaternion with only the real component defined ... 47 | constexpr quat(Inner const & w) : Super(0,0,0,w) {} 48 | 49 | constexpr quat(Inner const & x, Inner const & y, Inner const & z, Inner const & w) : Super(x,y,z,w) {} 50 | 51 | //TENSOR_ADD_OPS parts: 52 | // ok I don't just want any tensor constructor ... 
53 | //TENSOR_ADD_CTOR_FOR_GENERIC_TENSORS(quat) 54 | // for 3-component vectors I want to initialize s[0] to 0 and fill the rest 55 | // for 4-component vectors I want to copy all across 56 | template 57 | requires (is_tensor_v && T::rank == 1 && T::template dim<0> == 3) 58 | quat(T const & t) : Super(t(0), t(1), t(2), 0) {} 59 | template 60 | requires ((is_tensor_v && T::rank == 1 && T::template dim<0> == 4) || (is_quat_v)) 61 | quat(T const & t) : Super(t(0), t(1), t(2), t(3)) {} 62 | 63 | TENSOR_ADD_LAMBDA_CTOR(quat) 64 | TENSOR_ADD_ITERATOR() 65 | 66 | //conjugate 67 | // same as inverse if the quat is unit length 68 | quat conjugate() const { 69 | return quat(-this->x, -this->y, -this->z, this->w); 70 | } 71 | 72 | //inverse 73 | quat inverse() const { 74 | return conjugate() / this->lenSq(); 75 | } 76 | 77 | //angle-axis, where angle is in radians 78 | quat fromAngleAxis() const { 79 | Inner const c = cos(this->w / 2); 80 | Inner const n = sqrt(this->x * this->x + this->y * this->y + this->z * this->z); 81 | Inner const sn = sin(this->w / 2) / n; 82 | return {sn * this->x, sn * this->y, sn * this->z, c}; 83 | } 84 | 85 | static Inner angleAxisEpsilon; 86 | 87 | quat toAngleAxis() const { 88 | Inner const cosHalfAngle = clamp(this->w, (Inner)-1, (Inner)1); 89 | Inner const halfAngle = acos(cosHalfAngle); 90 | Inner const scale = sin(halfAngle); 91 | if (std::abs(scale) <= angleAxisEpsilon) return quat(0,0,1,0); 92 | return {this->x / scale, this->y / scale, this->z / scale, 2 * halfAngle}; 93 | } 94 | 95 | static quat mul(quat const &q, quat const &r) { 96 | Inner const a = (q.w + q.x) * (r.w + r.x); 97 | Inner const b = (q.z - q.y) * (r.y - r.z); 98 | Inner const c = (q.x - q.w) * (r.y + r.z); 99 | Inner const d = (q.y + q.z) * (r.x - r.w); 100 | Inner const e = (q.x + q.z) * (r.x + r.y); 101 | Inner const f = (q.x - q.z) * (r.x - r.y); 102 | Inner const g = (q.w + q.y) * (r.w - r.z); 103 | Inner const h = (q.w - q.y) * (r.w + r.z); 104 | 105 | return { 106 
| a - ( e + f + g + h) / 2, 107 | -c + ( e - f + g - h) / 2, 108 | -d + ( e - f - g + h) / 2, 109 | b + (-e - f + g + h) / 2, 110 | }; 111 | } 112 | 113 | // would like a shorthand name for this instead of 'template subset<3>' ... 114 | // .vec and .vec3 are taken 115 | vec3 & axis() { return Super::template subset<3>(); } 116 | vec3 const & axis() const { return Super::template subset<3>(); } 117 | 118 | vec3 rotate(vec3 const & v) const { 119 | return (*this * quat(v) * conjugate()).axis(); 120 | } 121 | 122 | vec3 xAxis() const { 123 | return { 124 | 1 - 2 * (this->y * this->y + this->z * this->z), 125 | 2 * (this->x * this->y + this->z * this->w), 126 | 2 * (this->x * this->z - this->w * this->y) 127 | }; 128 | } 129 | 130 | vec3 yAxis() const { 131 | return { 132 | 2 * (this->x * this->y - this->w * this->z), 133 | 1 - 2 * (this->x * this->x + this->z * this->z), 134 | 2 * (this->y * this->z + this->w * this->x) 135 | }; 136 | } 137 | 138 | vec3 zAxis() const { 139 | return { 140 | 2 * (this->x * this->z + this->w * this->y), 141 | 2 * (this->y * this->z - this->w * this->x), 142 | 1 - 2 * (this->x * this->x + this->y * this->y) 143 | }; 144 | } 145 | 146 | mat3x3 toMatrix() const { 147 | return mat3x3(xAxis(), yAxis(), zAxis()).transpose(); 148 | } 149 | 150 | quat & operator*=(quat const & o) { 151 | *this = *this * o; 152 | return *this; 153 | } 154 | 155 | quat operator-() const { 156 | return quat(Super::operator-()); 157 | } 158 | }; 159 | 160 | template 161 | T quat::angleAxisEpsilon = (T)1e-4; 162 | 163 | // TODO more math operators, correctly implementing quaternion math (ex: scalar mul, quat inv) 164 | template 165 | quat operator*(quat a, quat b) { 166 | return quat::mul(a,b); 167 | } 168 | 169 | //Q operator*(Q const & a, Q const & b) { return Q(operator*((Q::Super)a,(Q::Super)b)); } 170 | template decltype(auto) operator+(quat const & a, quat const & b) { using S = decltype(A() + B()); return quat(typename quat::Super(a) + typename 
quat::Super(b)); } 171 | template decltype(auto) operator-(quat const & a, quat const & b) { using S = decltype(A() - B()); return quat(typename quat::Super(a) - typename quat::Super(b)); } 172 | template decltype(auto) operator/(quat const & a, quat const & b) { using S = decltype(A() / B()); return quat(typename quat::Super(a) / typename quat::Super(b)); } 173 | template decltype(auto) operator+(quat const & a, B const & b) { using S = decltype(A() + B()); return quat(typename quat::Super(a) + b); } 174 | template decltype(auto) operator-(quat const & a, B const & b) { using S = decltype(A() - B()); return quat(typename quat::Super(a) - b); } 175 | template decltype(auto) operator*(quat const & a, B const & b) { using S = decltype(A() * B()); return quat(typename quat::Super(a) * b); } 176 | template decltype(auto) operator/(quat const & a, B const & b) { using S = decltype(A() / B()); return quat(typename quat::Super(a) / b); } 177 | template decltype(auto) operator+(A const & a, quat const & b) { using S = decltype(A() + B()); return quat(a + typename quat::Super(b)); } 178 | template decltype(auto) operator-(A const & a, quat const & b) { using S = decltype(A() - B()); return quat(a - typename quat::Super(b)); } 179 | template decltype(auto) operator*(A const & a, quat const & b) { using S = decltype(A() * B()); return quat(a * typename quat::Super(b)); } 180 | template decltype(auto) operator/(A const & a, quat const & b) { using S = decltype(A() / B()); return quat(a / typename quat::Super(b)); } 181 | 182 | template 183 | quat normalize(quat const & q) { 184 | T len = q.length(); 185 | if (len <= quat::angleAxisEpsilon) return quat(); 186 | return (quat)(q / len); 187 | } 188 | 189 | template 190 | std::ostream & operator<<(std::ostream & o, quat const & q) { 191 | char const * seporig = ""; 192 | char const * sep = seporig; 193 | for (int i = 0; i < 4; ++i) { 194 | auto const & qi = q[i]; 195 | if (qi != 0) { 196 | o << sep; 197 | if (qi == -1) { 198 | o 
<< "-"; 199 | } else if (qi != 1) { 200 | o << qi << "*"; 201 | } 202 | o << "e_" << ((i + 1) % 4); // TODO quaternion indexing ... 203 | sep = " + "; 204 | } 205 | } 206 | if (sep == seporig) { 207 | return o << "0"; 208 | } 209 | return o; 210 | } 211 | 212 | 213 | 214 | using quati = quat; // I don't judge 215 | using quatf = quat; 216 | using quatd = quat; 217 | 218 | } 219 | -------------------------------------------------------------------------------- /test/src/TotallySymmetric.cpp: -------------------------------------------------------------------------------- 1 | #include "Test/Test.h" 2 | 3 | // TODO move this to Tensor/Vector.h.h 4 | namespace Tensor { 5 | using float3x3x3 = tensorr; 6 | using float3x3i3 = tensori, storage_ident<3>>; 7 | using float3i3x3 = tensori, storage_vec<3>>; 8 | using float3x3s3 = tensori, storage_sym<3>>; 9 | using float3s3x3 = tensori, storage_vec<3>>; 10 | using float3x3a3 = tensori, storage_asym<3>>; 11 | using float3a3x3 = tensori, storage_vec<3>>; 12 | } 13 | namespace TestTotallySymmetric { 14 | using namespace Tensor; 15 | using namespace std; 16 | 17 | // tensorx creation notation 18 | // test against tensori so I'm sure it's not just testing itself to itself 19 | static_assert(is_same_v, tensori>>); //T_i float# 20 | static_assert(is_same_v, tensori, storage_vec<3>>>); //T_ij float#x# 21 | static_assert(is_same_v, tensori>>); //δ_ij = δ_(ij) float#i# 22 | static_assert(is_same_v, tensori>>); //T_ij = T_(ij) float#s# 23 | static_assert(is_same_v, tensori>>); //T_ij = T_[ij] float#a# 24 | static_assert(is_same_v, tensori, storage_vec<3>, storage_vec<3>>>); //T_ijk float#x#x# 25 | static_assert(is_same_v, tensori, storage_ident<3>>>); //T_ijk = a_i δ_(jk) 26 | static_assert(is_same_v, tensori, storage_sym<3>>>); //T_ijk = T_i(jk) 27 | static_assert(is_same_v, tensori, storage_asym<3>>>); //T_ijk = T_i[jk] 28 | static_assert(is_same_v, tensori, storage_vec<3>>>); //T_ijk = δ_(ij) b_k 29 | static_assert(is_same_v, tensori, 
storage_vec<3>>>); //T_ijk = T_(ij)k 30 | static_assert(is_same_v, tensori, storage_vec<3>>>); //T_ijk = T_[ij]_k 31 | static_assert(is_same_v, tensori>>); //T_ijk = T_[ijk] float#s#s#, # >= 3 32 | static_assert(is_same_v, tensori>>); //T_ijk = T_[ijk] float#a#a#, # >= 3 33 | static_assert(is_same_v, tensori, storage_vec<3>, storage_vec<3>, storage_vec<3>>>); //T_ijkl 34 | static_assert(is_same_v, tensori, storage_vec<3>, storage_sym<3>>>); //T_ijkl = T_ij(kl) 35 | static_assert(is_same_v, tensori, storage_vec<3>, storage_ident<3>>>); //T_ijkl = a_ij δ_(kl) 36 | static_assert(is_same_v, tensori, storage_vec<3>, storage_asym<3>>>); //T_ijkl = T_ij[kl] 37 | static_assert(is_same_v, tensori, storage_sym<3>, storage_vec<3>>>); //T_ijkl = T_i(jk)l 38 | static_assert(is_same_v, tensori, storage_ident<3>, storage_vec<3>>>); //T_ijkl = a_i δ_(jk) c_l 39 | static_assert(is_same_v, tensori, storage_asym<3>, storage_vec<3>>>); //T_ijkl = T_i[jk]l 40 | static_assert(is_same_v, tensori, storage_vec<3>, storage_vec<3>>>); //T_ijkl = T_(ij)kl 41 | static_assert(is_same_v, tensori, storage_vec<3>, storage_vec<3>>>); //T_ijkl = delta_(ij) b_kl 42 | static_assert(is_same_v, tensori, storage_sym<3>>>); //T_ijkl = T_(ij)(kl) 43 | static_assert(is_same_v, tensori, storage_sym<3>>>); //T_ijkl = delta_(ij) b_(kl) 44 | static_assert(is_same_v, tensori, storage_asym<3>>>); //T_ijkl = T_(ij)[kl] 45 | static_assert(is_same_v, tensori, storage_asym<3>>>); //T_ijkl = delta_(ij) b_[kl] 46 | static_assert(is_same_v, tensori, storage_vec<3>, storage_vec<3>>>); //T_ijkl = T_[ij]kl 47 | static_assert(is_same_v, tensori, storage_sym<3>>>); //T_ijkl = T_[ij](kl) 48 | static_assert(is_same_v, tensori, storage_ident<3>>>); //T_ijkl = a_[ij] delta_(kl) 49 | static_assert(is_same_v, tensori, storage_asym<3>>>); //T_ijkl = T_[ij][kl] 50 | static_assert(is_same_v, tensori, storage_symR<3, 3>>>); //T_ijkl = T_i(jkl) 51 | static_assert(is_same_v, tensori, storage_asymR<3, 3>>>); //T_ijkl = T_i[jkl] 52 | 
static_assert(is_same_v, tensori, storage_vec<3>>>); //T_ijkl = T_(ijk)l 53 | static_assert(is_same_v, tensori, storage_vec<3>>>); //T_ijkl = T_[ijk]l 54 | static_assert(is_same_v, tensori>>); //T_ijkl = T_(ijkl) float#s#s#s#, # >= 4 55 | static_assert(is_same_v, tensori>>); //T_ijkl = T_[ijkl] float#a#a#a#, # >= 4 56 | 57 | // TODO storage helper? if the user chooses storage_symR rank=1 then use storage_vec, for rank=2 use storage_sym ... 58 | // however doing symR for rank==1 or rank==2 does static_assert / require fail, 59 | // so should I just also let storage_symR fail in the same way? 60 | 61 | //RemoveIndex: 62 | static_assert(is_same_v, float3>); 63 | static_assert(is_same_v, float3a3>); 64 | // TODO get this to work 65 | static_assert(is_same_v, float3x3>); 66 | static_assert(is_same_v, float3a3>); 67 | 68 | 69 | STATIC_ASSERT_EQ((constexpr_isqrt(0)), 0); 70 | STATIC_ASSERT_EQ((constexpr_isqrt(1)), 1); 71 | STATIC_ASSERT_EQ((constexpr_isqrt(2)), 1); 72 | STATIC_ASSERT_EQ((constexpr_isqrt(3)), 1); 73 | STATIC_ASSERT_EQ((constexpr_isqrt(4)), 2); 74 | STATIC_ASSERT_EQ((constexpr_isqrt(5)), 2); 75 | STATIC_ASSERT_EQ((constexpr_isqrt(6)), 2); 76 | STATIC_ASSERT_EQ((constexpr_isqrt(7)), 2); 77 | STATIC_ASSERT_EQ((constexpr_isqrt(8)), 2); 78 | STATIC_ASSERT_EQ((constexpr_isqrt(9)), 3); 79 | 80 | static_assert(constexpr_factorial(0) == 1); 81 | static_assert(constexpr_factorial(1) == 1); 82 | static_assert(constexpr_factorial(2) == 2); 83 | static_assert(constexpr_factorial(3) == 6); 84 | static_assert(constexpr_factorial(4) == 24); 85 | 86 | static_assert(nChooseR(0,0) == 1); 87 | static_assert(nChooseR(1,0) == 1); 88 | static_assert(nChooseR(1,1) == 1); 89 | static_assert(nChooseR(2,0) == 1); 90 | static_assert(nChooseR(2,1) == 2); 91 | static_assert(nChooseR(2,2) == 1); 92 | static_assert(nChooseR(3,0) == 1); 93 | static_assert(nChooseR(3,1) == 3); 94 | static_assert(nChooseR(3,2) == 3); 95 | static_assert(nChooseR(3,3) == 1); 96 | static_assert(nChooseR(4,0) 
== 1); 97 | static_assert(nChooseR(4,1) == 4); 98 | static_assert(nChooseR(4,2) == 6); 99 | static_assert(nChooseR(4,3) == 4); 100 | static_assert(nChooseR(4,4) == 1); 101 | } 102 | 103 | 104 | 105 | void test_TotallySymmetric() { 106 | /* 107 | unique indexing of 3s3s3: 108 | 000 109 | 001 010 100 110 | 002 020 200 111 | 011 101 110 112 | 012 021 102 120 201 210 113 | 022 202 220 114 | 111 115 | 112 121 211 116 | 122 212 221 117 | 222 118 | */ 119 | using float3s3s3 = Tensor::symR; 120 | static_assert(sizeof(float3s3s3) == sizeof(float) * 10); 121 | { 122 | auto t = float3s3s3(); 123 | 124 | t(0,0,0) = 1; 125 | TEST_EQ(t(0,0,0), 1); 126 | 127 | t(1,0,0) = 2; 128 | TEST_EQ(t(1,0,0), 2); 129 | TEST_EQ(t(0,1,0), 2); 130 | TEST_EQ(t(0,0,1), 2); 131 | 132 | // Iterator needs operator()(intN i) 133 | auto i = t.begin(); 134 | TEST_EQ(*i, 1); 135 | } 136 | 137 | { 138 | auto f = [](int i, int j, int k) -> float { return i + j + k; }; 139 | auto t = float3s3s3(f); 140 | verifyAccessRank3(t, f); 141 | verifyAccessRank3(t, f); 142 | 143 | // operators need Iterator 144 | operatorScalarTest(t); 145 | } 146 | 147 | { 148 | auto f = [](int i, int j, int k) -> float { return i + j + k; }; 149 | auto t = float3s3s3(f); 150 | 151 | auto v = Tensor::float3{3, 6, 9}; 152 | auto m = v * t; 153 | static_assert(std::is_same_v); 154 | } 155 | 156 | // make sure call-through works 157 | { 158 | using namespace Tensor; 159 | // t_ijklmn = t_(ij)(klm)n 160 | auto f = [](int i, int j, int k, int l, int m, int n) -> float { 161 | return (i+j) - (k+l+m) + n; 162 | }; 163 | auto t = tensori, storage_symR<3,3>, storage_vec<3>>(f); 164 | ECHO(t); 165 | ECHO(t(0,0,0,0,0,0)); 166 | } 167 | } 168 | -------------------------------------------------------------------------------- /test/src/Math.cpp: -------------------------------------------------------------------------------- 1 | #include "Test/Test.h" 2 | 3 | namespace MathTest { 4 | using namespace Tensor; 5 | using namespace std; 6 | 
static_assert(is_same_v())), vec>); 7 | static_assert(is_same_v())), sym>); 8 | static_assert(is_same_v())), symR>); 9 | static_assert(is_same_v())), symR>); 10 | } 11 | 12 | #define TENSOR_TEST_1(\ 13 | funcName,\ 14 | resultType, resultArgs,\ 15 | inputAType, inputAArgs\ 16 | )\ 17 | /* member */\ 18 | /* lvalues */\ 19 | [](inputAType const & a){\ 20 | auto c = a.funcName();\ 21 | static_assert(std::is_same_v);\ 22 | TEST_EQ(c, resultType resultArgs);\ 23 | }(inputAType inputAArgs);\ 24 | /* rvalues */\ 25 | [](inputAType && a){\ 26 | auto c = a.funcName();\ 27 | static_assert(std::is_same_v);\ 28 | TEST_EQ(c, resultType resultArgs);\ 29 | }(inputAType inputAArgs);\ 30 | /* this should be rvalue too right? */\ 31 | {\ 32 | auto c = (inputAType inputAArgs).funcName();\ 33 | static_assert(std::is_same_v);\ 34 | TEST_EQ(c, resultType resultArgs);\ 35 | }\ 36 | /* global */\ 37 | /* lvalues */\ 38 | [](inputAType const & a){\ 39 | auto c = funcName(a);\ 40 | static_assert(std::is_same_v);\ 41 | TEST_EQ(c, resultType resultArgs);\ 42 | }(inputAType inputAArgs);\ 43 | /* rvalues */\ 44 | [](inputAType && a){\ 45 | auto c = funcName(a);\ 46 | static_assert(std::is_same_v);\ 47 | TEST_EQ(c, resultType resultArgs);\ 48 | }(inputAType inputAArgs);\ 49 | /* same? 
*/\ 50 | {\ 51 | auto c = funcName(inputAType inputAArgs);\ 52 | static_assert(std::is_same_v);\ 53 | TEST_EQ(c, resultType resultArgs);\ 54 | } 55 | 56 | #define TENSOR_TEST_2(\ 57 | funcName,\ 58 | resultType, resultArgs,\ 59 | inputAType, inputAArgs,\ 60 | inputBType, inputBArgs\ 61 | )\ 62 | /* member */\ 63 | /* lvalues */\ 64 | [](inputAType const & a, inputBType const & b){\ 65 | auto c = a.funcName(b);\ 66 | static_assert(std::is_same_v);\ 67 | TEST_EQ(c, resultType resultArgs);\ 68 | }(inputAType inputAArgs, inputBType inputBArgs);\ 69 | /* rvalues */\ 70 | [](inputAType && a, inputBType && b){\ 71 | auto c = a.funcName(b);\ 72 | static_assert(std::is_same_v);\ 73 | TEST_EQ(c, resultType resultArgs);\ 74 | }(inputAType inputAArgs, inputBType inputBArgs);\ 75 | /* same? */\ 76 | {\ 77 | auto c = (inputAType inputAArgs).funcName(inputBType inputBArgs);\ 78 | static_assert(std::is_same_v);\ 79 | TEST_EQ(c, resultType resultArgs);\ 80 | }\ 81 | /* global */\ 82 | /* lvalues */\ 83 | [](inputAType const & a, inputBType const & b){\ 84 | auto c = funcName(a,b);\ 85 | static_assert(std::is_same_v);\ 86 | TEST_EQ(c, resultType resultArgs);\ 87 | }(inputAType inputAArgs, inputBType inputBArgs);\ 88 | /* rvalues */\ 89 | [](inputAType && a, inputBType && b){\ 90 | auto c = funcName(a,b);\ 91 | static_assert(std::is_same_v);\ 92 | TEST_EQ(c, resultType resultArgs);\ 93 | }(inputAType inputAArgs, inputBType inputBArgs);\ 94 | /* same? */\ 95 | {\ 96 | auto c = funcName(inputAType inputAArgs, inputBType inputBArgs);\ 97 | static_assert(std::is_same_v);\ 98 | TEST_EQ(c, resultType resultArgs);\ 99 | } 100 | 101 | // testing the tensor math functions, hopefully as lvalues and rvalues, as globals and members ... 
102 | void test_Math() { 103 | using namespace Tensor; 104 | 105 | // TODO test dif ranks, test sym vs asym 106 | // for some, test template args 107 | TENSOR_TEST_2(elemMul, float3, (4, 10, 18), float3, (1,2,3), float3, (4,5,6)); 108 | TENSOR_TEST_2(matrixCompMult, float3, (4, 10, 18), float3, (1,2,3), float3, (4,5,6)); 109 | TENSOR_TEST_2(hadamard, float3, (4, 10, 18), float3, (1,2,3), float3, (4,5,6)); 110 | TENSOR_TEST_2(inner, float, (32), float3, (1,2,3), float3, (4,5,6)); 111 | TENSOR_TEST_2(dot, float, (32), float3, (1,2,3), float3, (4,5,6)); 112 | TENSOR_TEST_1(lenSq, float, (14), float3, (1,2,3)); 113 | TENSOR_TEST_1(length, float, (43), float3, (9,18,38)); 114 | TENSOR_TEST_2(distance, float, (43), float3, (2,27,33), float3, (-7,9,-5)); 115 | TENSOR_TEST_1(normalize, float3, (0, .6f, .8f), float3, (0,3,4)); 116 | TENSOR_TEST_2(cross, float3, (0,0,1), float3, (1,0,0), float3, (0,1,0)); 117 | TENSOR_TEST_2(outer, float3x3, ({{0,1,0},{0,0,0},{0,0,0}}), float3, (1,0,0), float3, (0,1,0)); 118 | TENSOR_TEST_2(outerProduct, float3x3, ({{0,1,0},{0,0,0},{0,0,0}}), float3, (1,0,0), float3, (0,1,0)); 119 | // which indexes to transpose 120 | // verify sym tr = sym, asym tr = -asym, symR and asymR too, ident tr = ident 121 | TENSOR_TEST_1(transpose, float3x3, ({{1,4,7},{2,5,8},{3,6,9}}), float3x3, ({{1,2,3},{4,5,6},{7,8,9}})); 122 | // which indexes to contract / trace 123 | TENSOR_TEST_1(contract, float, (15.f), float3x3, ({{1,4,7},{2,5,8},{3,6,9}})); 124 | TENSOR_TEST_1(trace, float, (15.f), float3x3, ({{1,4,7},{2,5,8},{3,6,9}})); 125 | // = which indexes & how many to contract 126 | TENSOR_TEST_1(contractN, float, (15.f), float3x3, ({{1,4,7},{2,5,8},{3,6,9}})); 127 | // = how many indexes to contract 128 | TENSOR_TEST_2(interior, float, (32), float3, (1,2,3), float3, (4,5,6)); 129 | TENSOR_TEST_1(diagonal, float3s3, (float3x3{{1,0,0},{0,2,0},{0,0,3}}), float3, (1,2,3)); 130 | //makeSym 131 | //makeAsym 132 | //wedge 133 | //hodgeDual 134 | //operator* 135 | 
TENSOR_TEST_1(determinant, float, (1.f), float3x3, ({{1,0,0},{0,1,0},{0,0,1}})); 136 | TENSOR_TEST_1(inverse, float3x3, ({{1,0,0},{0,1,0},{0,0,1}}), float3x3, ({{1,0,0},{0,1,0},{0,0,1}})); 137 | TENSOR_TEST_2(inverse, float3x3, ({{1,0,0},{0,1,0},{0,0,1}}), float3x3, ({{1,0,0},{0,1,0},{0,0,1}}), float, (1.f)); 138 | 139 | static_assert(sizeof(tensori, storage_zero<3>, storage_zero<3>>) == sizeof(float)); 140 | 141 | static_assert(std::is_same_v, storage_zero<3>>>); 142 | static_assert(std::is_same_v, storage_zero<3>, storage_zero<3>>>); 143 | 144 | static_assert(std::is_same_v, storage_zero<3>>>); 145 | static_assert(std::is_same_v, storage_zero<3>, storage_zero<3>>>); 146 | 147 | // does a.dot(b) == a.wedge(b.hodgeDual) ? 148 | // probably not for non-antisymmetric a and b (since a∧✱b will antisymmetrize a and b) 149 | // but will it have a 1/k! factor for k-forms a and b? 150 | // looks like it ... 151 | // 1-form test 152 | { 153 | auto a = float3([](int i) -> float { return i + 1; }); 154 | auto b = float3([](int i) -> float { return 5 - 2*i; }); 155 | ECHO(a); 156 | ECHO(b); 157 | ECHO(a.dot(b)); 158 | ECHO(b.dual().expand()); 159 | ECHO(b.dual().dual().expand()); 160 | ECHO(a.wedge(b.dual()).expand()); 161 | ECHO(a.wedge(b.dual()).dual()); 162 | // for 1 forms, a dot b = a wedge star b 163 | TEST_EQ(a.dot(b), a.wedge(b.dual()).dual()); 164 | } 165 | // 2-form test 166 | { 167 | auto a = float3x3([](int i, int j) -> float { return i + 4 * j; }); 168 | auto b = float3x3([](int i, int j) -> float { return 10 - 3 * i - j; }); 169 | ECHO(a); 170 | ECHO(b); 171 | ECHO(a.dot(b)); 172 | ECHO(a.makeSym().expand()); 173 | ECHO(a.makeSym() + a.makeAsym()); 174 | TEST_EQ(a, a.makeSym() + a.makeAsym()); 175 | ECHO(a.makeAsym().expand()); 176 | ECHO(b.makeAsym().expand()); 177 | ECHO(a.makeAsym().dot(b.makeAsym())); 178 | ECHO(b.dual().expand()); 179 | ECHO(b.dual().dual().expand()); 180 | ECHO(a.wedge(b.dual()).expand()); 181 | ECHO(a.wedge(b.dual()).dual()); 182 | // for 
2 forms, asym(a) dot asym(b) = 2 a wedge star b 183 | TEST_EQ( 184 | a.makeAsym().dot(b.makeAsym()), 185 | 2.f * a.wedge(b.dual()).dual() 186 | ); 187 | } 188 | // 3-form test 189 | { 190 | auto a = floatNaR<3,3>(2.f) + tensorr([](int i, int j, int k) -> float { return 1.f + i + 2. * j - 3. * k; }); 191 | auto b = floatNaR<3,3>(5.f) + tensorr([](int i, int j, int k) -> float { return 4.f * i - 2. * j + k + 5.f; }); 192 | ECHO(a.makeAsym()); 193 | ECHO(b.makeAsym()); 194 | TEST_EQ( 195 | a.makeAsym().dot(b.makeAsym()), 196 | //6.f * a.wedge(b.dual()).dual() // b.dual is a float, so a.wedge( b.dual() ) is just mul 197 | // TODO should I make Tensor::wedge(float) fall back to operator*() ? 198 | // also should I make Tensor::dual(float) make a asymR tensor? but what dimension would it be? 199 | 6.f * (a * b.dual()).dual() 200 | ); 201 | } 202 | // what happens to wedges for rank > dim? 203 | { 204 | auto a = float3{1,2,3}; 205 | ECHO(a); 206 | auto b = float3{4,5,6}; 207 | ECHO(b); 208 | auto c = float3{6,7,8}; 209 | ECHO(c); 210 | auto d = float3{9,1,2}; 211 | ECHO(d); 212 | auto ab = a.wedge(b); // a wedge b 213 | ECHO(ab); 214 | auto abc = ab.wedge(c); // should be a volume-form, *1 215 | ECHO(abc); 216 | //auto abcd = abc.wedge(d); //compiler error ... cuz antisymmetric rank > dim. in math this is a zero so... 217 | // I could set this to zero if A::rank + B::rank > A::dim ... but there's multiple dims ... which dim to use? the max maybe? 218 | //ECHO(abcd); 219 | } 220 | // btw what is a 1-form 3-dim wedge 1-form 4-dim anyways? 221 | //compiler error cuz makeAsym needs a square tensor ... 222 | //or should it just grow all vector to the max dim size, forcing it to be square? 223 | //leave that for the end-user 224 | //how about a 'makeSquare()' function that does that? 
225 | #if 0 226 | { 227 | auto a = float3{1,2,3}; 228 | ECHO(a); 229 | auto b = float4{3,4,5,6}; 230 | ECHO(b); 231 | auto c = a.wedge(b); 232 | ECHO(c); 233 | } 234 | #endif 235 | } 236 | -------------------------------------------------------------------------------- /include/Tensor/Valence.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include //integer_sequence 4 | #include "Common/Sequence.h" 5 | 6 | /* 7 | Valence is a thin wrapper around tensor<> 8 | except that when indexing it, it produces a valenceIndex thin wrapper around Index 9 | 10 | should I allow valence+non-valence-tensors ? 11 | if so then why not just consider a ValenceWrapper to be a tensor? 12 | and then within each already-existing method I could have a test for "if it's not a valence *OR* make sure the valences match" 13 | actually I can deny valence+nonvalence operator mixing anyway and still use the treat-ValenceWrappers-as-tensors trick 14 | TODO think about this.
15 | 16 | */ 17 | 18 | namespace Tensor { 19 | 20 | template 21 | requires is_tensor_v 22 | struct ValenceWrapper; 23 | 24 | template 25 | concept is_valence_v = T::isValenceFlag; 26 | 27 | //Scalar & ValenceWrapper 28 | template 29 | requires (is_valence_v && !is_valence_v && !is_tensor_v) 30 | auto inner(A const & a, B const & b); 31 | 32 | //ValenceWrapper & Scalar 33 | template 34 | requires (is_valence_v && !is_valence_v && !is_tensor_v) 35 | auto inner(A const & a, B const & b); 36 | 37 | //ValenceWrapper & ValenceWrapper 38 | template 39 | requires ( 40 | is_valence_v && 41 | is_valence_v && 42 | std::is_same_v 43 | ) 44 | auto inner(A const & a, B const & b); 45 | 46 | #define TENSOR_VALENCE_ADD_VECTOR_OP_EQ(op)\ 47 | constexpr This & operator op(This const & b) {\ 48 | t op b.t;\ 49 | return *this;\ 50 | } 51 | 52 | #define TENSOR_VALENCE_ADD_SCALAR_OP_EQ(op)\ 53 | constexpr This & operator op(Scalar const & b) {\ 54 | t op b;\ 55 | return *this;\ 56 | } 57 | 58 | #define TENSOR_VALENCE_ADD_UNARY(op)\ 59 | constexpr This operator op() const {\ 60 | return This(op t);\ 61 | } 62 | 63 | template 64 | requires is_tensor_v 65 | struct ValenceWrapper { 66 | using This = ValenceWrapper; 67 | using Tensor = Tensor_; 68 | static constexpr bool isValenceFlag = true; 69 | using Scalar = typename Tensor::Scalar; 70 | static constexpr int rank = Tensor::rank; 71 | 72 | template 73 | using ReplaceTensor = ValenceWrapper; 74 | 75 | Tensor t; 76 | ValenceWrapper() {} 77 | ValenceWrapper(Tensor const & t_) : t(t_) {} 78 | ValenceWrapper(Tensor && t_) : t(t_) {} 79 | 80 | using valseq = std::integer_sequence; 81 | static_assert(sizeof...(Vs) == Tensor::rank); 82 | 83 | template 84 | static constexpr char val = Common::seq_get_v; 85 | 86 | decltype(auto) operator*() { return t; } 87 | decltype(auto) operator*() const { return t; } 88 | decltype(auto) operator->() { return t; } 89 | decltype(auto) operator->() const { return t; } 90 | 91 | TENSOR_VALENCE_ADD_VECTOR_OP_EQ(+=) 
92 | TENSOR_VALENCE_ADD_VECTOR_OP_EQ(-=) 93 | TENSOR_VALENCE_ADD_VECTOR_OP_EQ(*=) 94 | TENSOR_VALENCE_ADD_VECTOR_OP_EQ(/=) 95 | TENSOR_VALENCE_ADD_VECTOR_OP_EQ(<<=) 96 | TENSOR_VALENCE_ADD_VECTOR_OP_EQ(>>=) 97 | TENSOR_VALENCE_ADD_VECTOR_OP_EQ(&=) 98 | TENSOR_VALENCE_ADD_VECTOR_OP_EQ(|=) 99 | TENSOR_VALENCE_ADD_VECTOR_OP_EQ(^=) 100 | TENSOR_VALENCE_ADD_VECTOR_OP_EQ(%=) 101 | TENSOR_VALENCE_ADD_SCALAR_OP_EQ(+=) 102 | TENSOR_VALENCE_ADD_SCALAR_OP_EQ(-=) 103 | TENSOR_VALENCE_ADD_SCALAR_OP_EQ(*=) 104 | TENSOR_VALENCE_ADD_SCALAR_OP_EQ(/=) 105 | TENSOR_VALENCE_ADD_SCALAR_OP_EQ(<<=) 106 | TENSOR_VALENCE_ADD_SCALAR_OP_EQ(>>=) 107 | TENSOR_VALENCE_ADD_SCALAR_OP_EQ(&=) 108 | TENSOR_VALENCE_ADD_SCALAR_OP_EQ(|=) 109 | TENSOR_VALENCE_ADD_SCALAR_OP_EQ(^=) 110 | TENSOR_VALENCE_ADD_SCALAR_OP_EQ(%=) 111 | TENSOR_VALENCE_ADD_UNARY(-) 112 | TENSOR_VALENCE_ADD_UNARY(~) 113 | 114 | //TENSOR_ADD_VECTOR_CALL_INDEX_PRIMARY() 115 | template 116 | requires std::is_integral_v 117 | constexpr decltype(auto) operator()(Int const i) { 118 | return valenceSeq>(t(i)); 119 | } 120 | template\ 121 | requires std::is_integral_v\ 122 | constexpr decltype(auto) operator()(Int const i) const { 123 | return valenceSeq>(t(i)); 124 | } 125 | 126 | //operator[](Int), operator()(Int...) fwd to operator()(Int)(...) 127 | TENSOR_ADD_RANK1_CALL_INDEX_AUX() 128 | //operator()(vec) fwd to operator()(Int...) 129 | TENSOR_ADD_INT_VEC_CALL_INDEX() 130 | 131 | // TODO Index based operator() and a whole set of wrappers of Index expression operators... 132 | 133 | 134 | //TENSOR_ADD_MATH_MEMBER_FUNCS() 135 | // TODO member method forwarding ...? 136 | // or just rely on * -> operators? 137 | // nahhh, because those won't check and apply valence wrappers. 
138 | 139 | // c_I = a_I * b_I, no change in valence 140 | auto elemMul(This const & o) const { 141 | return This(Tensor::elemMul(t, o.t)); 142 | } 143 | auto matrixCompMult(This const & o) const { 144 | return This(Tensor::matrixCompMult(t, o.t)); 145 | } 146 | auto hadamard(This const & o) const { 147 | return This(Tensor::hadamard(t, o.t)); 148 | } 149 | 150 | template 151 | auto inner(B const & o) const { 152 | return Tensor::inner(*this, o); 153 | } 154 | }; 155 | 156 | 157 | template 158 | struct valenceSeqImpl; 159 | template 160 | struct valenceSeqImpl> { 161 | using type = ValenceWrapper; 162 | }; 163 | template 164 | using ValenceWrapperSeq = typename valenceSeqImpl::type; 165 | 166 | // so you don't have to explicitly write the first template arg ... 167 | template 168 | auto valence(auto const & v) { 169 | return ValenceWrapper, vs...>(v); 170 | } 171 | template 172 | auto valence(auto && v) { 173 | return ValenceWrapper, vs...>(v); 174 | } 175 | 176 | template 177 | auto valenceSeq(auto const & v) { 178 | return ValenceWrapperSeq, Seq>(v); 179 | } 180 | template 181 | auto valenceSeq(auto && v) { 182 | return ValenceWrapperSeq, Seq>(v); 183 | } 184 | 185 | template 186 | requires ( 187 | is_valence_v && 188 | is_valence_v && 189 | std::is_same_v // compile-time check that the valence matches? 190 | ) 191 | bool operator==(A const & a, B const & b) { 192 | return a.t == b.t; 193 | } 194 | 195 | template 196 | requires ( 197 | is_valence_v && 198 | is_valence_v && 199 | std::is_same_v // compile-time check that the valence matches? 
200 | ) 201 | bool operator!=(A const & a, B const & b) { 202 | return !operator==(a,b); 203 | } 204 | 205 | #define TENSOR_VALENCE_SCALAR_OP(op)\ 206 | template\ 207 | requires (is_valence_v && !is_valence_v && !is_tensor_v)\ 208 | decltype(auto) operator op(A const & a, B const & b) {\ 209 | return valenceSeq(a.t op b);\ 210 | }\ 211 | template\ 212 | requires (!is_valence_v && !is_tensor_v && is_valence_v)\ 213 | decltype(auto) operator op(A const & a, B const & b) {\ 214 | return valenceSeq(a op b.t);\ 215 | } 216 | 217 | TENSOR_VALENCE_SCALAR_OP(+) 218 | TENSOR_VALENCE_SCALAR_OP(-) 219 | TENSOR_VALENCE_SCALAR_OP(*) 220 | TENSOR_VALENCE_SCALAR_OP(/) 221 | 222 | // this is distinct because it needs the require ! ostream 223 | #define TENSOR_VALENCE_SCALAR_SHIFT_OP(op)\ 224 | template\ 225 | requires (\ 226 | is_valence_v &&\ 227 | !is_valence_v && !is_tensor_v &&\ 228 | !std::is_base_of_v>\ 229 | )\ 230 | decltype(auto) operator op(A const & a, B const & b) {\ 231 | return valenceSeq(a.t op b);\ 232 | }\ 233 | \ 234 | template\ 235 | requires (\ 236 | !is_valence_v && !is_tensor_v &&\ 237 | !std::is_base_of_v> &&\ 238 | is_valence_v\ 239 | )\ 240 | decltype(auto) operator op(A const & a, B const & b) {\ 241 | return valenceSeq(a op b.t);\ 242 | } 243 | 244 | #define TENSOR_VALENCE_VALENCE_OP(op)\ 245 | template\ 246 | requires (\ 247 | is_valence_v && is_valence_v &&\ 248 | std::is_same_v\ 249 | )\ 250 | decltype(auto) operator op(A const & a, B const & b) {\ 251 | return valenceSeq(a.t op b.t);\ 252 | } 253 | 254 | TENSOR_VALENCE_VALENCE_OP(+) 255 | TENSOR_VALENCE_VALENCE_OP(-) 256 | TENSOR_VALENCE_VALENCE_OP(/) 257 | 258 | TENSOR_VALENCE_VALENCE_OP(<<) 259 | TENSOR_VALENCE_VALENCE_OP(>>) 260 | TENSOR_VALENCE_VALENCE_OP(&) 261 | TENSOR_VALENCE_VALENCE_OP(|) 262 | TENSOR_VALENCE_VALENCE_OP(^) 263 | TENSOR_VALENCE_VALENCE_OP(%) 264 | 265 | TENSOR_VALENCE_SCALAR_SHIFT_OP(<<) 266 | TENSOR_VALENCE_SCALAR_SHIFT_OP(>>) 267 | TENSOR_VALENCE_SCALAR_OP(&) 268 | 
TENSOR_VALENCE_SCALAR_OP(|) 269 | TENSOR_VALENCE_SCALAR_OP(^) 270 | TENSOR_VALENCE_SCALAR_OP(%) 271 | 272 | //not yet in tensor: 273 | //TENSOR_UNARY_OP(~) 274 | //TENSOR_UNARY_OP(!) 275 | //TENSOR_TENSOR_OP(&&) 276 | //TENSOR_TENSOR_OP(||) 277 | //TENSOR_TERNARY_OP(?:) 278 | 279 | template 280 | requires is_valence_v && is_valence_v 281 | auto operator*(A const & a, B const & b) { 282 | static_assert(A::template val != B::template val<0>); 283 | return valenceSeq< 284 | Common::seq_cat_t< 285 | char, 286 | Common::seq_pop_back_t, 287 | Common::seq_pop_front_t 288 | > 289 | >(a.t * b.t); 290 | } 291 | 292 | //Scalar & ValenceWrapper 293 | template 294 | requires (is_valence_v && !is_valence_v && !is_tensor_v) 295 | auto inner(A const & a, B const & b) { 296 | return inner(a, b.t); 297 | } 298 | 299 | //ValenceWrapper & Scalar 300 | template 301 | requires (is_valence_v && !is_valence_v && !is_tensor_v) 302 | auto inner(A const & a, B const & b) { 303 | return inner(a.t, b); 304 | } 305 | 306 | //ValenceWrapper & ValenceWrapper 307 | template 308 | requires ( 309 | is_valence_v && 310 | is_valence_v && 311 | std::is_same_v 312 | ) 313 | auto inner(A const & a, B const & b) { 314 | static_assert( 315 | [](std::index_sequence) constexpr -> bool { 316 | return ( 317 | (Common::seq_get_v != Common::seq_get_v) 318 | && ... 
&& (true) 319 | ); 320 | }(std::make_index_sequence{}), 321 | "valence mismatch" 322 | ); 323 | return inner(a.t, b.t); 324 | } 325 | 326 | template 327 | std::ostream & operator<<( 328 | std::ostream & o, 329 | ValenceWrapper const & v 330 | ) { 331 | return o << v.t; 332 | } 333 | 334 | } 335 | -------------------------------------------------------------------------------- /include/Tensor/Inverse.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | //atm Vector.h includes Inverse.h so this is moot: 4 | #include "Tensor/Vector.h.h" 5 | #include "Tensor/Inverse.h.h" 6 | 7 | namespace Tensor { 8 | 9 | // determinant for matrix 10 | // TODO generalize or at least expose Levi-Civita tensor as constexpr 11 | // for implementing cross in higher dimensions, determinant, and whatever else. 12 | 13 | template 14 | T det22elem(T const & a00, T const & a01, T const & a10, T const & a11) { 15 | return a00 * a11 - a01 * a10; 16 | } 17 | 18 | template 19 | typename T::Scalar determinant22(T const & a) { 20 | return det22elem(a(0,0), a(0,1), a(1,0), a(1,1)); 21 | } 22 | 23 | template 24 | typename T::Scalar determinant33(T const & a) { 25 | return a(0,0) * a(1,1) * a(2,2) 26 | + a(0,1) * a(1,2) * a(2,0) 27 | + a(0,2) * a(1,0) * a(2,1) 28 | - a(0,2) * a(1,1) * a(2,0) 29 | - a(0,1) * a(1,0) * a(2,2) 30 | - a(0,0) * a(1,2) * a(2,1); 31 | } 32 | 33 | template 34 | typename M::Scalar determinant44(M const & a) { 35 | using T = typename M::Scalar; 36 | //autogen'd with symmath 37 | T const tmp1 = a(2,2) * a(3,3); 38 | T const tmp2 = a(2,3) * a(3,2); 39 | T const tmp3 = a(2,1) * a(3,3); 40 | T const tmp4 = a(2,3) * a(3,1); 41 | T const tmp5 = a(2,1) * a(3,2); 42 | T const tmp6 = a(2,2) * a(3,1); 43 | T const tmp7 = a(2,0) * a(3,3); 44 | T const tmp8 = a(2,3) * a(3,0); 45 | T const tmp9 = a(2,0) * a(3,2); 46 | T const tmp10 = a(2,2) * a(3,0); 47 | T const tmp11 = a(2,0) * a(3,1); 48 | T const tmp12 = a(2,1) * a(3,0); 49 | return 
a(0,0) * a(1,1) * tmp1 50 | - a(0,0) * a(1,1) * tmp2 51 | - a(0,0) * a(1,2) * tmp3 52 | + a(0,0) * a(1,2) * tmp4 53 | + a(0,0) * a(1,3) * tmp5 54 | - a(0,0) * a(1,3) * tmp6 55 | - a(0,1) * a(1,0) * tmp1 56 | + a(0,1) * a(1,0) * tmp2 57 | + a(0,1) * a(1,2) * tmp7 58 | - a(0,1) * a(1,2) * tmp8 59 | - a(0,1) * a(1,3) * tmp9 60 | + a(0,1) * a(1,3) * tmp10 61 | + a(0,2) * a(1,0) * tmp3 62 | - a(0,2) * a(1,0) * tmp4 63 | - a(0,2) * a(1,1) * tmp7 64 | + a(0,2) * a(1,1) * tmp8 65 | + a(0,2) * a(1,3) * tmp11 66 | - a(0,2) * a(1,3) * tmp12 67 | - a(0,3) * a(1,0) * tmp5 68 | + a(0,3) * a(1,0) * tmp6 69 | + a(0,3) * a(1,1) * tmp9 70 | - a(0,3) * a(1,1) * tmp10 71 | + a(0,3) * a(1,2) * tmp12 72 | - a(0,3) * a(1,2) * tmp11; 73 | } 74 | 75 | //matrix specializations 76 | 77 | template 78 | T determinant(mat const & a) { 79 | return a(0,0); 80 | } 81 | 82 | template 83 | T determinant(mat2x2 const & a) { 84 | return determinant22(a); 85 | } 86 | 87 | template 88 | T determinant(mat3x3 const & a) { 89 | return determinant33(a); 90 | } 91 | 92 | template 93 | T determinant(mat4x4 const & a) { 94 | return determinant44(a); 95 | } 96 | 97 | // determinant for symmetric 98 | 99 | template 100 | T determinant(sym const & a) { 101 | return a(0,0); 102 | } 103 | 104 | template 105 | T determinant(sym2 const & a) { 106 | return determinant22(a); 107 | } 108 | 109 | template 110 | T determinant(sym3 const & a) { 111 | return determinant33(a); 112 | } 113 | 114 | template 115 | T determinant(sym4 const & a) { 116 | return determinant44(a); 117 | } 118 | 119 | template 120 | typename M::Scalar determinantNN(M const & a) { 121 | using T = typename M::Scalar; 122 | static_assert(M::rank == 2); 123 | static_assert(M::template dim<0> == M::template dim<1>); 124 | constexpr int dim = M::template dim<0>; 125 | T sign = 1; 126 | T sum = {}; 127 | for (int k = 0; k < dim; ++k) { 128 | // TODO ReplaceLocalDim / ReplaceDim to preserve symmetry? 
129 | using subM = mat; 130 | subM sub; 131 | for (int i = 0; i < dim-1; ++i) { 132 | for (int j = 0; j < dim-1; ++j) { 133 | sub(i,j) = a(i+1, j + (j>=k)); 134 | } 135 | } 136 | sum += sign * a(0,k) * Tensor::determinant(sub); 137 | sign = -sign; 138 | } 139 | return sum; 140 | } 141 | 142 | //general case 143 | 144 | template 145 | requires(dim>4) 146 | T determinant(mat const & a) { 147 | return determinantNN(a); 148 | } 149 | 150 | template 151 | requires (dim > 4) 152 | T determinant(sym const & a) { 153 | return determinantNN(a); 154 | } 155 | 156 | template 157 | requires is_tensor_v 158 | typename T::Scalar determinant(T const & a) { 159 | return determinantNN(a); 160 | } 161 | 162 | 163 | 164 | // inverse for matrix 165 | // I could write out all specialized functions 166 | // but then i'd have to fwd-declare them also ... 167 | // So I will just write out the specializations as a 2nd function 168 | 169 | 170 | template 171 | mat inverseImpl(mat const & a, T const & det) { 172 | return (T)1 / det; // == 1 / a(0,0); 173 | } 174 | 175 | template 176 | mat2x2 inverseImpl(mat2x2 const & a, T const & det) { 177 | return { 178 | { 179 | a.s1.s1 / det, 180 | -a.s0.s1 / det, 181 | }, 182 | { 183 | -a.s1.s0 / det, 184 | a.s0.s0 / det, 185 | } 186 | }; 187 | } 188 | 189 | template 190 | mat3x3 inverseImpl(mat3x3 const & a, T const & det) { 191 | return { 192 | { 193 | (-a.s1.s2 * a.s2.s1 + a.s1.s1 * a.s2.s2) / det, 194 | ( a.s0.s2 * a.s2.s1 + -a.s0.s1 * a.s2.s2) / det, 195 | (-a.s0.s2 * a.s1.s1 + a.s0.s1 * a.s1.s2) / det, 196 | }, { 197 | ( a.s1.s2 * a.s2.s0 + -a.s1.s0 * a.s2.s2) / det, 198 | (-a.s0.s2 * a.s2.s0 + a.s0.s0 * a.s2.s2) / det, 199 | ( a.s0.s2 * a.s1.s0 + -a.s0.s0 * a.s1.s2) / det, 200 | }, { 201 | (-a.s1.s1 * a.s2.s0 + a.s1.s0 * a.s2.s1) / det, 202 | ( a.s0.s1 * a.s2.s0 + -a.s0.s0 * a.s2.s1) / det, 203 | (-a.s0.s1 * a.s1.s0 + a.s0.s0 * a.s1.s1) / det, 204 | } 205 | }; 206 | } 207 | 208 | //from : 
https://stackoverflow.com/questions/1148309/inverting-a-4x4-matrix 209 | template 210 | mat4x4 inverseImpl(mat4x4 const & a, T const & det) { 211 | T const a2323 = a.s2.s2 * a.s3.s3 - a.s2.s3 * a.s3.s2; 212 | T const a1323 = a.s2.s1 * a.s3.s3 - a.s2.s3 * a.s3.s1; 213 | T const a1223 = a.s2.s1 * a.s3.s2 - a.s2.s2 * a.s3.s1; 214 | T const a0323 = a.s2.s0 * a.s3.s3 - a.s2.s3 * a.s3.s0; 215 | T const a0223 = a.s2.s0 * a.s3.s2 - a.s2.s2 * a.s3.s0; 216 | T const a0123 = a.s2.s0 * a.s3.s1 - a.s2.s1 * a.s3.s0; 217 | T const a2313 = a.s1.s2 * a.s3.s3 - a.s1.s3 * a.s3.s2; 218 | T const a1313 = a.s1.s1 * a.s3.s3 - a.s1.s3 * a.s3.s1; 219 | T const a1213 = a.s1.s1 * a.s3.s2 - a.s1.s2 * a.s3.s1; 220 | T const a2312 = a.s1.s2 * a.s2.s3 - a.s1.s3 * a.s2.s2; 221 | T const a1312 = a.s1.s1 * a.s2.s3 - a.s1.s3 * a.s2.s1; 222 | T const a1212 = a.s1.s1 * a.s2.s2 - a.s1.s2 * a.s2.s1; 223 | T const a0313 = a.s1.s0 * a.s3.s3 - a.s1.s3 * a.s3.s0; 224 | T const a0213 = a.s1.s0 * a.s3.s2 - a.s1.s2 * a.s3.s0; 225 | T const a0312 = a.s1.s0 * a.s2.s3 - a.s1.s3 * a.s2.s0; 226 | T const a0212 = a.s1.s0 * a.s2.s2 - a.s1.s2 * a.s2.s0; 227 | T const a0113 = a.s1.s0 * a.s3.s1 - a.s1.s1 * a.s3.s0; 228 | T const a0112 = a.s1.s0 * a.s2.s1 - a.s1.s1 * a.s2.s0; 229 | return { 230 | { 231 | (a.s1.s1 * a2323 - a.s1.s2 * a1323 + a.s1.s3 * a1223) / det, 232 | -(a.s0.s1 * a2323 - a.s0.s2 * a1323 + a.s0.s3 * a1223) / det, 233 | (a.s0.s1 * a2313 - a.s0.s2 * a1313 + a.s0.s3 * a1213) / det, 234 | -(a.s0.s1 * a2312 - a.s0.s2 * a1312 + a.s0.s3 * a1212) / det, 235 | }, { 236 | -(a.s1.s0 * a2323 - a.s1.s2 * a0323 + a.s1.s3 * a0223) / det, 237 | (a.s0.s0 * a2323 - a.s0.s2 * a0323 + a.s0.s3 * a0223) / det, 238 | -(a.s0.s0 * a2313 - a.s0.s2 * a0313 + a.s0.s3 * a0213) / det, 239 | (a.s0.s0 * a2312 - a.s0.s2 * a0312 + a.s0.s3 * a0212) / det, 240 | }, { 241 | (a.s1.s0 * a1323 - a.s1.s1 * a0323 + a.s1.s3 * a0123) / det, 242 | -(a.s0.s0 * a1323 - a.s0.s1 * a0323 + a.s0.s3 * a0123) / det, 243 | (a.s0.s0 * a1313 - a.s0.s1 * 
a0313 + a.s0.s3 * a0113) / det, 244 | -(a.s0.s0 * a1312 - a.s0.s1 * a0312 + a.s0.s3 * a0112) / det, 245 | }, { 246 | -(a.s1.s0 * a1223 - a.s1.s1 * a0223 + a.s1.s2 * a0123) / det, 247 | (a.s0.s0 * a1223 - a.s0.s1 * a0223 + a.s0.s2 * a0123) / det, 248 | -(a.s0.s0 * a1213 - a.s0.s1 * a0213 + a.s0.s2 * a0113) / det, 249 | (a.s0.s0 * a1212 - a.s0.s1 * a0212 + a.s0.s2 * a0112) / det, 250 | } 251 | }; 252 | } 253 | 254 | // inverseImpl for symmetric 255 | // has a different # of written fields so might as well optimize for it 256 | 257 | template 258 | sym inverseImpl(sym const & a, T const & det) { 259 | return (T)1 / det; // == 1 / a.xx; ... which to use? 260 | } 261 | 262 | // TODO sym2 from mat2 263 | template 264 | sym2 inverseImpl(sym2 const & a, T const & det) { 265 | return { 266 | a(1,1) / det, // AInv(0,0) 267 | -a(0,1) / det, // AInv(1,0) 268 | a(0,0) / det, // AInv(1,1) 269 | }; 270 | } 271 | 272 | template 273 | sym3 inverseImpl(sym3 const & a, T const & det) { 274 | return { 275 | det22elem(a(1,1), a(1,2), a(2,1), a(2,2)) / det, // AInv(0,0) 276 | det22elem(a(1,2), a(1,0), a(2,2), a(2,0)) / det, // AInv(1,0) 277 | det22elem(a(0,0), a(0,2), a(2,0), a(2,2)) / det, // AInv(1,1) 278 | det22elem(a(1,0), a(1,1), a(2,0), a(2,1)) / det, // AInv(2,0) 279 | det22elem(a(0,1), a(0,0), a(2,1), a(2,0)) / det, // AInv(2,1) 280 | det22elem(a(0,0), a(0,1), a(1,0), a(1,1)) / det, // AInv(2,2) 281 | }; 282 | } 283 | 284 | //from the 4x4 case above, which is from: https://stackoverflow.com/questions/1148309/inverting-a-4x4-matrix 285 | // hmm after comparing symmetric outputs i'm not seeing the results match, makes me suspicious of the above implemenation ... 
286 | template 287 | sym4 inverseImpl(sym4 const & a, T const & det) { 288 | T const a2323 = a.s22 * a.s33 - a.s23 * a.s23; 289 | T const a1323 = a.s12 * a.s33 - a.s23 * a.s13; 290 | T const a1223 = a.s12 * a.s23 - a.s22 * a.s13; 291 | T const a0323 = a.s02 * a.s33 - a.s23 * a.s03; 292 | T const a0223 = a.s02 * a.s23 - a.s22 * a.s03; 293 | //T const a0123 = a.s02 * a.s13 - a.s12 * a.s03; 294 | T const a2313 = a.s12 * a.s33 - a.s13 * a.s23; 295 | T const a1313 = a.s11 * a.s33 - a.s13 * a.s13; 296 | T const a1213 = a.s11 * a.s23 - a.s12 * a.s13; 297 | T const a2312 = a.s12 * a.s23 - a.s13 * a.s22; 298 | T const a1312 = a.s11 * a.s23 - a.s13 * a.s12; 299 | T const a1212 = a.s11 * a.s22 - a.s12 * a.s12; 300 | T const a0313 = a.s01 * a.s33 - a.s13 * a.s03; 301 | T const a0213 = a.s01 * a.s23 - a.s12 * a.s03; 302 | T const a0312 = a.s01 * a.s23 - a.s13 * a.s02; 303 | T const a0212 = a.s01 * a.s22 - a.s12 * a.s02; 304 | T const a0113 = a.s01 * a.s13 - a.s11 * a.s03; 305 | T const a0112 = a.s01 * a.s12 - a.s11 * a.s02; 306 | sym4 result; 307 | result(0,0) = (a.s11 * a2323 - a.s12 * a1323 + a.s13 * a1223) / det; 308 | result(0,1) = -(a.s01 * a2323 - a.s02 * a1323 + a.s03 * a1223) / det; 309 | //result(1,0) = -(a.s01 * a2323 - a.s12 * a0323 + a.s13 * a0223) / det; 310 | result(0,2) = (a.s01 * a2313 - a.s02 * a1313 + a.s03 * a1213) / det; 311 | //result(2,0) = (a.s01 * a1323 - a.s11 * a0323 + a.s13 * a0123) / det; 312 | result(0,3) = -(a.s01 * a2312 - a.s02 * a1312 + a.s03 * a1212) / det; 313 | //result(3,0) = -(a.s01 * a1223 - a.s11 * a0223 + a.s12 * a0123) / det; 314 | result(1,1) = (a.s00 * a2323 - a.s02 * a0323 + a.s03 * a0223) / det; 315 | result(1,2) = -(a.s00 * a2313 - a.s02 * a0313 + a.s03 * a0213) / det; 316 | //result(2,1) = -(a.s00 * a1323 - a.s01 * a0323 + a.s03 * a0123) / det; 317 | result(1,3) = (a.s00 * a2312 - a.s02 * a0312 + a.s03 * a0212) / det; 318 | //result(3,1) = (a.s00 * a1223 - a.s01 * a0223 + a.s02 * a0123) / det; 319 | result(2,2) = (a.s00 * a1313 - 
a.s01 * a0313 + a.s03 * a0113) / det; 320 | result(2,3) = -(a.s00 * a1312 - a.s01 * a0312 + a.s03 * a0112) / det; 321 | //result(3,2) = -(a.s00 * a1213 - a.s01 * a0213 + a.s02 * a0113) / det; 322 | result(3,3) = (a.s00 * a1212 - a.s01 * a0212 + a.s02 * a0112) / det; 323 | return result; 324 | } 325 | 326 | template 327 | requires is_tensor_v 328 | T inverse(T const & a, typename T::Scalar const & det) { 329 | return inverseImpl(a, det); 330 | } 331 | 332 | // inverse without determinant 333 | 334 | template 335 | requires is_tensor_v 336 | T inverse(T const & a) { 337 | return inverse(a, determinant(a)); 338 | } 339 | 340 | } 341 | -------------------------------------------------------------------------------- /include/Tensor/Vector.h.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | // I have so many templates that I need to forward-declare them before using them... 4 | #include "Tensor/Meta.h" //is_tensor_v 5 | 6 | namespace Tensor { 7 | 8 | 9 | //forward-declare everything 10 | 11 | template 12 | requires (localDim > 0) 13 | struct vec; 14 | 15 | template 16 | requires(localDim > 0) 17 | struct zero; 18 | 19 | template 20 | requires (localDim > 0) 21 | struct ident; 22 | 23 | template 24 | requires (localDim > 0) 25 | struct sym; 26 | 27 | template 28 | requires (localDim > 0) 29 | struct asym; 30 | 31 | template 32 | requires(localDim > 0 && localRank > 2) 33 | struct symR; 34 | 35 | template 36 | requires(localDim > 0 && localRank > 2) 37 | struct asymR; 38 | 39 | 40 | // hmm, I'm trying to use these storage_*'s in combination with is_instance_v::template type> but it's failing, so here they are specialized 41 | template struct is_vec : public std::false_type {}; 42 | template struct is_vec> : public std::true_type {}; 43 | template constexpr bool is_vec_v = is_vec::value; 44 | 45 | template struct is_zero : public std::false_type {}; 46 | template struct is_zero> : public std::true_type {}; 47 | 
template constexpr bool is_zero_v = is_zero::value; 48 | 49 | template struct is_ident : public std::false_type {}; 50 | template struct is_ident> : public std::true_type {}; 51 | template constexpr bool is_ident_v = is_ident::value; 52 | 53 | template struct is_sym : public std::false_type {}; 54 | template struct is_sym> : public std::true_type {}; 55 | template constexpr bool is_sym_v = is_sym::value; 56 | 57 | template struct is_asym : public std::false_type {}; 58 | template struct is_asym> : public std::true_type {}; 59 | template constexpr bool is_asym_v = is_asym::value; 60 | 61 | template struct is_symR : public std::false_type {}; 62 | template struct is_symR> : public std::true_type {}; 63 | template constexpr bool is_symR_v = is_symR::value; 64 | 65 | template struct is_asymR : public std::false_type {}; 66 | template struct is_asymR> : public std::true_type {}; 67 | template constexpr bool is_asymR_v = is_asymR::value; 68 | 69 | 70 | 71 | // concepts 72 | 73 | template 74 | concept IsBinaryTensorOp = 75 | is_tensor_v 76 | && is_tensor_v; 77 | 78 | template 79 | concept IsSquareTensor = 80 | is_tensor_v 81 | && T::isSquare; 82 | 83 | template 84 | concept IsBinaryTensorOpWithMatchingNeighborDims = 85 | IsBinaryTensorOp 86 | && A::template dim == B::template dim<0>; 87 | 88 | template 89 | concept IsBinaryTensorR3xR3Op = 90 | IsBinaryTensorOp 91 | // can't use vec because it hasn't been declared yet 92 | //&& A::dims == vec(3) 93 | //&& B::dims == vec(3); 94 | && A::rank == 1 && A::template dim<0> == 3 95 | && B::rank == 1 && B::template dim<0> == 3; 96 | 97 | template 98 | concept IsBinaryTensorDiffTypeButMatchingDims = 99 | IsBinaryTensorOp 100 | && !std::is_same_v 101 | && A::dims() == B::dims(); // equal types means we use .operator== which is constexpr 102 | 103 | template 104 | concept IsInteriorOp = 105 | IsBinaryTensorOp && num > 0 && num <= A::rank && num <= B::rank; 106 | // TODO also assert the last 'num' dims of A match the first 'num' dims 
of B 107 | 108 | template 109 | concept hasMatchingSymAndAsymIndexes = 110 | A::rank >= 2 && 111 | B::rank >= 2 && 112 | [](std::index_sequence) constexpr { 113 | return (( 114 | // and their indexes overlap at all ... 115 | A::template numNestingsToIndex == A::template numNestingsToIndex && 116 | B::template numNestingsToIndex == B::template numNestingsToIndex && 117 | // if any nestings of A and B are asym(R) vs sym(R) or vice versa ... 118 | ( 119 | ( 120 | ( 121 | is_asym_v> || 122 | is_asymR_v> 123 | ) && ( 124 | is_sym_v> || 125 | is_symR_v> || 126 | is_ident_v> 127 | ) 128 | ) || ( 129 | ( 130 | is_asym_v> || 131 | is_asymR_v> 132 | ) && ( 133 | is_sym_v> || 134 | is_symR_v> || 135 | is_ident_v> 136 | ) 137 | ) 138 | ) 139 | ) || ... || (false)); 140 | }(std::make_index_sequence{}); 141 | 142 | 143 | //convention? row-major to match math indexing, easy C inline ctor, so A_ij = A[i][j] 144 | // ... but OpenGL getFloatv(GL_...MATRIX) uses column-major so uploads have to be transposed 145 | // ... also GLSL is column-major so between this and GLSL the indexes have to be transposed. 
146 | template using mat = vec, dim1>; 147 | 148 | 149 | // specific-sized templates 150 | template using vec2 = vec; 151 | template using vec3 = vec; 152 | template using vec4 = vec; 153 | template using mat2x2 = vec2>; 154 | template using mat2x3 = vec2>; 155 | template using mat2x4 = vec2>; 156 | template using mat3x2 = vec3>; 157 | template using mat3x3 = vec3>; 158 | template using mat3x4 = vec3>; 159 | template using mat4x2 = vec4>; 160 | template using mat4x3 = vec4>; 161 | template using mat4x4 = vec4>; 162 | template using sym2 = sym; 163 | template using sym3 = sym; 164 | template using sym4 = sym; 165 | template using asym2 = asym; 166 | template using asym3 = asym; 167 | template using asym4 = asym; 168 | 169 | 170 | // dense vec-of-vec 171 | 172 | // some template metaprogram helpers 173 | // needed for the math function 174 | // including operators, esp * 175 | 176 | // tensori helpers: 177 | // tensor, storage_vec, ..., storage_vec> 178 | // use storage_sym<> storage_asym<> for injecting storage optimization 179 | // tensor, ..., dimN> 180 | 181 | template 182 | struct storage_vec { 183 | template 184 | using type = vec; 185 | // so (hopefully) storage_vec == vec 186 | }; 187 | 188 | template 189 | struct storage_zero { 190 | template 191 | using type = zero; 192 | }; 193 | 194 | template 195 | struct storage_sym { 196 | template 197 | using type = sym; 198 | }; 199 | 200 | template 201 | struct storage_asym { 202 | template 203 | using type = asym; 204 | }; 205 | 206 | template 207 | struct storage_ident { 208 | template 209 | using type = ident; 210 | }; 211 | 212 | template 213 | struct storage_symR { 214 | template 215 | using type = symR; 216 | }; 217 | 218 | template 219 | struct storage_asymR { 220 | template 221 | using type = asymR; 222 | }; 223 | 224 | 225 | // can I shorthand this? what is the syntax? 226 | // this has a template and not a type on the lhs so I think no? 
227 | //template using vecI = storage_vec::type; 228 | //template using symI = storage_sym::type; 229 | //template using asymI = storage_asym::type; 230 | 231 | // useful helper macros, same as above but with transposed order 232 | 233 | // tensori: 234 | // tensor which allows custom nested storage, such as symmetric indexes 235 | 236 | template 237 | struct tensori_impl; 238 | template 239 | struct tensori_impl { 240 | using type = typename Storage::template type::type>; 241 | }; 242 | template 243 | struct tensori_impl { 244 | using type = typename Storage::template type; 245 | }; 246 | template 247 | struct tensori_impl { 248 | using type = Scalar; 249 | }; 250 | template 251 | using tensori = typename tensori_impl::type; 252 | 253 | // type of a tensor with specific rank and dimension (for all indexes) 254 | // used by some vec members 255 | 256 | template 257 | struct tensorr_impl { 258 | using type = vec::type, dim>; 259 | }; 260 | template 261 | struct tensorr_impl { 262 | using type = Scalar; 263 | }; 264 | template 265 | using tensorr = typename tensorr_impl::type; 266 | 267 | // this is a useful enough one 268 | 269 | template 270 | using tensorScalarTuple = Common::tuple_apply_t, StorageTuple>>; 271 | 272 | // make a tensor from a list of dimensions 273 | // ex: tensor 274 | // fully expanded storage - no spatial optimizations 275 | // TODO can I accept template args as int or Index? 276 | // maybe vararg function return type and decltype()? 
277 | 278 | template 279 | struct tensor_impl { 280 | using type = vec::type, dim>; 281 | }; 282 | template 283 | struct tensor_impl { 284 | using type = vec; 285 | }; 286 | template 287 | using tensor = typename tensor_impl::type; 288 | 289 | // useful helper for tensor: 290 | 291 | template 292 | struct tensorScalarSeqImpl; 293 | template 294 | struct tensorScalarSeqImpl> { 295 | using type = tensor; 296 | }; 297 | template 298 | using tensorScalarSeq = typename tensorScalarSeqImpl::type; 299 | 300 | /* 301 | ok maybe this is a bad idea .. 302 | tensorx< type, dim1, dim2, ... , storage char, [storage args] > 303 | where storage args are: 304 | -'z', dim = rank-1 zero index 305 | -'i', dim = rank-2 identity index 306 | -'s', dim = rank-2 symmetric 307 | -'a', dim = rank-2 antisymmetric 308 | -'S', dim, rank = rank-N symmetric 309 | -'A', dim, rank = rank-N antisymmetric 310 | TODO rename tensor into tensorx and rename tensorx to tensor ... so tensorx is for float3x3 float4x4x4 float2x3x4 etc, hence the 'x' 311 | */ 312 | 313 | template 314 | struct tensorx_impl; 315 | template 316 | struct tensorx_impl { 317 | using type = vec::type, dim>; 318 | }; 319 | template 320 | struct tensorx_impl { 321 | using type = zero::type, dim>; 322 | }; 323 | template 324 | struct tensorx_impl { 325 | using type = ident::type, dim>; 326 | }; 327 | template 328 | struct tensorx_impl { 329 | using type = sym::type, dim>; 330 | }; 331 | template 332 | struct tensorx_impl { 333 | using type = asym::type, dim>; 334 | }; 335 | template 336 | struct tensorx_impl { 337 | using type = symR::type, dim, rank>; 338 | }; 339 | template 340 | struct tensorx_impl { 341 | using type = asymR::type, dim, rank>; 342 | }; 343 | template 344 | struct tensorx_impl { 345 | using type = Scalar; 346 | }; 347 | template 348 | using tensorx = typename tensorx_impl::type; 349 | 350 | } 351 | -------------------------------------------------------------------------------- /test/src/Index.cpp: 
-------------------------------------------------------------------------------- 1 | #include "Tensor/Tensor.h" 2 | #include "Tensor/Derivative.h" 3 | #include "Common/Test.h" 4 | #include 5 | 6 | namespace TupleTests { 7 | using namespace std; 8 | using namespace Common; 9 | using namespace Tensor; 10 | 11 | using I = Index<'i'>; 12 | using J = Index<'j'>; 13 | I i; 14 | J j; 15 | 16 | static_assert(is_same_v< 17 | GatherIndexesImpl< 18 | decltype(float3()(i))::IndexTuple, 19 | make_integer_sequence 20 | >::indexes, 21 | tuple 22 | >); 23 | 24 | static_assert(is_same_v< 25 | GatherIndexesImpl< 26 | decltype(float3()(i))::IndexTuple, 27 | make_integer_sequence 28 | >::type, 29 | tuple< 30 | pair< 31 | I, 32 | integer_sequence 33 | > 34 | > 35 | >); 36 | 37 | static_assert(is_same_v< 38 | GatherIndexesImpl< 39 | decltype(float3x3()(i,j))::IndexTuple, 40 | make_integer_sequence 41 | >::indexes, 42 | tuple 43 | >); 44 | 45 | static_assert(is_same_v< 46 | decltype(float3x3()(i,j))::Details::GatheredIndexes, 47 | tuple< 48 | pair< 49 | I, 50 | integer_sequence 51 | >, 52 | pair< 53 | J, 54 | integer_sequence 55 | > 56 | > 57 | >); 58 | 59 | namespace test1 { 60 | using IndexTuple = std::tuple; 61 | using GatheredIndexes = Tensor::GatherIndexes; 62 | using GetAssignVsSumGatheredLocs = Common::tuple_get_filtered_indexes_t; 63 | using SumIndexSeq = GetIndexLocsFromGatherResult; 64 | using AssignIndexSeq = GetIndexLocsFromGatherResult; 65 | static_assert(SumIndexSeq::size() == 0); 66 | static_assert(std::is_same_v>); 67 | } 68 | namespace test2 { 69 | using IndexTuple = std::tuple; 70 | using GatheredIndexes = Tensor::GatherIndexes; 71 | using GetAssignVsSumGatheredLocs = Common::tuple_get_filtered_indexes_t; 72 | using SumIndexSeq = GetIndexLocsFromGatherResult; 73 | using AssignIndexSeq = GetIndexLocsFromGatherResult; 74 | static_assert(SumIndexSeq::size() == 0); 75 | static_assert(std::is_same_v>); 76 | } 77 | namespace test3 { 78 | using IndexTuple = std::tuple; 79 | 
static_assert(Common::tuple_find_v> == -1); 80 | static_assert(Common::tuple_find_v> == 0); 81 | static_assert(Common::tuple_find_v> == 0); 82 | // GatheredIndexes == GatheredIndexesImpl::type so ... 83 | static_assert(std::is_same_v< 84 | typename Tensor::GatherIndexesImpl>::Next::Next::type, 85 | std::tuple<> 86 | >); 87 | static_assert(std::is_same_v< 88 | typename Tensor::GatherIndexesImpl>::Next::Next::indexes, 89 | std::tuple<> 90 | >); 91 | static_assert(-1 == Common::tuple_find_v>::Next::Next::indexes>); 92 | static_assert(std::is_same_v< 93 | typename Tensor::GatherIndexesImpl>::Next::type, 94 | std::tuple< 95 | std::pair< 96 | I, 97 | std::integer_sequence 98 | > 99 | > 100 | >); 101 | static_assert(std::is_same_v< 102 | typename Tensor::GatherIndexesImpl>::type, 103 | std::tuple< 104 | std::pair< 105 | I, 106 | std::integer_sequence 107 | > 108 | > 109 | >); 110 | static_assert(std::is_same_v>::indexes, std::tuple>); 111 | static_assert(std::is_same_v>::Next::indexes, std::tuple>); 112 | static_assert(std::is_same_v>::Next::Next::indexes, std::tuple<>>); 113 | using GatheredIndexes = Tensor::GatherIndexes; 114 | static_assert(std::is_same_v< 115 | GatheredIndexes, 116 | std::tuple< 117 | std::pair< 118 | I, 119 | std::integer_sequence 120 | > 121 | > 122 | >); 123 | using GetAssignVsSumGatheredLocs = Common::tuple_get_filtered_indexes_t< 124 | GatheredIndexes, 125 | HasMoreThanOneIndex 126 | >; 127 | using SumIndexSeq = GetIndexLocsFromGatherResult; 128 | using AssignIndexSeq = GetIndexLocsFromGatherResult; 129 | static_assert(AssignIndexSeq::size() == 0); 130 | static_assert(std::is_same_v< 131 | SumIndexSeq, 132 | std::integer_sequence 133 | >); 134 | } 135 | } 136 | 137 | void test_Index() { 138 | //index assignment 139 | { 140 | auto a = Tensor::double3(1); 141 | auto b = Tensor::double3(2); 142 | 143 | TEST_EQ(a.rank, 1); 144 | TEST_EQ(b.rank, 1); 145 | 146 | TEST_EQ(a, (Tensor::double3(1))); 147 | TEST_EQ(b, (Tensor::double3(2))); 148 | 149 | 
Tensor::Index<'i'> i; 150 | a(i) = b(i); 151 | 152 | TEST_EQ(a, (Tensor::double3(2))); 153 | } 154 | 155 | { 156 | //make sure 2D swizzling works 157 | Tensor::Index<'i'> i; 158 | Tensor::Index<'j'> j; 159 | Tensor::double3x3 m; 160 | m(1,0) = 1; 161 | ECHO(m); 162 | m(i,j) = m(j,i); 163 | ECHO(m); 164 | TEST_EQ(m(0, 1), 1); 165 | } 166 | 167 | { 168 | //make sure 3D swizzling works 169 | //this verifies the mapping between indexes in tensor assignment (since the 2D case is always a cycle of at most period 2, i.e. its own inverse) 170 | Tensor::Index<'i'> i; 171 | Tensor::Index<'j'> j; 172 | Tensor::Index<'k'> k; 173 | Tensor::tensor s; 174 | s(0,1,0) = 1; 175 | ECHO(s); 176 | s(i,j,k) = s(j,k,i); //s(0,0,1) = s(0,1,0) 177 | TEST_EQ(s(0,0,1), 1); 178 | ECHO(s); 179 | } 180 | 181 | { 182 | //arithemetic operations 183 | Tensor::Index<'i'> i; 184 | 185 | Tensor::double3 a = {1,2,3}; 186 | Tensor::double3 b = {5,7,11}; 187 | 188 | Tensor::double3 c; 189 | c(i) = a(i) + b(i); 190 | TEST_EQ(c, (Tensor::double3(6,9,14))); 191 | } 192 | 193 | { 194 | Tensor::Index<'i'> i; 195 | Tensor::Index<'j'> j; 196 | Tensor::double3x3 a = {{1,2,3},{4,5,6},{7,8,9}}; 197 | 198 | // transpose 199 | a(i,j) = a(j,i); 200 | TEST_EQ(a, (Tensor::double3x3{{1,4,7},{2,5,8},{3,6,9}})); 201 | 202 | // add to transpose and self-assign 203 | a(i,j) = a(i,j) + a(j,i); 204 | TEST_EQ(a, (Tensor::double3x3{{2,6,10},{6,10,14},{10,14,18}})); 205 | } 206 | { 207 | Tensor::Index<'i'> i; 208 | Tensor::Index<'j'> j; 209 | Tensor::double3x3 a = {{1,2,3},{4,5,6},{7,8,9}}; 210 | 211 | // symmetrize using index notation 212 | Tensor::double3x3 b; 213 | b(i,j) = .5 * (a(i,j) + a(j,i)); 214 | TEST_EQ(b, makeSym(a)); 215 | // explicitly-specified storage 216 | auto c = (.5 * (a(i,j) - a(j,i))).assignR(i,j); 217 | static_assert(std::is_same_v); 218 | TEST_EQ(c, makeAsym(a)); 219 | } 220 | { 221 | Tensor::Index<'i'> i; 222 | Tensor::Index<'j'> j; 223 | Tensor::Index<'k'> k; 224 | //assignR 225 | // TODO put 
compile-fail tests in their own cpp file and have a script assert the compiler fails 226 | #if 0 // ASSERT_FAILURE ranks of 'a' and index call-operator don't match so static-assert failure 227 | { 228 | Tensor::float3x3 a; 229 | auto c = a(i); 230 | } 231 | #endif 232 | #if 0 // ASSERT_FAILURE compile fail because a(j,i) rank doesn't match assign(i,j,k) rank 233 | { 234 | Tensor::float3x3 a; 235 | Tensor::tensor d; 236 | d = a(j,i).assign(i,j,k); 237 | } 238 | #endif 239 | #if 0 // ASSERT_FAILURE compile fail because assign(i,j) rank doesn't match d(i,j,k) rank 240 | { 241 | Tensor::float3x3 a; 242 | Tensor::tensor d; 243 | d = a(j,i).assign(i,j); 244 | } 245 | #endif 246 | { 247 | Tensor::float3x3 a; 248 | auto c = a(i,j).assignR(i,j); 249 | static_assert(std::is_same_v); 250 | } 251 | { 252 | Tensor::float3x3 a; 253 | auto c = a(i,j).assignR(j,i); 254 | static_assert(std::is_same_v); 255 | } 256 | { 257 | Tensor::float2x3 a; 258 | auto c = a(i,j).assignR(i,j); 259 | static_assert(std::is_same_v); 260 | } 261 | { 262 | Tensor::float2x3 a; 263 | auto c = a(i,j).assignR(j,i); 264 | static_assert(std::is_same_v); 265 | } 266 | #if 0 // ASSERT_FAILURE dims don't match, should compile-fail 267 | // mind you under clang this does make a compile-fail, but doesn't point to this line, even tho if0'ing it out makes compile succeed. 
268 | { 269 | Tensor::float2x3 a; 270 | auto c = a(i,j).assignR(j,i); 271 | ECHO(c); 272 | } 273 | #endif 274 | #if 0 // ASSERT_FAILURE dims don't match, should compile-fail 275 | { 276 | Tensor::float2x3 a; 277 | Tensor::float3x2 c; 278 | c(i,j) = a(i,j); 279 | ECHO(c); 280 | } 281 | #endif 282 | #if 0 // ASSERT_FAILURE dims don't match, should compile-fail 283 | { 284 | Tensor::float2x3 a; 285 | Tensor::float2x3 c; 286 | c(i,j) = a(j,i); 287 | ECHO(c); 288 | } 289 | #endif 290 | #if 0 // ASSERT_FAILURE dims don't match, so static-asser failure 291 | { 292 | Tensor::float2x3 a; 293 | Tensor::float3x3 c; 294 | c(i,j) = a(j,i); 295 | } 296 | #endif 297 | //assignI 298 | { 299 | Tensor::float2x3 a; 300 | auto c = a(i,j).assignI(); 301 | static_assert(std::is_same_v); 302 | } 303 | { 304 | Tensor::float2x3 a; 305 | auto c = a(j,i).assignI(); 306 | static_assert(std::is_same_v); 307 | } 308 | //assign 309 | { 310 | Tensor::float2x3 a; 311 | auto c = a(i,j).assign(i,j); 312 | static_assert(std::is_same_v); 313 | } 314 | { 315 | Tensor::float2x3 a; 316 | auto c = a(i,j).assign(j,i); 317 | static_assert(std::is_same_v); 318 | } 319 | { 320 | Tensor::float2x3 a; 321 | auto c = (2.f * a(i,j)).assign(i,j); 322 | static_assert(std::is_same_v); 323 | } 324 | { 325 | Tensor::float2x3 a; 326 | auto c = (2.f * a(i,j)).assign(j,i); 327 | static_assert(std::is_same_v); 328 | } 329 | { 330 | Tensor::float2x3 a; 331 | auto c = (a(i,j) * 2.f).assign(i,j); 332 | static_assert(std::is_same_v); 333 | } 334 | { 335 | Tensor::float2x3 a; 336 | auto c = (a(i,j) * 2.f).assign(j,i); 337 | static_assert(std::is_same_v); 338 | } 339 | { 340 | Tensor::float2x3 a; 341 | Tensor::float3x2 b; 342 | auto c = (a(j,i) + b(i,j)).assign(j,i); 343 | static_assert(std::is_same_v); 344 | } 345 | // make sure inter-index permutations work 346 | // since right now tensor+tensor operator just uses the lhs 347 | { 348 | Tensor::tensor a; 349 | 350 | /* d_ijk = a_ijk */static_assert(std::is_same_v>); 351 | /* 
d_ikj = a_ijk */static_assert(std::is_same_v>); 352 | /* d_jik = a_ijk */static_assert(std::is_same_v>); 353 | /* d_jki = a_ijk */static_assert(std::is_same_v>); 354 | /* d_kij = a_ijk */static_assert(std::is_same_v>); 355 | /* d_kji = a_ijk */static_assert(std::is_same_v>); 356 | 357 | Tensor::tensor b; 358 | // d_ijk = a_ijk + b_kij 359 | // so d's dims are a's dims ... 360 | // and works only if 361 | // b's 1st dim matches a's 3rd dim 362 | // b's 2nd dim matches a's 1st dim 363 | // b's 3nd dim matches a's 2st dim 364 | auto ab1 = (a(i,j,k) + b(k,i,j)).assign(i,j,k); 365 | static_assert(std::is_same_v>); 366 | 367 | auto ab2 = (a(i,j,k) + b(k,i,j)).assign(k,i,j); 368 | static_assert(std::is_same_v>); 369 | 370 | Tensor::tensor c; 371 | auto abc = (a(i,j,k) + b(k,i,j) + c(k,j,i)).assign(j,i,k); 372 | static_assert(std::is_same_v>); 373 | } 374 | } 375 | { 376 | Tensor::Index<'i'> i; 377 | Tensor::Index<'j'> j; 378 | Tensor::double3x3 a = {{1,2,3},{4,5,6},{7,8,9}}; 379 | 380 | // symmetrize using index notation 381 | Tensor::double3x3 b; 382 | b(i,j) = .5 * (a(i,j) + a(j,i)); 383 | TEST_EQ(b, makeSym(a)); 384 | // implicit storage type, for now picks the worst case 385 | auto c = (.5 * (a(i,j) - a(j,i))).assign(i,j); 386 | static_assert(std::is_same_v); 387 | TEST_EQ(c, makeAsym(a)); 388 | } 389 | // trace of tensors ... doesn't use references but instead uses cached intermediate tensors stored in the expression-tree 390 | { 391 | Tensor::Index<'i'> i; 392 | auto a = Tensor::float3x3([](int i, int j) -> float { return 1 + j + 3 * i; }); 393 | // zero indexes == scalar result of a trace 394 | // Should IndexAccess need to wrap a fully-traced object? or should it immediately become a Scalar? 395 | // I think the latter cuz why wait for .assign()? 
396 | auto tra = a(i,i); 397 | static_assert(std::is_same_v); 398 | TEST_EQ(tra, 15); 399 | } 400 | { 401 | Tensor::Index<'i'> i; 402 | Tensor::Index<'j'> j; 403 | auto a = Tensor::tensorr(); 404 | auto b = a(i,i,j).assignR(j); 405 | static_assert(std::is_same_v); 406 | } 407 | { 408 | Tensor::Index<'i'> i; 409 | Tensor::Index<'j'> j; 410 | auto a = Tensor::tensorr(); 411 | auto b = a(i,i,j).assign(j); 412 | static_assert(std::is_same_v); 413 | } 414 | { 415 | Tensor::Index<'i'> i; 416 | Tensor::Index<'j'> j; 417 | 418 | Tensor::double3 a = {1,2,3}; 419 | Tensor::double3 b = {5,7,11}; 420 | 421 | Tensor::double3x3 c; 422 | c(i,j) = a(i) * b(j); 423 | TEST_EQ(c, Tensor::double3x3({{5,7,11},{10,14,22},{15,21,33}})); 424 | } 425 | { 426 | Tensor::Index<'i'> i; 427 | 428 | Tensor::double3 a = {1,2,3}; 429 | Tensor::double3 b = {5,7,11}; 430 | 431 | // ok, trace needed a fully dif avenue to work with IndexAccess 432 | // so will contracting alll indexes in tensor-mul 433 | auto c = a(i) * b(i); 434 | TEST_EQ(c, 52); 435 | } 436 | { 437 | Tensor::Index<'i'> i; 438 | Tensor::Index<'j'> j; 439 | Tensor::Index<'k'> k; 440 | Tensor::float3s3 a; 441 | Tensor::tensorx b; 442 | auto d = (a(i,j) * b(j,k,k)).assignI(); 443 | static_assert(std::is_same_v); 444 | } 445 | { 446 | //wedge product 447 | Tensor::Index<'i'> i; 448 | Tensor::Index<'j'> j; 449 | auto b = Tensor::float3(1,2,3); 450 | auto c = Tensor::float3(4,5,6); 451 | auto a = (b(i) * c(j) - b(j) * c(i)).assign(i,j); 452 | TEST_EQ(a, wedge(b,c)); 453 | } 454 | { 455 | //inner product 456 | Tensor::Index<'i'> i; 457 | auto b = Tensor::float3(1,2,3); 458 | auto c = Tensor::float3(4,5,6); 459 | auto d = b(i) * c(i); 460 | TEST_EQ(d, dot(b,c)); 461 | } 462 | { 463 | //matrix multiplication 464 | Tensor::Index<'i'> i; 465 | Tensor::Index<'j'> j; 466 | auto a = Tensor::float3x3({{1,2,3},{4,5,6},{7,8,9}}); 467 | auto b = Tensor::float3(1,2,3); 468 | auto c = (a(i,j) * b(j)).assignI(); 469 | TEST_EQ(c, a * b); 470 | } 471 | { 
//double trace 472 | Tensor::Index<'i'> i; 473 | Tensor::Index<'j'> j; 474 | auto a = Tensor::float3x3({{1,2,3},{4,5,6},{7,8,9}}); 475 | auto b = Tensor::float3x3({{1,2,3},{4,5,6},{7,8,9}}); 476 | auto c = a(i,j) * b(i,j); 477 | TEST_EQ(c, a.interior<2>(b)); 478 | } 479 | 480 | //Schwarzschild coordinates 481 | { 482 | using namespace Tensor; 483 | using namespace std; 484 | Index<'i'> i; 485 | Index<'j'> j; 486 | Index<'k'> k; 487 | Index<'l'> l; 488 | Index<'m'> m; 489 | double t = 1; 490 | double r = 2; 491 | double theta = 3; 492 | double phi = 4; 493 | double R = 1; 494 | auto x = double4{t,r,theta,phi}; 495 | static_assert(is_same_v); 496 | ECHO(x); 497 | auto gx = [R](double4 x) { 498 | auto [t,r,theta,phi] = x; // tie semantics 499 | return double4{(R-r)/r, r/(-R+r), r*r, r*sin(theta)*sin(theta)}.diagonal(); 500 | }; 501 | auto g = gx(x); 502 | static_assert(is_same_v); 503 | ECHO(g(0,0)); 504 | ECHO(g); 505 | ECHO(g(i,j)); // does << work? 506 | auto gux = [&](auto x) { return gx(x).inverse(); }; 507 | auto gu = gux(x); 508 | static_assert(is_same_v); 509 | ECHO(gu); 510 | // ehhh symbolic differentiation? 
511 | auto dgx = [&](auto x) { return Tensor::partialDerivative<>(gx, x); }; 512 | auto dg = dgx(x); 513 | ECHO(dg); 514 | auto connlx = [&](auto x) { 515 | auto dg = dgx(x); 516 | #if 0 // assign, naive storage 517 | return ((dg(k,i,j) + dg(j,i,k) - dg(i,j,k)) / 2).assign(i,j,k); 518 | #elif 1 // assign with specific storage 519 | return ((dg(k,i,j) + dg(j,i,k) - dg(i,j,k)) / 2).template assignR>(i,j,k); 520 | #elif 0 // assign to an already-defined variable 521 | auto connl = tensorx(); 522 | connl(i,j,k) = ((dg(k,i,j) + dg(j,i,k) - dg(i,j,k)) / 2); 523 | return connl; 524 | #elif 0 525 | auto connl = tensorx(); 526 | connl(i,j,k) = ((dg(k,i,j) + dg(j,i,k) - dg(i,j,k)) / 2); 527 | return connl; 528 | #elif 0 // assign using the inferred free indexes 529 | return connl(i,j,k) = ((dg(k,i,j) + dg(j,i,k) - dg(i,j,k)) / 2).assignI(); 530 | #endif 531 | }; 532 | auto connl = connlx(x); 533 | ECHO(connl); 534 | auto connx = [&](auto x) { 535 | return (gux(x)(i,l) * connlx(x)(l,j,k)).assign(i,j,k); 536 | }; 537 | auto conn = connx(x); 538 | ECHO(conn); 539 | auto dconnx = [&](auto x) { return Tensor::partialDerivative<>(connx, x); }; 540 | auto dconn = dconnx(x); 541 | ECHO(dconn); 542 | auto Riemann = (dconn(k,i,j,l) - dconn(l,i,j,k) + conn(i,k,m) * conn(m,j,l) - conn(i,l,m) * conn(m,j,k)).assign(i,j,k,l); 543 | ECHO(Riemann); 544 | auto Ricci = Riemann(k,i,k,j).assign(i,j); 545 | ECHO(Ricci); 546 | auto Gaussian = Ricci.dot(gu); 547 | ECHO(Gaussian); 548 | } 549 | } 550 | 551 | #if 0 552 | int main() { 553 | test_Index(); 554 | } 555 | #endif 556 | -------------------------------------------------------------------------------- /test/src/Vector.cpp: -------------------------------------------------------------------------------- 1 | #include "Test/Test.h" 2 | 3 | namespace Tensor { 4 | 5 | #define TEST_TENSOR_ADD_VECTOR_STATIC_ASSERTS(nick,ctype,dim1)\ 6 | static_assert(sizeof(nick##dim1) == sizeof(ctype) * dim1);\ 7 | static_assert(std::is_same_v);\ 8 | 
static_assert(std::is_same_v);\ 9 | static_assert(nick##dim1::rank == 1);\ 10 | static_assert(nick##dim1::dim<0> == dim1);\ 11 | static_assert(nick##dim1::numNestings == 1);\ 12 | static_assert(nick##dim1::count<0> == dim1); 13 | 14 | #define TEST_TENSOR_ADD_MATRIX_STATIC_ASSERTS(nick, ctype, dim1, dim2)\ 15 | static_assert(sizeof(nick##dim1##x##dim2) == sizeof(ctype) * dim1 * dim2);\ 16 | static_assert(nick##dim1##x##dim2::rank == 2);\ 17 | static_assert(nick##dim1##x##dim2::dim<0> == dim1);\ 18 | static_assert(nick##dim1##x##dim2::dim<1> == dim2);\ 19 | static_assert(nick##dim1##x##dim2::numNestings == 2);\ 20 | static_assert(nick##dim1##x##dim2::count<0> == dim1);\ 21 | static_assert(nick##dim1##x##dim2::count<1> == dim2); 22 | 23 | #define TEST_TENSOR_ADD_SYMMETRIC_STATIC_ASSERTS(nick, ctype, dim12)\ 24 | static_assert(sizeof(nick##dim12##s##dim12) == sizeof(ctype) * triangleSize(dim12));\ 25 | static_assert(std::is_same_v);\ 26 | static_assert(nick##dim12##s##dim12::rank == 2);\ 27 | static_assert(nick##dim12##s##dim12::dim<0> == dim12);\ 28 | static_assert(nick##dim12##s##dim12::dim<1> == dim12);\ 29 | static_assert(nick##dim12##s##dim12::numNestings == 1);\ 30 | static_assert(nick##dim12##s##dim12::count<0> == triangleSize(dim12)); 31 | 32 | #define TEST_TENSOR_ADD_ANTISYMMETRIC_STATIC_ASSERTS(nick, ctype, dim12)\ 33 | static_assert(sizeof(nick##dim12##a##dim12) == sizeof(ctype) * triangleSize(dim12-1));\ 34 | static_assert(std::is_same_v);\ 35 | static_assert(nick##dim12##a##dim12::rank == 2);\ 36 | static_assert(nick##dim12##a##dim12::dim<0> == dim12);\ 37 | static_assert(nick##dim12##a##dim12::dim<1> == dim12);\ 38 | static_assert(nick##dim12##a##dim12::numNestings == 1);\ 39 | static_assert(nick##dim12##a##dim12::count<0> == triangleSize(dim12-1)); 40 | 41 | #define TEST_TENSOR_ADD_IDENTITY_STATIC_ASSERTS(nick, ctype, dim12)\ 42 | static_assert(sizeof(nick##dim12##i##dim12) == sizeof(ctype));\ 43 | static_assert(std::is_same_v);\ 44 | 
static_assert(nick##dim12##i##dim12::rank == 2);\ 45 | static_assert(nick##dim12##i##dim12::dim<0> == dim12);\ 46 | static_assert(nick##dim12##i##dim12::dim<1> == dim12);\ 47 | static_assert(nick##dim12##i##dim12::numNestings == 1);\ 48 | static_assert(nick##dim12##i##dim12::count<0> == 1); 49 | 50 | #define TEST_TENSOR_ADD_TOTALLY_SYMMETRIC_STATIC_ASSERTS(nick, ctype, localDim, localRank, suffix)\ 51 | static_assert(sizeof(nick##suffix) == sizeof(ctype) * consteval_symmetricSize(localDim, localRank));\ 52 | static_assert(std::is_same_v);\ 53 | static_assert(nick##suffix::rank == localRank);\ 54 | static_assert(nick##suffix::dim<0> == localDim); /* TODO repeat depending on dimension */\ 55 | static_assert(nick##suffix::numNestings == 1);\ 56 | static_assert(nick##suffix::count<0> == consteval_symmetricSize(localDim, localRank)); 57 | 58 | #define TEST_TENSOR_ADD_TOTALLY_ANTISYMMETRIC_STATIC_ASSERTS(nick, ctype, localDim, localRank, suffix)\ 59 | static_assert(sizeof(nick##suffix) == sizeof(ctype) * consteval_antisymmetricSize(localDim, localRank));\ 60 | static_assert(std::is_same_v);\ 61 | static_assert(nick##suffix::rank == localRank);\ 62 | static_assert(nick##suffix::dim<0> == localDim); /* TODO repeat depending on dimension */\ 63 | static_assert(nick##suffix::numNestings == 1);\ 64 | static_assert(nick##suffix::count<0> == consteval_antisymmetricSize(localDim, localRank)); 65 | 66 | 67 | #define TEST_TENSOR_ADD_VECTOR_NICKCNAME_TYPE_DIM(nick, ctype, dim1)\ 68 | TEST_TENSOR_ADD_VECTOR_STATIC_ASSERTS(nick, ctype,dim1); 69 | 70 | #define TEST_TENSOR_ADD_MATRIX_NICKNAME_TYPE_DIM(nick, ctype, dim1, dim2)\ 71 | TEST_TENSOR_ADD_MATRIX_STATIC_ASSERTS(nick, ctype, dim1, dim2) 72 | 73 | #define TEST_TENSOR_ADD_SYMMETRIC_NICKNAME_TYPE_DIM(nick, ctype, dim12)\ 74 | TEST_TENSOR_ADD_SYMMETRIC_STATIC_ASSERTS(nick, ctype, dim12) 75 | 76 | #define TEST_TENSOR_ADD_ANTISYMMETRIC_NICKNAME_TYPE_DIM(nick, ctype, dim12)\ 77 | TEST_TENSOR_ADD_ANTISYMMETRIC_STATIC_ASSERTS(nick, 
ctype, dim12) 78 | 79 | #define TEST_TENSOR_ADD_IDENTITY_NICKNAME_TYPE_DIM(nick, ctype, dim12)\ 80 | TEST_TENSOR_ADD_IDENTITY_STATIC_ASSERTS(nick, ctype, dim12) 81 | 82 | #define TEST_TENSOR_ADD_TOTALLY_SYMMETRIC_NICKNAME_TYPE_DIM(nick, ctype, localDim, localRank, suffix)\ 83 | TEST_TENSOR_ADD_TOTALLY_SYMMETRIC_STATIC_ASSERTS(nick, ctype, localDim, localRank, suffix) 84 | 85 | #define TEST_TENSOR_ADD_TOTALLY_ANTISYMMETRIC_NICKNAME_TYPE_DIM(nick, ctype, localDim, localRank, suffix)\ 86 | TEST_TENSOR_ADD_TOTALLY_ANTISYMMETRIC_STATIC_ASSERTS(nick, ctype, localDim, localRank, suffix) 87 | 88 | #define TEST_TENSOR_ADD_NICKNAME_TYPE(nick, ctype)\ 89 | /* typed vectors */\ 90 | TEST_TENSOR_ADD_VECTOR_NICKCNAME_TYPE_DIM(nick, ctype, 2)\ 91 | TEST_TENSOR_ADD_VECTOR_NICKCNAME_TYPE_DIM(nick, ctype, 3)\ 92 | TEST_TENSOR_ADD_VECTOR_NICKCNAME_TYPE_DIM(nick, ctype, 4)\ 93 | /* typed matrices */\ 94 | TEST_TENSOR_ADD_MATRIX_NICKNAME_TYPE_DIM(nick, ctype, 2, 2)\ 95 | TEST_TENSOR_ADD_MATRIX_NICKNAME_TYPE_DIM(nick, ctype, 2, 3)\ 96 | TEST_TENSOR_ADD_MATRIX_NICKNAME_TYPE_DIM(nick, ctype, 2, 4)\ 97 | TEST_TENSOR_ADD_MATRIX_NICKNAME_TYPE_DIM(nick, ctype, 3, 2)\ 98 | TEST_TENSOR_ADD_MATRIX_NICKNAME_TYPE_DIM(nick, ctype, 3, 3)\ 99 | TEST_TENSOR_ADD_MATRIX_NICKNAME_TYPE_DIM(nick, ctype, 3, 4)\ 100 | TEST_TENSOR_ADD_MATRIX_NICKNAME_TYPE_DIM(nick, ctype, 4, 2)\ 101 | TEST_TENSOR_ADD_MATRIX_NICKNAME_TYPE_DIM(nick, ctype, 4, 3)\ 102 | TEST_TENSOR_ADD_MATRIX_NICKNAME_TYPE_DIM(nick, ctype, 4, 4)\ 103 | /* identity matrix */\ 104 | TEST_TENSOR_ADD_IDENTITY_NICKNAME_TYPE_DIM(nick, ctype, 2)\ 105 | TEST_TENSOR_ADD_IDENTITY_NICKNAME_TYPE_DIM(nick, ctype, 3)\ 106 | TEST_TENSOR_ADD_IDENTITY_NICKNAME_TYPE_DIM(nick, ctype, 4)\ 107 | /* typed symmetric matrices */\ 108 | TEST_TENSOR_ADD_SYMMETRIC_NICKNAME_TYPE_DIM(nick, ctype, 2)\ 109 | TEST_TENSOR_ADD_SYMMETRIC_NICKNAME_TYPE_DIM(nick, ctype, 3)\ 110 | TEST_TENSOR_ADD_SYMMETRIC_NICKNAME_TYPE_DIM(nick, ctype, 4)\ 111 | /* typed antisymmetric matrices */\ 
112 | TEST_TENSOR_ADD_ANTISYMMETRIC_NICKNAME_TYPE_DIM(nick, ctype, 2)\ 113 | TEST_TENSOR_ADD_ANTISYMMETRIC_NICKNAME_TYPE_DIM(nick, ctype, 3)\ 114 | TEST_TENSOR_ADD_ANTISYMMETRIC_NICKNAME_TYPE_DIM(nick, ctype, 4)\ 115 | /* totally symmetric tensors */\ 116 | TEST_TENSOR_ADD_TOTALLY_SYMMETRIC_NICKNAME_TYPE_DIM(nick, ctype, 2, 3, 2s2s2)\ 117 | TEST_TENSOR_ADD_TOTALLY_SYMMETRIC_NICKNAME_TYPE_DIM(nick, ctype, 3, 3, 3s3s3)\ 118 | TEST_TENSOR_ADD_TOTALLY_SYMMETRIC_NICKNAME_TYPE_DIM(nick, ctype, 4, 3, 4s4s4)\ 119 | TEST_TENSOR_ADD_TOTALLY_SYMMETRIC_NICKNAME_TYPE_DIM(nick, ctype, 2, 4, 2s2s2s2)\ 120 | TEST_TENSOR_ADD_TOTALLY_SYMMETRIC_NICKNAME_TYPE_DIM(nick, ctype, 3, 4, 3s3s3s3)\ 121 | TEST_TENSOR_ADD_TOTALLY_SYMMETRIC_NICKNAME_TYPE_DIM(nick, ctype, 4, 4, 4s4s4s4)\ 122 | /* totally antisymmetric tensors */\ 123 | /* can't exist: TEST_TENSOR_ADD_TOTALLY_ANTISYMMETRIC_NICKNAME_TYPE_DIM(nick, ctype, 2, 3, 2a2a2)*/\ 124 | TEST_TENSOR_ADD_TOTALLY_ANTISYMMETRIC_NICKNAME_TYPE_DIM(nick, ctype, 3, 3, 3a3a3)\ 125 | TEST_TENSOR_ADD_TOTALLY_ANTISYMMETRIC_NICKNAME_TYPE_DIM(nick, ctype, 4, 3, 4a4a4)\ 126 | /* can't exist: TEST_TENSOR_ADD_TOTALLY_ANTISYMMETRIC_NICKNAME_TYPE_DIM(nick, ctype, 2, 4, 2a2a2a2)*/\ 127 | /* can't exist: TEST_TENSOR_ADD_TOTALLY_ANTISYMMETRIC_NICKNAME_TYPE_DIM(nick, ctype, 3, 4, 3a3a3a3)*/\ 128 | TEST_TENSOR_ADD_TOTALLY_ANTISYMMETRIC_NICKNAME_TYPE_DIM(nick, ctype, 4, 4, 4a4a4a4) 129 | 130 | #define TEST_TENSOR_ADD_UTYPE(x) TEST_TENSOR_ADD_NICKNAME_TYPE(u##x,unsigned x) 131 | 132 | #define TEST_TENSOR_ADD_TYPE(x) TEST_TENSOR_ADD_NICKNAME_TYPE(x,x) 133 | 134 | TEST_TENSOR_ADD_TYPE(bool) 135 | TEST_TENSOR_ADD_TYPE(char) 136 | TEST_TENSOR_ADD_UTYPE(char) 137 | TEST_TENSOR_ADD_TYPE(short) 138 | TEST_TENSOR_ADD_UTYPE(short) 139 | TEST_TENSOR_ADD_TYPE(int) 140 | TEST_TENSOR_ADD_UTYPE(int) 141 | TEST_TENSOR_ADD_TYPE(float) 142 | TEST_TENSOR_ADD_TYPE(double) 143 | TEST_TENSOR_ADD_NICKNAME_TYPE(size, size_t) 144 | TEST_TENSOR_ADD_NICKNAME_TYPE(intptr, intptr_t) 145 | 
TEST_TENSOR_ADD_NICKNAME_TYPE(uintptr, uintptr_t) 146 | TEST_TENSOR_ADD_NICKNAME_TYPE(ldouble, long double) 147 | 148 | } 149 | 150 | namespace Tests { 151 | using namespace Tensor; 152 | using namespace Common; 153 | STATIC_ASSERT_EQ(int3::dim<0>, 3); 154 | STATIC_ASSERT_EQ(int3::rank, 1); 155 | STATIC_ASSERT_EQ((seq_get_v<0, typename int3::dimseq>), 3); 156 | STATIC_ASSERT_EQ(int3::totalCount, 3); 157 | } 158 | 159 | void test_Vector() { 160 | //vector 161 | 162 | { 163 | // 1D lists and args work 164 | Tensor::tensor f = {2}; 165 | TEST_EQ(f, (Tensor::tensor(2))); 166 | } 167 | 168 | { 169 | // 2D lists and args work 170 | Tensor::float2 f = {1,2}; 171 | TEST_EQ(f, Tensor::float2(1,2)); 172 | } 173 | 174 | { 175 | // default ctor 176 | Tensor::float3 f; 177 | for (int i = 0; i < f.dim<0>; ++i) { 178 | TEST_EQ(f.s[i], 0); 179 | } 180 | } 181 | 182 | { 183 | // parenthesis ctor 184 | Tensor::float3 f(4,5,7); 185 | 186 | // initializer list ctor 187 | Tensor::float3 g = {7,1,2}; 188 | 189 | //.dims 190 | static_assert(f.rank == 1); 191 | static_assert(f.dims() == 3); 192 | static_assert(f.dims() == Tensor::intN<1>(3)); 193 | static_assert(f.dim<0> == 3); 194 | static_assert(f.numNestings == 1); 195 | static_assert(f.count<0> == 3); 196 | 197 | //test .x .y .z 198 | TEST_EQ(f.x, 4); 199 | TEST_EQ(f.y, 5); 200 | TEST_EQ(f.z, 7); 201 | // test .s0 .s1 .s2 202 | TEST_EQ(f.s0, 4); 203 | TEST_EQ(f.s1, 5); 204 | TEST_EQ(f.s2, 7); 205 | // test .s[] 206 | TEST_EQ(f.s[0], 4); 207 | TEST_EQ(f.s[1], 5); 208 | TEST_EQ(f.s[2], 7); 209 | // test () indexing 210 | TEST_EQ(f(0), 4); 211 | TEST_EQ(f(1), 5); 212 | TEST_EQ(f(2), 7); 213 | 214 | //test [] indexing 215 | TEST_EQ(f[0], 4); 216 | TEST_EQ(f[1], 5); 217 | TEST_EQ(f[2], 7); 218 | 219 | // indexing 220 | { 221 | auto f = [](int i) -> float { return i+1; }; 222 | Tensor::float3 t(f); 223 | verifyAccessRank1(t, f); 224 | verifyAccessRank1(t, f); 225 | } 226 | 227 | //lambda ctor 228 | TEST_EQ(f, Tensor::float3([](int i) -> 
float { return 4 + i * (i + 1) / 2; })); 229 | TEST_EQ(f, Tensor::float3([](Tensor::intN<1> i) -> float { return 4 + i(0) * (i(0) + 1) / 2; })); 230 | 231 | // scalar ctor 232 | TEST_EQ(Tensor::float3(3), Tensor::float3(3,3,3)); 233 | 234 | // casting 235 | Tensor::int3 fi = {4,5,7}; 236 | Tensor::double3 fd = {4,5,7}; 237 | TEST_EQ(f, (Tensor::float3)fi); 238 | TEST_EQ(f, (Tensor::float3)fd); 239 | 240 | //iterator 241 | { 242 | auto i = f.begin(); 243 | TEST_EQ(*i, 4); ++i; 244 | TEST_EQ(*i, 5); i++; 245 | TEST_EQ(*i, 7); i++; 246 | TEST_EQ(i, f.end()); 247 | 248 | for (auto & i : f) { 249 | std::cout << "f iter = " << i << std::endl; 250 | } 251 | for (auto const & i : f) { 252 | std::cout << "f iter = " << i << std::endl; 253 | } 254 | // TODO verify cbegin/cend 255 | // TODO support for rbegin/rend const/not const and crbegin/crend 256 | } 257 | 258 | // operators 259 | // vector/scalar operations 260 | TEST_EQ(f+1.f, Tensor::float3(5,6,8)); 261 | TEST_EQ(f-1.f, Tensor::float3(3,4,6)); 262 | TEST_EQ(f*12.f, Tensor::float3(48, 60, 84)); 263 | TEST_EQ(f/2.f, Tensor::float3(2.f, 2.5f, 3.5f)); 264 | // scalar/vector operations 265 | TEST_EQ(1.f+f, Tensor::float3(5,6,8)); 266 | TEST_EQ(1.f-f, Tensor::float3(-3, -4, -6)); 267 | TEST_EQ(12.f*f, Tensor::float3(48, 60, 84)); 268 | TEST_EQ(2.f/f, Tensor::float3(0.5, 0.4, 0.28571428571429)); 269 | // vector/vector operations 270 | TEST_EQ(f+g, Tensor::float3(11, 6, 9)); 271 | TEST_EQ(f-g, Tensor::float3(-3, 4, 5)); 272 | TEST_EQ(f/g, Tensor::float3(0.57142857142857, 5.0, 3.5)); // wow, this equality passes while sqrt(90) fails 273 | // unary 274 | TEST_EQ(-f, Tensor::float3(-4, -5, -7)); 275 | 276 | // for vector*vector I'm picking the scalar-result of the 2 GLSL options (are there three? can you do mat = vec * vec in GLSL?) 
277 | // this fits with general compatability of tensor operator* being outer+contract 278 | TEST_EQ(f*g, 47) 279 | 280 | // op= scalar 281 | { Tensor::float3 h = f; h += 2; TEST_EQ(h, Tensor::float3(6,7,9)); } 282 | { Tensor::float3 h = f; h -= 3; TEST_EQ(h, Tensor::float3(1,2,4)); } 283 | { Tensor::float3 h = f; h *= 3; TEST_EQ(h, Tensor::float3(12,15,21)); } 284 | { Tensor::float3 h = f; h /= 4; TEST_EQ(h, Tensor::float3(1,1.25,1.75)); } 285 | // op= vector 286 | { Tensor::float3 h = f; h += Tensor::float3(3,2,1); TEST_EQ(h, Tensor::float3(7,7,8)); } 287 | { Tensor::float3 h = f; h -= Tensor::float3(5,0,9); TEST_EQ(h, Tensor::float3(-1,5,-2)); } 288 | { Tensor::float3 h = f; h *= Tensor::float3(-1,1,-2); TEST_EQ(h, Tensor::float3(-4,5,-14)); } 289 | { Tensor::float3 h = f; h /= Tensor::float3(-2,3,-4); TEST_EQ(h, Tensor::float3(-2, 5.f/3.f, -1.75)); } 290 | 291 | //operator<< works? 292 | std::cout << f << std::endl; 293 | // to_string works? 294 | std::cout << std::to_string(f) << std::endl; 295 | 296 | // dot product 297 | TEST_EQ(dot(f,g), 47) 298 | TEST_EQ(f.dot(g), 47) 299 | 300 | // length-squared 301 | TEST_EQ(f.lenSq(), 90); 302 | TEST_EQ(lenSq(f), 90); 303 | 304 | // length 305 | TEST_EQ_EPS(f.length(), sqrt(90), 1e-6); 306 | TEST_EQ_EPS(length(f), sqrt(90), 1e-6); 307 | 308 | // cros product 309 | TEST_EQ(cross(f,g), Tensor::float3(3, 41, -31)) 310 | 311 | // outer product 312 | // hmm, in the old days macros couldn't detect <>'s so you'd have to wrap them in ()'s if the <>'s had ,'s in them 313 | // now same persists for {}'s it seems 314 | auto fouterg = outer(f,g); 315 | TEST_EQ(fouterg, Tensor::float3x3( 316 | {28, 4, 8}, 317 | {35, 5, 10}, 318 | {49, 7, 14} 319 | )); 320 | 321 | auto gouterf = transpose(fouterg); 322 | TEST_EQ(gouterf, Tensor::float3x3( 323 | {28, 35, 49}, 324 | {4, 5, 7}, 325 | {8, 10, 14} 326 | )); 327 | 328 | // TODO vector subset access 329 | 330 | // swizzle 331 | // TODO need an operator== between T and reference_wrapper ... 
332 | // or casting ctor? 333 | // a generic ctor between vecs would be nice, but maybe problematic for mat = sym 334 | TEST_EQ(Tensor::float3(f.zyx()), Tensor::float3(7,5,4)); 335 | TEST_EQ(Tensor::float2(f.xy()), Tensor::float2(4,5)); 336 | TEST_EQ(Tensor::float2(f.yx()), Tensor::float2(5,4)); 337 | TEST_EQ(Tensor::float2(f.yy()), Tensor::float2(5,5)); 338 | { 339 | auto x = Tensor::float3(1,2,3); 340 | x = x.yzx(); 341 | TEST_EQ(x, Tensor::float3(2,3,1)); 342 | // ERROR: no matching constructor for initialization of 'std::reference_wrapper' 343 | // x.yzx() = Tensor::float3(7,8,9); 344 | // TEST_EQ(x, Tensor::float3(9,7,8)); 345 | } 346 | 347 | static_assert(sizeof(Tensor::float3a3a3) == sizeof(float)); 348 | 349 | /* more tests ... 350 | float2 float4 351 | int2 int3 int4 352 | 353 | default-template vectors of dif sizes (5 maybe? ... ) 354 | assert that no .x exists to verify 355 | */ 356 | 357 | // verify vec4 list constructor works 358 | Tensor::float4 h = {2,3,4,5}; 359 | TEST_EQ(h.x, 2); 360 | TEST_EQ(h.y, 3); 361 | TEST_EQ(h.z, 4); 362 | TEST_EQ(h.w, 5); 363 | 364 | Tensor::vec j = {5,6,7,8,9}; 365 | //non-specialized: can't use xyzw for dim>4 366 | TEST_EQ(j[0], 5); 367 | TEST_EQ(j[1], 6); 368 | TEST_EQ(j[2], 7); 369 | TEST_EQ(j[3], 8); 370 | TEST_EQ(j[4], 9); 371 | 372 | // iterator copy 373 | Tensor::float3 f2; 374 | std::copy(f2.begin(), f2.end(), f.begin()); // crashing ... 375 | TEST_EQ(f, f2); 376 | 377 | //verify iterators look alright 378 | // btw std::copy might work dif in release than debug? 
379 | { 380 | std::array fa; 381 | auto fai = fa.begin(); 382 | auto fi = f.begin(); 383 | TEST_EQ(fi.index, Tensor::intN<1>(0)); 384 | TEST_NE(fi, f.end()); 385 | TEST_EQ(&*fi, &f[0]); 386 | TEST_EQ(&*fai, &fa[0]); 387 | ++fai; ++fi; 388 | TEST_EQ(fi.index, Tensor::intN<1>(1)); 389 | TEST_NE(fi, f.end()); 390 | TEST_EQ(&*fi, &f[1]); 391 | TEST_EQ(&*fai, &fa[1]); 392 | ++fai; ++fi; 393 | TEST_EQ(fi.index, Tensor::intN<1>(2)); 394 | TEST_NE(fi, f.end()); 395 | TEST_EQ(&*fi, &f[2]); 396 | TEST_EQ(&*fai, &fa[2]); 397 | ++fai; ++fi; 398 | TEST_EQ(fi.index, Tensor::intN<1>(3)); 399 | TEST_EQ(fi, f.end()); 400 | } 401 | 402 | // iterator copy from somewhere else 403 | { 404 | std::array fa; 405 | std::copy(f.begin(), f.end(), fa.begin()); 406 | TEST_EQ(fa[0], f[0]); 407 | TEST_EQ(fa[1], f[1]); 408 | TEST_EQ(fa[2], f[2]); 409 | } 410 | 411 | // operators 412 | operatorScalarTest(f); 413 | 414 | // contract 415 | 416 | TEST_EQ((Tensor::contract<0,0>(Tensor::float3(1,2,3))), 6); 417 | } 418 | 419 | //old libraries' tests 420 | { 421 | { 422 | //arg ctor works 423 | Tensor::float3 a(1,2,3); 424 | 425 | //bracket ctor works 426 | Tensor::float3 b = {4,5,6}; 427 | 428 | //access 429 | TEST_EQ(a(0), 1); 430 | TEST_EQ(a[0], 1); 431 | 432 | //make sure GenericArray functionality works 433 | TEST_EQ(Tensor::float3(1), Tensor::float3(1,1,1)); 434 | 435 | // new lib doesn't support this ... but should it? 
436 | //TEST_EQ(Tensor::float3(1,2), Tensor::float3(1,2,0)); 437 | 438 | TEST_EQ(b + a, Tensor::float3(5,7,9)); 439 | TEST_EQ(b - a, Tensor::float3(3,3,3)); 440 | TEST_EQ(b * a, 32); 441 | TEST_EQ(Tensor::float3(2,4,6)/Tensor::float3(1,2,3), Tensor::float3(2,2,2)); 442 | TEST_EQ(b * 2., Tensor::float3(8, 10, 12)); 443 | TEST_EQ(Tensor::float3(2,4,6)/2., Tensor::float3(1,2,3)); 444 | } 445 | } 446 | 447 | //equivalent of tensor ctor of varying dimension 448 | { 449 | Tensor::float2 t = {1,2}; 450 | Tensor::float3 x; 451 | auto w = x.write(); 452 | for (auto i = w.begin(); i != w.end(); ++i) { 453 | /* TODO instead an index range iterator that spans the minimum of dims of this and t */ 454 | if (Tensor::float2::validIndex(i.readIndex)) { 455 | /* If we use operator()(intN<>) access working for asym ... */ 456 | /**i = (Scalar)t(i.readIndex);*/ 457 | /* ... or just replace the internal storage with std::array ... */ 458 | *i = std::apply(t, i.readIndex.s); 459 | } else { 460 | *i = decltype(x)::Scalar(); 461 | } 462 | } 463 | } 464 | { 465 | using namespace Tensor; 466 | float2 a = {1,2}; 467 | float3 b = a; 468 | TEST_EQ(b, float3(1,2,0)); 469 | } 470 | 471 | //tie semantics / structure binding 472 | // https://en.cppreference.com/w/cpp/language/structured_binding 473 | { 474 | using namespace Tensor; 475 | float3 a = {1,2,3}; 476 | { 477 | /* 478 | using this single declration for all types works for const and & ... but gives bad values: 479 | 480 | template 481 | requires Tensor::is_tensor_v> 482 | decltype(auto) get(T p) { 483 | static_assert(i < T::localDim, "index out of bounds for Tensor"); 484 | return p[i]; 485 | } 486 | 487 | terminate called after throwing an instance of 'Common::Exception' 488 | what(): src/Vector.cpp:465: ax == 1 :: 7.00649e-45 == 1 FAILED! 
489 | 490 | */ 491 | auto [ax,ay,az] = a; 492 | TEST_EQ(ax, 1); 493 | TEST_EQ(ay, 2); 494 | TEST_EQ(az, 3); 495 | } 496 | { 497 | auto & [ax,ay,az] = a; 498 | TEST_EQ(ax, 1); 499 | TEST_EQ(ay, 2); 500 | TEST_EQ(az, 3); 501 | } 502 | { 503 | auto && [ax,ay,az] = a; 504 | TEST_EQ(ax, 1); 505 | TEST_EQ(ay, 2); 506 | TEST_EQ(az, 3); 507 | } 508 | #if 0 509 | { 510 | auto const [ax,ay,az] = a; 511 | TEST_EQ(ax, 1); 512 | TEST_EQ(ay, 2); 513 | TEST_EQ(az, 3); 514 | } 515 | { 516 | auto const & [ax,ay,az] = a; 517 | TEST_EQ(ax, 1); 518 | TEST_EQ(ay, 2); 519 | TEST_EQ(az, 3); 520 | } 521 | #endif 522 | } 523 | } 524 | -------------------------------------------------------------------------------- /test/src/TensorRank4.cpp: -------------------------------------------------------------------------------- 1 | #include "Test/Test.h" 2 | 3 | void test_TensorRank4() { 4 | // rank-4 5 | 6 | 7 | // vec-vec-vec-vec 8 | { 9 | using T = Tensor::tensorr; 10 | auto f = [](int i, int j, int k, int l) -> float { return i+j+k+l; }; 11 | auto t = T(f); 12 | verifyAccessRank4(t, f); 13 | verifyAccessRank4(t, f); 14 | } 15 | 16 | // sym-vec-vec 17 | { 18 | using T = Tensor::tensori, Tensor::storage_vec<3>, Tensor::storage_vec<3>>; 19 | auto f = [](int i, int j, int k, int l) -> float { return i+j+k+l; }; 20 | auto t = T(f); 21 | verifyAccessRank4(t, f); 22 | verifyAccessRank4(t, f); 23 | } 24 | 25 | #if 0 26 | // asym-vec-vec 27 | { 28 | using T = Tensor::tensori, Tensor::storage_vec<3>, Tensor::storage_vec<3>>; 29 | auto f = [](int i, int j, int k, int l) -> float { return i-j+k+l; }; 30 | auto t = T(f); 31 | verifyAccessRank4(t, f); 32 | verifyAccessRank4(t, f); 33 | } 34 | #endif 35 | 36 | // vec-sym-vec 37 | { 38 | using T = Tensor::tensori, Tensor::storage_sym<3>, Tensor::storage_vec<3>>; 39 | auto f = [](int i, int j, int k, int l) -> float { return i+j+k+l; }; 40 | auto t = T(f); 41 | verifyAccessRank4(t, f); 42 | verifyAccessRank4(t, f); 43 | } 44 | 45 | #if 0 46 | // vec-asym-vec 
47 | { 48 | using T = Tensor::tensori, Tensor::storage_asym<3>, Tensor::storage_vec<3>>; 49 | auto f = [](int i, int j, int k, int l) -> float { return i+j-k+l; }; 50 | auto t = T(f); 51 | verifyAccessRank4(t, f); 52 | verifyAccessRank4(t, f); 53 | } 54 | #endif 55 | 56 | // vec-vec-sym 57 | { 58 | using T = Tensor::tensori, Tensor::storage_vec<3>, Tensor::storage_sym<3>>; 59 | auto f = [](int i, int j, int k, int l) -> float { return i+j+k+l; }; 60 | auto t = T(f); 61 | verifyAccessRank4(t, f); 62 | verifyAccessRank4(t, f); 63 | } 64 | #if 0 65 | // vec-vec-asym 66 | { 67 | using T = Tensor::tensori, Tensor::storage_vec<3>, Tensor::storage_asym<3>>; 68 | auto f = [](int i, int j, int k, int l) -> float { return i+j+k-l; }; 69 | auto t = T(f); 70 | verifyAccessRank4(t, f); 71 | verifyAccessRank4(t, f); 72 | } 73 | #endif 74 | 75 | // sym-sym 76 | { 77 | using T = Tensor::tensori, Tensor::storage_sym<3>>; 78 | auto f = [](int i, int j, int k, int l) -> float { return i+j+k+l; }; 79 | auto t = T(f); 80 | verifyAccessRank4(t, f); 81 | verifyAccessRank4(t, f); 82 | } 83 | 84 | #if 0 // asym-asym 85 | { 86 | using T = Tensor::tensori, Tensor::storage_asym<3>>; 87 | auto f = [](int i, int j, int k, int l) -> float { return i+j+k+l; }; 88 | auto t = T(f); 89 | verifyAccessRank4(t, f); 90 | verifyAccessRank4(t, f); 91 | } 92 | #endif 93 | 94 | { 95 | using Real = double; 96 | using Riemann2 = Tensor::tensori, Tensor::storage_asym<2>>; 97 | //using Riemann2 = Tensor::asym, 2>; // R_[ij][kl] 98 | //using Riemann2 = Tensor::sym, 2>; // ... R_(ij)[kl] ... 99 | // how would I define R_( [ij] [kl ) ... i.e. R_ijkl = R_klij and R_ijkl = -R_jikl ? 
100 | auto r = Riemann2{{1}}; 101 | static_assert(Riemann2::rank == 4); 102 | static_assert(Riemann2::dim<0> == 2); 103 | static_assert(Riemann2::dim<1> == 2); 104 | static_assert(Riemann2::dim<2> == 2); 105 | static_assert(Riemann2::dim<3> == 2); 106 | static_assert(Riemann2::numNestings == 2); 107 | static_assert(Riemann2::count<0> == 1); 108 | static_assert(Riemann2::count<1> == 1); 109 | static_assert(sizeof(Riemann2) == sizeof(Real)); 110 | auto r00 = r(0,0); // this type will be a ZERO AntiSymRef wrapper around ... nothing ... 111 | ECHO(r00); 112 | TEST_EQ(r00, (Tensor::AntiSymRef>())); // r(0,0) is this type 113 | TEST_EQ(r00, (Tensor::asym{})); // ... and r(0,0)'s operator== accepts its wrapped type 114 | TEST_EQ(r00(0,0), (Tensor::AntiSymRef())); // r(0,0)(0,0) is this 115 | TEST_EQ(r00(0,0).how, Tensor::Sign::ZERO); 116 | TEST_EQ(r00(0,0), 0.); 117 | TEST_EQ(r00(0,1), 0.); 118 | TEST_EQ(r00(1,0), 0.); 119 | TEST_EQ(r00(1,1), 0.); 120 | auto r01 = r(0,1); // this will point to the positive r.x_y element 121 | TEST_EQ(r01, (Tensor::asym{1})); 122 | TEST_EQ(r01(0,0), 0); //why would this get a bad ref? 
123 | TEST_EQ(r01(0,1), 1); 124 | TEST_EQ(r01(1,0), -1); 125 | TEST_EQ(r01(1,1), 0); 126 | auto r10 = r(1,0); 127 | TEST_EQ(r10, (Tensor::asym{-1})); 128 | TEST_EQ(r10(0,0), 0); 129 | TEST_EQ(r10(0,1), -1); 130 | TEST_EQ(r10(1,0), 1); 131 | TEST_EQ(r10(1,1), 0); 132 | auto r11 = r(1,1); 133 | TEST_EQ(r11(0,0), 0.); 134 | TEST_EQ(r11(0,1), 0.); 135 | TEST_EQ(r11(1,0), 0.); 136 | TEST_EQ(r11(1,1), 0.); 137 | } 138 | { 139 | constexpr int N = 3; 140 | using Riemann3 = Tensor::tensori, Tensor::storage_asym>; 141 | auto r = Riemann3(); 142 | static_assert(Riemann3::rank == 4); 143 | static_assert(Riemann3::dim<0> == N); 144 | static_assert(Riemann3::dim<1> == N); 145 | static_assert(Riemann3::dim<2> == N); 146 | static_assert(Riemann3::dim<3> == N); 147 | static_assert(Riemann3::numNestings == 2); 148 | static_assert(Riemann3::count<0> == 3); //3x3 antisymmetric has 3 unique components 149 | static_assert(Riemann3::count<1> == 3); 150 | //TODO some future work: R_ijkl = R_klij, so it's also symmetri between 1&2 and 3&4 ... 151 | // ... and optimizing for those should put us at only 6 unique values instead of 9 152 | static_assert(sizeof(Riemann3) == sizeof(double) * 9); 153 | 154 | double e = 0; 155 | for (int i = 0; i < N; ++i) { 156 | for (int j = 0; j < i; ++j) { 157 | for (int k = 0; k < N; ++k) { 158 | for (int l = 0; l < N; ++l) { 159 | r(i,j)(k,l) = ++e; 160 | if (i == j || k == l) { 161 | TEST_EQ(r(i,j)(k,l), 0); 162 | } else { 163 | TEST_EQ(r(i,j)(k,l), e); 164 | TEST_EQ(r(i,j)(l,k), -e); 165 | TEST_EQ(r(j,i)(k,l), -e); 166 | TEST_EQ(r(j,i)(l,k), e); 167 | 168 | TEST_EQ(r(i,j,k,l), e); 169 | TEST_EQ(r(i,j,l,k), -e); 170 | TEST_EQ(r(j,i,k,l), -e); 171 | TEST_EQ(r(j,i,l,k), e); 172 | } 173 | } 174 | } 175 | } 176 | } 177 | 178 | // TODO change tensor generic ctor to (requires tensors and) accept any type, iterate over write elements, assign one by one. 
179 | 180 | // auto m = Tensor::ExpandAllIndexes([&](Tensor::int4 i) -> double { 181 | // return r(i); 182 | // }); 183 | } 184 | 185 | { 186 | // TODO verify 3- nestings deep of antisym works 187 | } 188 | 189 | // old libraries' tests 190 | { 191 | using Real = double; 192 | using Vector = Tensor::tensor; 193 | 194 | Vector v = {1,2,3}; 195 | TEST_EQ(v, Tensor::double3(1,2,3)); 196 | 197 | using Metric = Tensor::tensori>; 198 | Metric g; 199 | for (int i = 0; i < 3; ++i) { 200 | g(i,i) = 1; 201 | } 202 | TEST_EQ(g, Metric(1,0,1,0,0,1)); 203 | 204 | using Matrix = Tensor::tensor; 205 | Matrix h; 206 | int index = 0; 207 | for (int i = 0; i < 3; ++i) { 208 | for (int j = 0; j < 3; ++j) { 209 | h(i,j) = ++index; 210 | } 211 | } 212 | TEST_EQ(h, (Matrix{{1,2,3},{4,5,6},{7,8,9}})); 213 | 214 | //iterator access 215 | int j = 0; 216 | Tensor::tensor ta; 217 | for (auto i = ta.begin(); i != ta.end(); ++i) { 218 | *i = j++; 219 | } 220 | for (int i = 0; i < 3; ++i) { 221 | for (int j = 0; j < 3; ++j) { 222 | for (int k = 0; k < 3; ++k) { 223 | if constexpr (Tensor::int2::useReadIteratorOuter) { 224 | TEST_EQ(ta(i,j,k), k + 3 * (j + 3 * i)); 225 | } else { 226 | TEST_EQ(ta(i,j,k), i + 3 * (j + 3 * k)); 227 | } 228 | } 229 | } 230 | } 231 | 232 | //subtensor access not working 233 | Tensor::tensor tb; 234 | for (auto i = tb.begin(); i != tb.end(); ++i) *i = 2.f; 235 | TEST_EQ(tb, Matrix(2.f)); 236 | ta(0) = tb; 237 | TEST_EQ(ta, (Tensor::tensor{ 238 | {{2, 2, 2}, {2, 2, 2}, {2, 2, 2}}, 239 | ta(1),//{{1, 10, 19}, {4, 13, 22}, {7, 16, 25}}, // these are whatever the original ta was 240 | ta(2),//{{2, 11, 20}, {5, 14, 23}, {8, 17, 26}} 241 | } )); 242 | Tensor::tensor tc; 243 | for (auto i = tc.begin(); i != tc.end(); ++i) *i = 3.; 244 | TEST_EQ(Tensor::double3(3), tc); 245 | ta(0,0) = tc; 246 | TEST_EQ(ta, (Tensor::tensor{ 247 | {{3, 3, 3}, {2, 2, 2}, {2, 2, 2}}, 248 | ta(1),//{{1, 10, 19}, {4, 13, 22}, {7, 16, 25}}, 249 | ta(2),//{{2, 11, 20}, {5, 14, 23}, {8, 17, 26}} 250 
| })); 251 | 252 | //inverse 253 | Matrix m; 254 | for (int i = 0; i < m.dim<0>; ++i) { 255 | for (int j = 0; j < m.dim<1>; ++j) { 256 | m(i,j) = i == j ? 1 : 0; 257 | } 258 | } 259 | 260 | // convert the sym diagonal to mat 261 | // TODO operator== between matrices 262 | auto d = diagonal(Tensor::vec>(1)); 263 | TEST_EQ(m, d); 264 | TEST_EQ(m, (Matrix{{1,0,0},{0,1,0},{0,0,1}})); 265 | TEST_EQ(Tensor::determinant(m), 1); 266 | } 267 | 268 | { 269 | using namespace Tensor; 270 | using I = ident; 271 | { 272 | auto a = ident(4); 273 | TEST_EQ(a, I(4)); 274 | } 275 | { 276 | auto a = ident(); 277 | TEST_EQ(a, I(0)); 278 | } 279 | { 280 | auto t = ident{5}; 281 | t.s[0] = 1; 282 | TEST_EQ(t, I(1)); 283 | t[0][0] = 2; 284 | TEST_EQ(t, I(2)); 285 | t(0,0) = 3; 286 | TEST_EQ(t, I(3)); 287 | t(int2(0,0)) = 4; 288 | TEST_EQ(t, I(4)); 289 | } 290 | #if 0 // TODO vector * ident3x3 291 | { 292 | auto x = float3{1,2,3} * ident(1.); 293 | ECHO(x); 294 | } 295 | #endif 296 | } 297 | 298 | // can you vector non-numeric types? 299 | { 300 | using namespace Tensor; 301 | using namespace std; 302 | // "attempt to use a deleted function" 303 | // "destructor of 'vec<..., 3>' is implicitly deleted because variant field '' has a non-trivial destructor" 304 | //auto t = vec, 3>(); // fails 305 | //auto t = vec>, 3>(); // fails 306 | auto t = vec>, 5>(); // works 307 | //auto t = vec, 4>(); // works, but meh 308 | //ECHO(t); 309 | //auto t = vec(); // hmm my fixed-size specialization vectors can't ctor 310 | //auto t = vec(); // but the default case works fine 311 | //ECHO(t); 312 | } 313 | 314 | // multiply (operator*) and tensor functions 315 | 316 | { 317 | Tensor::float3x3 m = { 318 | {1,2,3}, 319 | {4,5,6}, 320 | {7,8,9}, 321 | }; 322 | 323 | // TODO since operator* is based on tensor products, maybe put it later? 
324 | // put all the tensor operations at the end 325 | // operator * 326 | { 327 | auto a = Tensor::int2{7, -2}; 328 | TEST_EQ((Tensor::contract<0,0>(a)), 5); 329 | auto b = Tensor::int2x2 330 | {{6, 9}, 331 | {6, -6}}; 332 | TEST_EQ((Tensor::contract<0,1>(b)), 0); 333 | TEST_EQ((Tensor::contract<1,0>(b)), 0); 334 | static_assert(std::is_same_v, Tensor::int2>); 335 | ECHO((Tensor::contract<0,0>(b))); 336 | ECHO((Tensor::contract<1,1>(b))); 337 | 338 | auto aouterb = Tensor::tensorr{{{42, 63}, {42, -42}}, {{-12, -18}, {-12, 12}}}; 339 | TEST_EQ(outer(a,b), aouterb); 340 | ECHO((Tensor::contract<0,0>(aouterb))); 341 | 342 | static_assert(std::is_same_v::RemoveIndex<0>, Tensor::int2x2>); 343 | static_assert(std::is_same_v::RemoveIndex<1>, Tensor::int2x2>); 344 | static_assert(std::is_same_v::RemoveIndex<2>, Tensor::int2x2>); 345 | static_assert(std::is_same_v, Tensor::int2>); 346 | static_assert(std::is_same_v, Tensor::int2>); 347 | static_assert(std::is_same_v::RemoveIndex<0,1>, Tensor::int2>); 348 | ECHO((Tensor::contract<0,1>(aouterb))); 349 | 350 | ECHO((Tensor::contract<0,2>(aouterb))); 351 | ECHO((Tensor::contract<1,0>(aouterb))); 352 | ECHO((Tensor::contract<1,1>(aouterb))); 353 | ECHO((Tensor::contract<1,2>(aouterb))); 354 | ECHO((Tensor::contract<2,0>(aouterb))); 355 | ECHO((Tensor::contract<2,1>(aouterb))); 356 | ECHO((Tensor::contract<2,2>(aouterb))); 357 | auto atimesb = Tensor::int2{30, 75}; 358 | TEST_EQ(a * b, atimesb); 359 | 360 | TEST_EQ( (Tensor::int2{-3, 6} 361 | * Tensor::int2x3{ 362 | {-5, 0, -3}, 363 | {1, 3, 0}}), 364 | (Tensor::int3{21, 18, 9})) 365 | TEST_EQ( (Tensor::int2{9, 9} 366 | * Tensor::int2x4{ 367 | {3, -8, -10, -8}, 368 | {5, 2, -5, 6}}), 369 | (Tensor::int4{72, -54, -135, -18})) 370 | TEST_EQ( (Tensor::int3{-7, -2, -8} 371 | * Tensor::int3x2{ 372 | {-8, 0}, 373 | {10, 7}, 374 | {-6, 2}}), 375 | (Tensor::int2{84, -30})) 376 | TEST_EQ( (Tensor::int3{-4, 3, 1} 377 | * Tensor::int3x3{ 378 | {0, 6, -2}, 379 | {10, 1, 8}, 380 | {-4, 6, 
-5}}), 381 | (Tensor::int3{26, -15, 27})) 382 | TEST_EQ( (Tensor::int3{-3, 6, 9} 383 | * Tensor::int3x4{ 384 | {-9, -9, 8, -10}, 385 | {9, -6, -3, -1}, 386 | {1, 3, -9, -9}}), 387 | (Tensor::int4{90, 18, -123, -57})) 388 | TEST_EQ( (Tensor::int4{-5, 10, 8, 7} 389 | * Tensor::int4x2{ 390 | {-5, 0}, 391 | {1, 4}, 392 | {-3, 1}, 393 | {5, -10}}), 394 | (Tensor::int2{46, -22})) 395 | TEST_EQ( (Tensor::int4{-1, 9, 9, 5} 396 | * Tensor::int4x3{ 397 | {-5, 4, 7}, 398 | {5, -7, -4}, 399 | {3, -1, -6}, 400 | {-3, 8, 8}}), 401 | (Tensor::int3{62, -36, -57})) 402 | TEST_EQ( (Tensor::int4{-3, 4, 10, 2} 403 | * Tensor::int4x4{ 404 | {3, -2, 0, -7}, 405 | {8, 7, -6, 8}, 406 | {-1, 4, 9, 3}, 407 | {9, 9, 9, -1}}), 408 | (Tensor::int4{31, 92, 84, 81})) 409 | } 410 | 411 | // TODO make sure operator* matrix/vector, matrix/matrix, vector/matrix works 412 | // TODO I don't think I have marix *= working yet 413 | 414 | auto m2 = elemMul(m,m); 415 | for (int i = 0; i < m.dim<0>; ++i) { 416 | for (int j = 0; j < m.dim<1>; ++j) { 417 | TEST_EQ(m2(i,j), m(i,j) * m(i,j)); 418 | } 419 | } 420 | 421 | //determinant 422 | 423 | TEST_EQ(determinant(m), 0); 424 | 425 | // transpose 426 | 427 | TEST_EQ(Tensor::float3x3( 428 | {1,2,3}, 429 | {4,5,6}, 430 | {7,8,9} 431 | ), Tensor::transpose(Tensor::float3x3( 432 | {1,4,7}, 433 | {2,5,8}, 434 | {3,6,9} 435 | ))); 436 | 437 | TEST_EQ(Tensor::trace(Tensor::float3x3( 438 | {1,2,3}, 439 | {4,5,6}, 440 | {7,8,9} 441 | )), 15); 442 | } 443 | 444 | // TODO this all goes in a tensor-math test case 445 | { 446 | using float3x3x3x3 = Tensor::tensorr; 447 | auto a = float3x3x3x3([](int i, int j, int k, int l) -> float { 448 | return i - 2 * j + 3 * k - 4 * l; 449 | }); 450 | TEST_EQ(a, (float3x3x3x3 451 | {{{{0, -4, -8}, 452 | {3, -1, -5}, 453 | {6, 2, -2}}, 454 | {{-2, -6, -10}, 455 | {1, -3, -7}, 456 | {4, 0, -4}}, 457 | {{-4, -8, -12}, 458 | {-1, -5, -9}, 459 | {2, -2, -6}}}, 460 | {{{1, -3, -7}, 461 | {4, 0, -4}, 462 | {7, 3, -1}}, 463 | {{-1, -5, -9}, 
464 | {2, -2, -6}, 465 | {5, 1, -3}}, 466 | {{-3, -7, -11}, 467 | {0, -4, -8}, 468 | {3, -1, -5}}}, 469 | {{{2, -2, -6}, 470 | {5, 1, -3}, 471 | {8, 4, 0}}, 472 | {{0, -4, -8}, 473 | {3, -1, -5}, 474 | {6, 2, -2}}, 475 | {{-2, -6, -10}, 476 | {1, -3, -7}, 477 | {4, 0, -4}}}} 478 | )); 479 | auto b = float3x3x3x3([](int i, int j, int k, int l) -> float { 480 | return 5 * l - 6 * k + 7 * j - 8 * i; 481 | }); 482 | TEST_EQ(b, (float3x3x3x3 483 | {{{{0, 5, 10}, 484 | {-6, -1, 4}, 485 | {-12, -7, -2}}, 486 | {{7, 12, 17}, 487 | {1, 6, 11}, 488 | {-5, 0, 5}}, 489 | {{14, 19, 24}, 490 | {8, 13, 18}, 491 | {2, 7, 12}}}, 492 | {{{-8, -3, 2}, 493 | {-14, -9, -4}, 494 | {-20, -15, -10}}, 495 | {{-1, 4, 9}, 496 | {-7, -2, 3}, 497 | {-13, -8, -3}}, 498 | {{6, 11, 16}, 499 | {0, 5, 10}, 500 | {-6, -1, 4}}}, 501 | {{{-16, -11, -6}, 502 | {-22, -17, -12}, 503 | {-28, -23, -18}}, 504 | {{-9, -4, 1}, 505 | {-15, -10, -5}, 506 | {-21, -16, -11}}, 507 | {{-2, 3, 8}, 508 | {-8, -3, 2}, 509 | {-14, -9, -4}}}} 510 | )); 511 | 512 | //c_ijkl = a_ijmn b_mnkl 513 | auto c = Tensor::interior<2>(a, b); 514 | TEST_EQ(c, (float3x3x3x3 515 | {{{{-303, -348, -393}, 516 | {-249, -294, -339}, 517 | {-195, -240, -285}}, 518 | {{-285, -420, -555}, 519 | {-123, -258, -393}, 520 | {39, -96, -231}}, 521 | {{-267, -492, -717}, 522 | {3, -222, -447}, 523 | {273, 48, -177}}}, 524 | {{{-312, -312, -312}, 525 | {-312, -312, -312}, 526 | {-312, -312, -312}}, 527 | {{-294, -384, -474}, 528 | {-186, -276, -366}, 529 | {-78, -168, -258}}, 530 | {{-276, -456, -636}, 531 | {-60, -240, -420}, 532 | {156, -24, -204}}}, 533 | {{{-321, -276, -231}, 534 | {-375, -330, -285}, 535 | {-429, -384, -339}}, 536 | {{-303, -348, -393}, 537 | {-249, -294, -339}, 538 | {-195, -240, -285}}, 539 | {{-285, -420, -555}, 540 | {-123, -258, -393}, 541 | {39, -96, -231}}}} 542 | )); 543 | } 544 | 545 | { 546 | using real = double; 547 | 548 | // turns out rank 1x1x...xN ctors doesn't' work ... unless it's 1x1x ... 
x1 549 | #if 0 550 | // TODO why isn't this working? 551 | using real2 = Tensor::tensor; 552 | auto i = real2{1,3}; 553 | auto j = real2{2,4}; 554 | using real1x2 = Tensor::tensor; 555 | #if 1 // works 556 | auto ii = real1x2{i}; 557 | auto jj = real1x2{j}; 558 | #endif 559 | #if 0 // fails 560 | auto ii = real1x2(i); // doesn't work 561 | auto jj = real1x2(j); 562 | #endif 563 | TEST_EQ(Tensor::inner(ii, jj), 14); 564 | TEST_EQ(Tensor::inner(Tensor::tensor{{1,3,2}}, Tensor::tensor{{2,4,3}}), 20); 565 | #endif 566 | 567 | 568 | 569 | // rank-1 vectors 570 | TEST_EQ(Tensor::inner(Tensor::tensor{3}, Tensor::tensor{4}), 12); 571 | TEST_EQ(Tensor::inner(Tensor::tensor{1,3}, Tensor::tensor{2,4}), 14); 572 | TEST_EQ(Tensor::inner(Tensor::tensor{1,3,2}, Tensor::tensor{2,4,3}), 20); 573 | 574 | // rank-2 dense vectors 575 | TEST_EQ(Tensor::inner(Tensor::tensor{{3}}, Tensor::tensor{{5}}), 15); 576 | TEST_EQ(Tensor::inner(Tensor::tensor{{1},{3}}, Tensor::tensor{{2},{4}}), 14); 577 | 578 | TEST_EQ(Tensor::inner(Tensor::tensor{{1, 2},{3, 4}}, Tensor::tensor{{4, 3},{2, 1}}), 20); 579 | 580 | TEST_EQ(Tensor::inner(Tensor::tensor{{1},{3},{2}}, Tensor::tensor{{2},{4},{3}}), 20); 581 | 582 | TEST_EQ(Tensor::inner(Tensor::tensor{{1,2},{3,4},{2,5}}, Tensor::tensor{{2,1},{4,2},{3,3}}), 45); 583 | TEST_EQ(Tensor::inner(Tensor::tensor{{1,2,3},{4,5,6},{7,8,9}}, Tensor::tensor{{9,8,7},{6,5,4},{3,2,1}}), 165); 584 | 585 | 586 | // rank-3 dense 587 | 588 | // rank-2 ident 589 | 590 | TEST_EQ(Tensor::inner(Tensor::ident(2), Tensor::ident(3)), 18); 591 | TEST_EQ(Tensor::inner(Tensor::tensor{{2,0,0},{0,2,0},{0,0,2}}, Tensor::ident(3)), 18); 592 | TEST_EQ(Tensor::inner(Tensor::tensor{{2,0,0},{0,2,0},{0,0,2}}, Tensor::tensor{{3,0,0},{0,3,0},{0,0,3}}), 18); 593 | 594 | TEST_EQ(Tensor::inner(Tensor::tensorx(), Tensor::ident(3)), 0); 595 | 596 | // rank-2 sym*sym 597 | // rank-2 sym*asym 598 | TEST_EQ(Tensor::inner(Tensor::sym(2), Tensor::asym(3)), 0); 599 | TEST_EQ(Tensor::inner(Tensor::asym(2), 
Tensor::sym(3)), 0); 600 | // rank-2 asym*sym 601 | // rank-2 asym*asym 602 | 603 | // rank-3 same 604 | TEST_EQ(Tensor::inner(Tensor::tensorx(2), Tensor::tensorx(3)), 0); 605 | TEST_EQ(Tensor::inner(Tensor::tensorx(2), Tensor::tensorx(3)), 0); 606 | 607 | TEST_EQ(Tensor::inner(Tensor::tensorx(2), Tensor::tensorx(3)), 0); 608 | TEST_EQ(Tensor::inner(Tensor::tensorx(2), Tensor::tensorx(3)), 0); 609 | TEST_EQ(Tensor::inner(Tensor::tensorx(2), Tensor::tensorx(3)), 0); 610 | TEST_EQ(Tensor::inner(Tensor::tensorx(2), Tensor::tensorx(3)), 0); 611 | 612 | TEST_EQ(Tensor::inner(Tensor::tensorx(2), Tensor::tensorx(3)), 0); 613 | TEST_EQ(Tensor::inner(Tensor::tensorx(2), Tensor::tensorx(3)), 0); 614 | TEST_EQ(Tensor::inner(Tensor::tensorx(2), Tensor::tensorx(3)), 0); 615 | TEST_EQ(Tensor::inner(Tensor::tensorx(2), Tensor::tensorx(3)), 0); 616 | 617 | TEST_BOOL((Tensor::hasMatchingSymAndAsymIndexes, Tensor::tensorx>)); 618 | TEST_BOOL((Tensor::hasMatchingSymAndAsymIndexes, Tensor::tensorx>)); 619 | 620 | TEST_BOOL((Tensor::hasMatchingSymAndAsymIndexes, Tensor::tensorx>)); 621 | TEST_BOOL((Tensor::hasMatchingSymAndAsymIndexes, Tensor::tensorx>)); 622 | TEST_BOOL((Tensor::hasMatchingSymAndAsymIndexes, Tensor::tensorx>)); 623 | TEST_BOOL((Tensor::hasMatchingSymAndAsymIndexes, Tensor::tensorx>)); 624 | 625 | TEST_BOOL((Tensor::hasMatchingSymAndAsymIndexes, Tensor::tensorx>)); 626 | TEST_BOOL((Tensor::hasMatchingSymAndAsymIndexes, Tensor::tensorx>)); 627 | TEST_BOOL((Tensor::hasMatchingSymAndAsymIndexes, Tensor::tensorx>)); 628 | TEST_BOOL((Tensor::hasMatchingSymAndAsymIndexes, Tensor::tensorx>)); 629 | 630 | // rank-4 same 631 | 632 | // rank-4 ident outer rank-2 633 | 634 | 635 | } 636 | } 637 | --------------------------------------------------------------------------------