├── thirdparty
│   ├── Vulkan_utils
│   │   ├── separate_files_from_KhronosGroup_repos
│   │   └── vk_format_utils.h
│   └── crc64
│       ├── crc64.h
│       └── crc64.cpp
├── python
│   ├── test
│   │   ├── fei.png
│   │   ├── SpiderTex.jpg
│   │   ├── CMakeLists.txt
│   │   ├── test_for.py
│   │   ├── spider.mtl
│   │   ├── test_reduce.py
│   │   ├── test_multithread.py
│   │   ├── test_compute.py
│   │   ├── test_tex2d.py
│   │   ├── test_rasterization.py
│   │   ├── test_obj.py
│   │   └── test_raytrace.py
│   ├── VkInline
│   │   ├── NativeEX.py
│   │   ├── __init__.py
│   │   ├── Native.py
│   │   ├── SVCombine.py
│   │   ├── SVObjBuffer.py
│   │   ├── SVObjVector.py
│   │   ├── SVBuffer.py
│   │   ├── Cubemap.py
│   │   ├── Texture3D.py
│   │   ├── Texture2D.py
│   │   ├── utils.py
│   │   ├── ContextEX.py
│   │   ├── SVVector.py
│   │   ├── cffi.py
│   │   ├── cffi_build.py
│   │   ├── Context.py
│   │   └── ShaderViewable.py
│   ├── setup.py
│   ├── api_SVCombine.cpp
│   ├── api_SVObjBuffer.cpp
│   ├── api_SVBuffer.cpp
│   ├── api_ex.h
│   ├── api_Cubemap.cpp
│   ├── api_Texture3D.cpp
│   ├── api_Texture2D.cpp
│   ├── api_utils.cpp
│   ├── CMakeLists.txt
│   ├── api_Context_ex.inl
│   ├── api_ShaderViewable.cpp
│   ├── api_Context.cpp
│   └── api.h
├── doc
│   ├── raytrace_result.png
│   └── rasterization_result.png
├── test
│   ├── CMakeLists.txt
│   └── test.cpp
├── SVCombine.h
├── .gitmodules
├── SVObjBuffer.h
├── SVBuffer.h
├── SVCombine.cpp
├── SVBuffer.cpp
├── SVObjBuffer.cpp
├── Context_ex.h
├── LICENSE
├── CMakeLists.txt
├── ShaderViewable.h
├── internal
│   └── internal_context_ex.h
├── Context.h
├── Context_ex.inl
├── glslc.cpp
└── README.md

/thirdparty/Vulkan_utils/separate_files_from_KhronosGroup_repos:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/python/test/fei.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fynv/VkInline/HEAD/python/test/fei.png
--------------------------------------------------------------------------------
/doc/raytrace_result.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fynv/VkInline/HEAD/doc/raytrace_result.png
--------------------------------------------------------------------------------
/python/test/SpiderTex.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fynv/VkInline/HEAD/python/test/SpiderTex.jpg
--------------------------------------------------------------------------------
/doc/rasterization_result.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fynv/VkInline/HEAD/doc/rasterization_result.png
--------------------------------------------------------------------------------
/thirdparty/crc64/crc64.h:
--------------------------------------------------------------------------------
#ifndef _crc64_h
#define _crc64_h

#include <stdint.h>

uint64_t crc64(uint64_t crc, const unsigned char *s, uint64_t l);

#endif
--------------------------------------------------------------------------------
/test/CMakeLists.txt:
--------------------------------------------------------------------------------
cmake_minimum_required (VERSION 3.0)

add_executable(test_vki test.cpp)

target_link_libraries(test_vki VkInline)

install(TARGETS test_vki RUNTIME DESTINATION test_cpp)

--------------------------------------------------------------------------------
/python/VkInline/NativeEX.py:
--------------------------------------------------------------------------------
import os
import sys
import site
from .cffi import ffi

if os.name == 'nt':
    fn_vkinline = 'PyVkInlineEX.dll'
elif os.name == "posix":
    fn_vkinline = 'libPyVkInlineEX.so'

path_vkinline = os.path.dirname(__file__)+"/"+fn_vkinline

native = ffi.dlopen(path_vkinline)

--------------------------------------------------------------------------------
/python/test/CMakeLists.txt:
--------------------------------------------------------------------------------
set(PYTHON_TEST
fei.png
SpiderTex.jpg
spider.mtl
spider.obj

test_compute.py
test_for.py
test_reduce.py
test_multithread.py
test_tex2d.py
test_rasterization.py
test_obj.py
test_raytrace.py
)

install(FILES ${PYTHON_TEST} DESTINATION test_python )
--------------------------------------------------------------------------------
/python/VkInline/__init__.py:
--------------------------------------------------------------------------------
from .Native import native
if native.n_vkinline_try_init()==0:
    raise ImportError('cannot import VkInline')

from .Context import *
from .ContextEX import BaseLevelAS, TopLevelAS, HitShaders, RayTracer
from .ShaderViewable import *
from .SVVector import *
from .SVObjVector import *
from .Texture2D import *
from .Texture3D import *
from .Cubemap import *
--------------------------------------------------------------------------------
/python/VkInline/Native.py:
--------------------------------------------------------------------------------
from .NativeEX import native
if native.n_vkinline_try_init()==0:
    import os
    import sys
    import site
    from .cffi import ffi

    if os.name == 'nt':
        fn_vkinline = 'PyVkInline.dll'
    elif os.name == "posix":
        fn_vkinline = 'libPyVkInline.so'

    path_vkinline = os.path.dirname(__file__)+"/"+fn_vkinline
    native = ffi.dlopen(path_vkinline)
else:
    from .NativeEX import ffi

--------------------------------------------------------------------------------
/python/VkInline/SVCombine.py:
--------------------------------------------------------------------------------
from .Native import ffi, native
from .utils import *

def SVCombine_Create(elem_map, operations):
    param_names = [param_name for param_name, elem in elem_map.items()]
    o_param_names = StrArray(param_names)
    elems = [elem for param_name, elem in elem_map.items()]
    o_elems = ObjArray(elems)
    return native.n_svcombine_create(o_elems.m_cptr, o_param_names.m_cptr, operations.encode('utf-8'))

--------------------------------------------------------------------------------
/SVCombine.h:
--------------------------------------------------------------------------------
#pragma once

#include "ShaderViewable.h"

namespace VkInline
{
    class SVCombine : public ShaderViewable
    {
    public:
        SVCombine(const std::vector<CapturedShaderViewable>& elem_map, const char* operations);
        virtual ~SVCombine() {}
        virtual ViewBuf view() const;
        virtual void apply_barriers(const Internal::CommandBuffer& cmdbuf, unsigned dstFlags) const;

    protected:
        std::vector<const ShaderViewable*> m_components;
        std::vector<size_t> m_offsets;
    };
}

--------------------------------------------------------------------------------
/python/setup.py:
--------------------------------------------------------------------------------
from setuptools import setup
from codecs import open

setup(
    name = 'VkInline',
    version = '0.3.7',
    description = 'A tool making it easy to use Vulkan for Python',
    url='https://github.com/fynv/VkInline',
    license='Anti 996',
    author='Fei Yang',
    author_email='hyangfeih@gmail.com',
    keywords='GPU Vulkan Python offscreen-rendering ray-tracing',
    packages=['VkInline'],
    package_data = { 'VkInline': ['*.dll', '*.so']},
    install_requires = ['cffi','numpy', 'pyglm'],
)

--------------------------------------------------------------------------------
/.gitmodules:
--------------------------------------------------------------------------------
[submodule "thirdparty/unqlite"]
    path = thirdparty/unqlite
    url = https://github.com/symisc/unqlite.git
[submodule "thirdparty/glslang"]
    path = thirdparty/glslang
    url = https://github.com/KhronosGroup/glslang.git
[submodule "thirdparty/SPIRV-Cross"]
    path = thirdparty/SPIRV-Cross
    url = https://github.com/KhronosGroup/SPIRV-Cross.git
[submodule "thirdparty/Vulkan-Headers"]
    path = thirdparty/Vulkan-Headers
    url = https://github.com/KhronosGroup/Vulkan-Headers.git
[submodule "thirdparty/volk"]
    path = thirdparty/volk
    url = https://github.com/zeux/volk.git
--------------------------------------------------------------------------------
/python/api_SVCombine.cpp:
--------------------------------------------------------------------------------
#include "api.h"
#include "SVCombine.h"
using namespace VkInline;

typedef std::vector<std::string> StrArray;
typedef std::vector<const ShaderViewable*> PtrArray;

void* n_svcombine_create(void* ptr_svs, void* ptr_names, const char* operations)
{
    PtrArray* svs = (PtrArray*)ptr_svs;
    StrArray* names = (StrArray*)ptr_names;
    size_t num_params = svs->size();
    std::vector<CapturedShaderViewable> arg_map(num_params);
    for (size_t i = 0; i < num_params; i++)
    {
        arg_map[i].obj_name = (*names)[i].c_str();
        arg_map[i].obj = (*svs)[i];
    }

    return new SVCombine(arg_map, operations);
}

--------------------------------------------------------------------------------
/python/VkInline/SVObjBuffer.py:
--------------------------------------------------------------------------------
from .Native import ffi, native
from .ShaderViewable import ShaderViewable
from .utils import *

class SVObjBuffer(ShaderViewable):
    def __init__(self, lst_svobjs):
        self.lst_svobjs = lst_svobjs
        o_svobjs = ObjArray(lst_svobjs)
        self.m_cptr = native.n_svobjbuffer_create(o_svobjs.m_cptr)

    def name_elem_type(self):
        return ffi.string(native.n_svobjbuffer_name_elem_type(self.m_cptr)).decode('utf-8')

    def elem_size(self):
        return native.n_svobjbuffer_elem_size(self.m_cptr)

    def size(self):
        return native.n_svobjbuffer_size(self.m_cptr)

    def update(self):
        native.n_svobjbuffer_update(self.m_cptr)

--------------------------------------------------------------------------------
/python/test/test_for.py:
--------------------------------------------------------------------------------
import VkInline as vki
import numpy as np

# interface with numpy
harr = np.array([1.0, 2.0, 3.0, 4.0, 5.0], dtype='float32')
darr = vki.device_vector_from_numpy(harr)

harr2 = np.array([6,7,8,9,10], dtype='int32')
darr2 = vki.device_vector_from_numpy(harr2)

# test launching non-templated for
forLoop = vki.For(['arr_in','arr_out','k'], "inner",
'''
void inner(uint idx)
{
    set_value(arr_out, idx, get_value(arr_in, idx)*k);
}
''')

darr_out = vki.SVVector('float', 5)
forLoop.launch(0, 5, [darr, darr_out, vki.SVFloat(10.0)])
print (darr_out.to_host())

darr_out = vki.SVVector('int', 5)
forLoop.launch_n(5, [darr2, darr_out, vki.SVInt32(5)])
print (darr_out.to_host())

--------------------------------------------------------------------------------
/SVObjBuffer.h:
--------------------------------------------------------------------------------
#pragma once

#include "ShaderViewable.h"

namespace VkInline
{
    namespace Internal
    {
        class UploadBuffer;
    }

    class SVObjBuffer : public ShaderViewable
    {
    public:
        const std::string& name_elem_type() const { return m_elem_type; }
        size_t elem_size() const { return m_elem_size; }
        size_t size() const { return m_size; }

        SVObjBuffer(const std::vector<const ShaderViewable*>& elems);
        ~SVObjBuffer();

        void update();

        virtual ViewBuf view() const;
        virtual void apply_barriers(const Internal::CommandBuffer& cmdbuf, unsigned dstFlags) const;

    protected:
        std::vector<const ShaderViewable*> m_elems;
        std::string m_elem_type;
        size_t m_elem_size;
        size_t m_size;
        Internal::UploadBuffer* m_data;

    };

}

--------------------------------------------------------------------------------
/python/VkInline/SVObjVector.py:
--------------------------------------------------------------------------------
import numpy as np
from .Context import *
from .ShaderViewable import *
from .SVObjBuffer import *
from .SVCombine import SVCombine_Create

class SVObjVector(ShaderViewable):
    def __init__(self, lst_svobjs):
        self.m_size = SVUInt32(len(lst_svobjs))
        self.m_buf = SVObjBuffer(lst_svobjs)
        self.m_cptr = SVCombine_Create({'size': self.m_size, 'data': self.m_buf}, '''
uint get_size(in Comb_#hash# vec)
{{
    return vec.size;
}}

{0} get_value(in Comb_#hash# vec, in uint id)
{{
    return vec.data[id].v;
}}
'''.format(self.name_elem_type()))

    def name_elem_type(self):
        return self.m_buf.name_elem_type()

    def elem_size(self):
        return self.m_buf.elem_size()

    def size(self):
        return self.m_buf.size()
--------------------------------------------------------------------------------
/python/api_SVObjBuffer.cpp:
--------------------------------------------------------------------------------
#include "api.h"
#include "SVObjBuffer.h"
using namespace VkInline;

typedef std::vector<const ShaderViewable*> PtrArray;

void* n_svobjbuffer_create(void* ptr_svs)
{
    PtrArray* elems = (PtrArray*)ptr_svs;
    return new SVObjBuffer(*elems);
}

const char* n_svobjbuffer_name_elem_type(void* cptr)
{
    SVObjBuffer* svbuf = (SVObjBuffer*)cptr;
    return svbuf->name_elem_type().c_str();
}

unsigned long long n_svobjbuffer_elem_size(void* cptr)
{
    SVObjBuffer* svbuf = (SVObjBuffer*)cptr;
    return svbuf->elem_size();
}

unsigned long long n_svobjbuffer_size(void* cptr)
{
    SVObjBuffer* svbuf = (SVObjBuffer*)cptr;
    return svbuf->size();
}

void n_svobjbuffer_update(void* cptr)
{
    SVObjBuffer* svbuf = (SVObjBuffer*)cptr;
    svbuf->update();
}

--------------------------------------------------------------------------------
/python/test/spider.mtl:
--------------------------------------------------------------------------------
#
# spider.mtl
#

newmtl Skin
Ka 0.200000 0.200000 0.200000
Kd 0.827451 0.792157 0.772549
Ks 0.000000 0.000000 0.000000
Ns 0.000000
map_Kd .\wal67ar_small.jpg

newmtl Brusttex
Ka 0.200000 0.200000 0.200000
Kd 0.800000 0.800000 0.800000
Ks 0.000000 0.000000 0.000000
Ns 0.000000
map_Kd .\wal69ar_small.jpg

newmtl HLeibTex
Ka 0.200000 0.200000 0.200000
Kd 0.690196 0.639216 0.615686
Ks 0.000000 0.000000 0.000000
Ns 0.000000
map_Kd .\SpiderTex.jpg

newmtl BeinTex
Ka 0.200000 0.200000 0.200000
Kd 0.800000 0.800000 0.800000
Ks 0.000000 0.000000 0.000000
Ns 0.000000
map_Kd .\drkwood2.jpg

newmtl Augentex
Ka 0.200000 0.200000 0.200000
Kd 0.800000 0.800000 0.800000
Ks 0.000000 0.000000 0.000000
Ns 0.000000
map_Kd .\engineflare1.jpg
--------------------------------------------------------------------------------
/python/test/test_reduce.py:
--------------------------------------------------------------------------------
import VkInline as vki
import numpy as np

darr = vki.device_vector_from_list(range(1,1025), 'int')
BLOCK_SIZE = 256

kernel = vki.Computer(['dst', 'src', 'n'],
'''
shared {0} s_buf[{1}];
void main()
{{
    uint tid = gl_LocalInvocationID.x;
    uint i = gl_GlobalInvocationID.x;
    if (i<n) s_buf[tid] = get_value(src, i);
    barrier();
    for (uint s = {1}/2; s>0; s>>=1)
    {{
        if (tid < s && i+s<n) s_buf[tid] += s_buf[tid+s];
        barrier();
    }}
    if (tid==0) set_value(dst, gl_WorkGroupID.x, s_buf[0]);
}}
'''.format('int', BLOCK_SIZE))

dst = darr

while dst.size()>1:
    src = dst
    n = src.size()
    blocks = int((n + BLOCK_SIZE - 1) / BLOCK_SIZE)
    dst = vki.SVVector("int", blocks)
    kernel.launch(blocks, BLOCK_SIZE, [dst, src, vki.SVUInt32(n)])

print(dst.to_host()[0])

--------------------------------------------------------------------------------
/python/api_SVBuffer.cpp:
--------------------------------------------------------------------------------
#include "api.h"
#include "SVBuffer.h"
using namespace VkInline;

void* n_svbuffer_create(const char* elem_type, unsigned long long size, void* hdata)
{
    return new SVBuffer(elem_type, size, hdata);
}

const char* n_svbuffer_name_elem_type(void* cptr)
{
    SVBuffer* svbuf = (SVBuffer*)cptr;
    return svbuf->name_elem_type().c_str();
}

unsigned long long n_svbuffer_elem_size(void* cptr)
{
    SVBuffer* svbuf = (SVBuffer*)cptr;
    return svbuf->elem_size();
}

unsigned long long n_svbuffer_size(void* cptr)
{
    SVBuffer* svbuf = (SVBuffer*)cptr;
    return svbuf->size();
}

void n_svbuffer_from_host(void* cptr, void* hdata)
{
    SVBuffer* svbuf = (SVBuffer*)cptr;
    svbuf->from_host(hdata);
}

void n_svbuffer_to_host(void* cptr, void* hdata, unsigned long long begin, unsigned long long end)
{
    SVBuffer* svbuf = (SVBuffer*)cptr;
    svbuf->to_host(hdata, begin, end);
}

--------------------------------------------------------------------------------
/SVBuffer.h:
--------------------------------------------------------------------------------
#pragma once

#include "ShaderViewable.h"

namespace VkInline
{
    namespace Internal
    {
        class DeviceBuffer;
    }

    class SVBuffer : public ShaderViewable
    {
    public:
        const std::string& name_elem_type() const { return m_elem_type; }
        size_t elem_size() const { return m_elem_size; }
        size_t size() const { return m_size; }
        Internal::DeviceBuffer* internal() { return m_data; }
        const Internal::DeviceBuffer* internal() const { return m_data; }

        SVBuffer(const char* elem_type, size_t size, void* hdata = nullptr);
        ~SVBuffer();

        void from_host(void* hdata);
        void to_host(void* hdata, size_t begin = 0, size_t end = (size_t)(-1)) const;
        virtual ViewBuf view() const;
        virtual void apply_barriers(const Internal::CommandBuffer& cmdbuf, unsigned dstFlags) const;

    protected:
        std::string m_elem_type;
        size_t m_elem_size;
        size_t m_size;
        Internal::DeviceBuffer* m_data;

    };

}
--------------------------------------------------------------------------------
/python/test/test_multithread.py:
--------------------------------------------------------------------------------
import VkInline as vki
import numpy as np
import threading

harr = np.array([1.0, 2.0, 3.0, 4.0, 5.0], dtype='float32')
darr = vki.device_vector_from_numpy(harr)

harr2 = np.array([6,7,8,9,10], dtype='int32')
darr2 = vki.device_vector_from_numpy(harr2)

forLoop = vki.For(['arr_in','arr_out','k'], "inner",
'''
void inner(uint idx)
{
    set_value(arr_out, idx, get_value(arr_in, idx)*k);
}
''')

def thread_func():
    darr_out = vki.SVVector('float', 5)
    darr_out2 = vki.SVVector('int', 5)
    forLoop.launch_n(5, [darr, darr_out, vki.SVFloat(10.0)])
    forLoop.launch_n(5, [darr2, darr_out2, vki.SVInt32(5)])
    print (darr_out.to_host())
    print (darr_out2.to_host())


a = threading.Thread(target = thread_func)
b = threading.Thread(target = thread_func)
c = threading.Thread(target = thread_func)

a.start()
b.start()
c.start()
c.join()
b.join()
a.join()

--------------------------------------------------------------------------------
/python/VkInline/SVBuffer.py:
--------------------------------------------------------------------------------
from .Native import ffi, native
from .ShaderViewable import ShaderViewable
import ctypes

class SVBuffer(ShaderViewable):
    def __init__(self, elem_type, size, ptr_host_data=None):
        ffiptr = ffi.NULL
        if ptr_host_data!=None:
            ffiptr = ffi.cast("void *", ptr_host_data)
        self.m_cptr = native.n_svbuffer_create(elem_type.encode('utf-8'), size, ffiptr)

    def name_elem_type(self):
        return ffi.string(native.n_svbuffer_name_elem_type(self.m_cptr)).decode('utf-8')

    def elem_size(self):
        return native.n_svbuffer_elem_size(self.m_cptr)

    def size(self):
        return native.n_svbuffer_size(self.m_cptr)

    def from_host(self, ptr_host_data):
        native.n_svbuffer_from_host(self.m_cptr, ffi.cast("void *", ptr_host_data))

    def to_host(self, ptr_host_data, begin = 0, end = -1):
        native.n_svbuffer_to_host(self.m_cptr, ffi.cast("void *", ptr_host_data), ctypes.c_ulonglong(begin).value, ctypes.c_ulonglong(end).value)

--------------------------------------------------------------------------------
/python/VkInline/Cubemap.py:
--------------------------------------------------------------------------------
from .Native import ffi, native
import numpy as np

class Cubemap:
    def __init__(self, width, height, vkformat):
        self.m_cptr = native.n_cubemap_create(width, height, vkformat)

    def __del__(self):
        native.n_cubemap_release(self.m_cptr)

    def width(self):
        return native.n_cubemap_width(self.m_cptr)

    def height(self):
        return native.n_cubemap_height(self.m_cptr)

    def pixel_size(self):
        return native.n_cubemap_pixelsize(self.m_cptr)

    def channel_count(self):
        return native.n_cubemap_channelcount(self.m_cptr)

    def vkformat(self):
        return native.n_cubemap_vkformat(self.m_cptr)

    def upload(self, nparr):
        ffiptr = ffi.cast("void *", nparr.__array_interface__['data'][0])
        native.n_cubemap_upload(self.m_cptr, ffiptr)

    def download(self, nparr):
        ffiptr = ffi.cast("void *", nparr.__array_interface__['data'][0])
        native.n_cubemap_download(self.m_cptr, ffiptr)

--------------------------------------------------------------------------------
/python/api_ex.h:
--------------------------------------------------------------------------------
extern "C"
{
    PY_VkInline_API void* n_blas_create_triangles(void* indBuf, void* posBuf);
    PY_VkInline_API void* n_blas_create_procedure(void* aabbBuf);
    PY_VkInline_API void n_blas_destroy(void* ptr_blas);
    PY_VkInline_API void* n_mat4_create(const float* v);
    PY_VkInline_API void n_mat4_destroy(void* ptr);
    PY_VkInline_API void* n_tlas_create(void* ptr_blases, void* ptr_transes);
    PY_VkInline_API void n_tlas_destroy(void* ptr);
    PY_VkInline_API void* n_hit_shaders_create(const char* closest_hit, const char* intersection);
    PY_VkInline_API void n_hit_shaders_destroy(void* ptr);
    PY_VkInline_API void* n_raytracer_create(void* ptr_param_list, const char* body_raygen, void* ptr_body_miss, void* ptr_body_hit, unsigned maxRecursionDepth, unsigned type_locked);
    PY_VkInline_API void n_raytracer_destroy(void* cptr);
    PY_VkInline_API int n_raytracer_num_params(void* cptr);
    PY_VkInline_API int n_raytracer_launch(void* ptr_raytracer, void* ptr_glbDim, void* ptr_arg_list, void* ptr_tlas_list, void* ptr_tex2d_list, void* ptr_tex3d_list, void* ptr_cubemap_list, unsigned times_submission);
}

--------------------------------------------------------------------------------
/python/test/test_compute.py:
--------------------------------------------------------------------------------
import VkInline as vki
import numpy as np

# interface with numpy
harr = np.array([1.0, 2.0, 3.0, 4.0, 5.0], dtype='float32')
darr = vki.device_vector_from_numpy(harr)
print(darr.to_host())

# GLSL data type
print(darr.name_view_type())

harr2 = np.array([6,7,8,9,10], dtype='int32')
darr2 = vki.device_vector_from_numpy(harr2)

# kernel with auto parameters, launched twice with different types
kernel = vki.Computer(['arr_in', 'arr_out', 'k'],
'''
void main()
{
    uint id = gl_GlobalInvocationID.x;
    if (id >= get_size(arr_in)) return;
    set_value(arr_out, id, get_value(arr_in, id)*k);
}
''')

darr_out = vki.SVVector('float', 5)
kernel.launch(1,128, [darr, darr_out, vki.SVFloat(10.0)])
print (darr_out.to_host())

darr_out = vki.SVVector('int', 5)
kernel.launch(1,128, [darr2, darr_out, vki.SVInt32(5)])
print (darr_out.to_host())

# create a vector from python list with GLSL type specified
darr3 = vki.device_vector_from_list([3.0, 5.0, 7.0, 9.0 , 11.0], 'float')
print(darr3.to_host())
--------------------------------------------------------------------------------
/python/VkInline/Texture3D.py:
--------------------------------------------------------------------------------
from .Native import ffi, native
import numpy as np

class Texture3D:
    def __init__(self, dimX, dimY, dimZ, vkformat):
        self.m_cptr = native.n_texture3d_create(dimX, dimY, dimZ, vkformat)

    def __del__(self):
        native.n_texture3d_release(self.m_cptr)

    def dimX(self):
        return native.n_texture3d_dimX(self.m_cptr)

    def dimY(self):
        return native.n_texture3d_dimY(self.m_cptr)

    def dimZ(self):
        return native.n_texture3d_dimZ(self.m_cptr)

    def pixel_size(self):
        return native.n_texture3d_pixelsize(self.m_cptr)

    def channel_count(self):
        return native.n_texture3d_channelcount(self.m_cptr)

    def vkformat(self):
        return native.n_texture3d_vkformat(self.m_cptr)

    def upload(self, nparr):
        ffiptr = ffi.cast("void *", nparr.__array_interface__['data'][0])
        native.n_texture3d_upload(self.m_cptr, ffiptr)

    def download(self, nparr):
        ffiptr = ffi.cast("void *", nparr.__array_interface__['data'][0])
        native.n_texture3d_download(self.m_cptr, ffiptr)

--------------------------------------------------------------------------------
/python/test/test_tex2d.py:
--------------------------------------------------------------------------------
import VkInline as vki
import numpy as np
from PIL import Image

image_in = np.array(Image.open('fei.png').convert('RGBA'))
# print(image_in.shape, image_in.dtype)

VK_FORMAT_R8G8B8A8_SRGB = 43

width = image_in.shape[1]
height = image_in.shape[0]

tex2d = vki.Texture2D(width, height, VK_FORMAT_R8G8B8A8_SRGB)
tex2d.upload(image_in)

darr = vki.SVVector('vec4', width*height)

kernel = vki.Computer(['width', 'height', 'arr'],
'''
void main()
{
    uint x = gl_GlobalInvocationID.x;
    uint y = gl_GlobalInvocationID.y;
    if (x >= width || y>=height) return;

    float u = (float(x)+0.5)/float(width);
    float v = (float(y)+0.5)/float(height);

    vec4 rgba = texture(arr_tex2d[0], vec2(u,v));

    set_value(arr, x+y*width, rgba);
}
''')

blockSize = (8,8)
gridSize = (int((width+7)/8), int((height+7)/8))

kernel.launch(gridSize, blockSize, [vki.SVInt32(width), vki.SVInt32(height), darr], tex2ds=[tex2d])

harr = darr.to_host()
harr = np.reshape(harr, (height, width, 4))*255.0
harr = harr.astype(np.uint8)

img_out = Image.fromarray(harr, 'RGBA')
img_out.save('output.png')

--------------------------------------------------------------------------------
/python/VkInline/Texture2D.py:
--------------------------------------------------------------------------------
from .Native import ffi, native
import numpy as np

class Texture2D:
    def __init__(self, width, height, vkformat, isDepth = False, isStencil=False, samples = 1):
        self.m_cptr = native.n_texture2d_create(width, height, vkformat, isDepth, isStencil, samples)

    def __del__(self):
        native.n_texture2d_release(self.m_cptr)

    def width(self):
        return native.n_texture2d_width(self.m_cptr)

    def height(self):
        return native.n_texture2d_height(self.m_cptr)

    def pixel_size(self):
        return native.n_texture2d_pixelsize(self.m_cptr)

    def channel_count(self):
        return native.n_texture2d_channelcount(self.m_cptr)

    def sample_count(self):
        return native.n_texture2d_samplecount(self.m_cptr)

    def vkformat(self):
        return native.n_texture2d_vkformat(self.m_cptr)

    def upload(self, nparr):
        ffiptr = ffi.cast("void *", nparr.__array_interface__['data'][0])
        native.n_texture2d_upload(self.m_cptr, ffiptr)

    def download(self, nparr):
        ffiptr = ffi.cast("void *", nparr.__array_interface__['data'][0])
        native.n_texture2d_download(self.m_cptr, ffiptr)

--------------------------------------------------------------------------------
/python/api_Cubemap.cpp:
--------------------------------------------------------------------------------
#include "api.h"
#include "Context.h"
using namespace VkInline;

void* n_cubemap_create(int width, int height, unsigned vkformat)
{
    return new Cubemap(width, height, vkformat);
}

void n_cubemap_release(void* cubemap)
{
    delete (Cubemap*)cubemap;
}

int n_cubemap_width(void* _cubemap)
{
    Cubemap* cubemap = (Cubemap*)_cubemap;
    return cubemap->width();
}

int n_cubemap_height(void* _cubemap)
{
    Cubemap* cubemap = (Cubemap*)_cubemap;
    return cubemap->height();
}

unsigned n_cubemap_pixelsize(void* _cubemap)
{
    Cubemap* cubemap = (Cubemap*)_cubemap;
    return cubemap->pixel_size();
}

unsigned n_cubemap_channelcount(void* _cubemap)
{
    Cubemap* cubemap = (Cubemap*)_cubemap;
    return cubemap->channel_count();
}

unsigned n_cubemap_vkformat(void* _cubemap)
{
    Cubemap* cubemap = (Cubemap*)_cubemap;
    return cubemap->vkformat();
}

void n_cubemap_upload(void* _cubemap, void* hdata)
{
    Cubemap* cubemap = (Cubemap*)_cubemap;
    return cubemap->upload(hdata);
}

void n_cubemap_download(void* _cubemap, void* hdata)
{
    Cubemap* cubemap = (Cubemap*)_cubemap;
    return cubemap->download(hdata);
}
--------------------------------------------------------------------------------
/SVCombine.cpp:
--------------------------------------------------------------------------------
#include <string.h>
#include "SVCombine.h"
#include "Context.h"

namespace VkInline
{
    SVCombine::SVCombine(const std::vector<CapturedShaderViewable>& elem_map, const char* operations)
    {
        std::string dynamic_code=
            "struct Comb_#hash#\n"
            "{\n";

        m_components.resize(elem_map.size());
        for (size_t i = 0; i < elem_map.size(); i++)
        {
            dynamic_code += std::string(" ") + elem_map[i].obj->name_view_type() + " " + elem_map[i].obj_name + ";\n";
            m_components[i] = elem_map[i].obj;
        }
        dynamic_code += "};\n";
        dynamic_code += operations;

        m_name_view_type = std::string("Comb_")+Add_Dynamic_Code(dynamic_code.c_str());
        m_offsets.resize(elem_map.size() + 1);
        QueryStruct(m_name_view_type.c_str(), m_offsets.data());
    }

    ViewBuf SVCombine::view() const
    {
        ViewBuf ret(m_offsets[m_components.size()]);
        for (size_t i = 0; i < m_components.size(); i++)
        {
            ViewBuf elem_view = m_components[i]->view();
            memcpy(ret.data() + m_offsets[i], elem_view.data(), elem_view.size());
        }
        return ret;
    }

    void SVCombine::apply_barriers(const Internal::CommandBuffer& cmdbuf, unsigned dstFlags) const
    {
        for (size_t i = 0; i < m_components.size(); i++)
        {
            m_components[i]->apply_barriers(cmdbuf, dstFlags);
        }
    }

}
--------------------------------------------------------------------------------
/python/api_Texture3D.cpp:
--------------------------------------------------------------------------------
#include "api.h"
#include "Context.h"
using namespace VkInline;

void* n_texture3d_create(int dimX, int dimY, int dimZ, unsigned vkformat)
{
    return new Texture3D(dimX, dimY, dimZ, vkformat);
}

void n_texture3d_release(void* tex3d)
{
    delete (Texture3D*)tex3d;
}

int n_texture3d_dimX(void* _tex3d)
{
    Texture3D* tex3d = (Texture3D*)_tex3d;
    return tex3d->dimX();
}

int n_texture3d_dimY(void* _tex3d)
{
    Texture3D* tex3d = (Texture3D*)_tex3d;
    return tex3d->dimY();
}

int n_texture3d_dimZ(void* _tex3d)
{
    Texture3D* tex3d = (Texture3D*)_tex3d;
    return tex3d->dimZ();
}

unsigned n_texture3d_pixelsize(void* _tex3d)
{
    Texture3D* tex3d = (Texture3D*)_tex3d;
    return tex3d->pixel_size();
}

unsigned n_texture3d_channelcount(void* _tex3d)
{
    Texture3D* tex3d = (Texture3D*)_tex3d;
    return tex3d->channel_count();
}

unsigned n_texture3d_vkformat(void* _tex3d)
{
    Texture3D* tex3d = (Texture3D*)_tex3d;
    return tex3d->vkformat();
}

void n_texture3d_upload(void* _tex3d, void* hdata)
{
    Texture3D* tex3d = (Texture3D*)_tex3d;
    tex3d->upload(hdata);
}

void n_texture3d_download(void* _tex3d, void* hdata)
{
    Texture3D* tex3d = (Texture3D*)_tex3d;
    tex3d->download(hdata);
}

--------------------------------------------------------------------------------
/python/api_Texture2D.cpp:
--------------------------------------------------------------------------------
#include "api.h"
#include "Context.h"
using namespace VkInline;

void* n_texture2d_create(int width, int height, unsigned vkformat, unsigned isDepth, unsigned isStencil, unsigned sampleCount)
{
    return new Texture2D(width, height, vkformat, isDepth!=0, isStencil!=0, sampleCount);
}

void n_texture2d_release(void* tex2d)
{
    delete (Texture2D*)tex2d;
}

int n_texture2d_width(void* _tex2d)
{
    Texture2D* tex2d = (Texture2D*)_tex2d;
    return tex2d->width();
}

int n_texture2d_height(void* _tex2d)
{
    Texture2D* tex2d = (Texture2D*)_tex2d;
    return tex2d->height();
}

unsigned n_texture2d_pixelsize(void* _tex2d)
{
    Texture2D* tex2d = (Texture2D*)_tex2d;
    return tex2d->pixel_size();
}

unsigned n_texture2d_channelcount(void* _tex2d)
{
    Texture2D* tex2d = (Texture2D*)_tex2d;
    return tex2d->channel_count();
}

unsigned n_texture2d_samplecount(void* _tex2d)
{
    Texture2D* tex2d = (Texture2D*)_tex2d;
    return tex2d->sample_count();
}

unsigned n_texture2d_vkformat(void* _tex2d)
{
    Texture2D* tex2d = (Texture2D*)_tex2d;
    return tex2d->vkformat();
}

void n_texture2d_upload(void* _tex2d, void* hdata)
{
    Texture2D* tex2d = (Texture2D*)_tex2d;
    tex2d->upload(hdata);
}

void n_texture2d_download(void* _tex2d, void* hdata)
{
    Texture2D* tex2d = (Texture2D*)_tex2d;
    tex2d->download(hdata);
}

--------------------------------------------------------------------------------
/python/VkInline/utils.py:
--------------------------------------------------------------------------------
from .Native import ffi,native
import numbers

class StrArray:
    def __init__(self, arr):
        c_strs = [ffi.from_buffer('char[]', s.encode('utf-8')) for s in arr]
        self.m_cptr = native.n_string_array_create(len(c_strs), c_strs)

    def __del__(self):
        native.n_string_array_destroy(self.m_cptr)

    def size(self):
        return native.n_string_array_size(self.m_cptr)

class ObjArray:
    def __init__(self, arr):
        c_ptrs = [obj.m_cptr for obj in arr]
        self.m_cptr = native.n_pointer_array_create(len(c_ptrs), c_ptrs)

    def __del__(self):
        native.n_pointer_array_destroy(self.m_cptr)

    def size(self):
        return native.n_pointer_array_size(self.m_cptr)

class Dim3:
    def __init__(self, t):
        tp = [1,1,1]
        if type(t) is tuple:
            tp[0:len(t)] = t[:]
        else:
            tp[0]=t

        self.m_cptr = native.n_dim3_create(tp[0],tp[1],tp[2])

    def __del__(self):
        native.n_dim3_destroy(self.m_cptr)

class LaunchParam:
    def __init__(self, obj):
        if isinstance(obj, numbers.Number):
            self.m_cptr = native.n_launch_param_from_count(obj)
        else:
            self.m_buf = obj.m_buf
            self.m_cptr = native.n_launch_param_from_buffer(obj.m_buf.m_cptr)

    def __del__(self):
        native.n_launch_param_destroy(self.m_cptr)

--------------------------------------------------------------------------------
/python/api_utils.cpp:
--------------------------------------------------------------------------------
#include "api.h"
#include "Context.h"
#include "SVBuffer.h"
using namespace VkInline;
#include <vector>
#include <string.h>

typedef std::vector<std::string> StrArray;
typedef std::vector<const ShaderViewable*> PtrArray;
typedef std::vector<Texture2D*> Tex2DArray;
typedef std::vector<Texture3D*> Tex3DArray;

void* n_string_array_create(unsigned long long size, const char* const* strs)
{
    StrArray* ret = new StrArray(size);
    for (size_t i = 0; i < size; i++)
        (*ret)[i] = strs[i];

    return ret;
}

unsigned long long n_string_array_size(void* ptr_arr)
{
    StrArray* arr = (StrArray*)ptr_arr;
    return arr->size();
}

void n_string_array_destroy(void* ptr_arr)
{
    StrArray* arr = (StrArray*)ptr_arr;
    delete arr;
}

void* n_pointer_array_create(unsigned long long size, const void* const* ptrs)
{
    PtrArray* ret = new PtrArray(size);
    memcpy(ret->data(), ptrs, sizeof(void*)*size);
    return ret;
}

unsigned long long n_pointer_array_size(void* ptr_arr)
{
    PtrArray* arr = (PtrArray*)ptr_arr;
    return arr->size();
}

void n_pointer_array_destroy(void* ptr_arr)
{
    PtrArray* arr = (PtrArray*)ptr_arr;
    delete arr;
}

void* n_dim3_create(unsigned x, unsigned y, unsigned z)
{
    dim_type* ret = new dim_type({ x,y,z });
    return ret;
}

void n_dim3_destroy(void* cptr)
{
    dim_type* v = (dim_type*)cptr;
    delete v;
}

void* n_launch_param_from_count(unsigned count)
{
    return new Rasterizer::LaunchParam({ count, nullptr });
}

void* n_launch_param_from_buffer(void* _buf)
{
    SVBuffer* buf = (SVBuffer*)_buf;
    return new Rasterizer::LaunchParam({ (unsigned)buf->size(), buf });
}

void n_launch_param_destroy(void* lp)
{
    delete (Rasterizer::LaunchParam*)lp;
}

--------------------------------------------------------------------------------
/SVBuffer.cpp:
--------------------------------------------------------------------------------
#include "internal_context.h"
#include "SVBuffer.h"
#include "Context.h"

namespace VkInline
{
    SVBuffer::SVBuffer(const char* elem_type, size_t size, void* hdata)
    {
        m_elem_type = elem_type;
        m_elem_size = SizeOf(elem_type);
        m_size = size;

        unsigned alignment = 4;
        if (m_elem_size % 8 == 0) alignment = 8;
        if (m_elem_size % 16 == 0) alignment = 16;

        char line[1024];
        sprintf(line, "layout(buffer_reference, scalar, buffer_reference_align = %u) buffer Buf_#hash#\n", alignment);
        std::string code = std::string(line) +
            "{\n " + elem_type +
            " v;\n"
            "};\n";

        m_name_view_type = std::string("Buf_") + Add_Dynamic_Code(code.c_str());
        VkBufferUsageFlags usage = VK_BUFFER_USAGE_INDEX_BUFFER_BIT | VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT;
#ifdef _VkInlineEX
        usage |= VK_BUFFER_USAGE_ACCELERATION_STRUCTURE_BUILD_INPUT_READ_ONLY_BIT_KHR;
#endif
        m_data = new Internal::DeviceBuffer(m_elem_size*m_size, usage);
        if (hdata != nullptr)
            m_data->upload(hdata);
        else
            m_data->zero();
    }

    SVBuffer::~SVBuffer()
    {
        delete m_data;
    }

    void SVBuffer::from_host(void* hdata)
    {
        if (m_size > 0)
            m_data->upload(hdata);
    }

    void SVBuffer::to_host(void* hdata, size_t begin, size_t end) const
    {
        if (end == (size_t)(-1) || end > m_size) end = m_size;
        size_t n = end - begin;
        if (n > 0)
            m_data->download(hdata, begin*m_elem_size, end*m_elem_size);
    }

    ViewBuf SVBuffer::view() const
    {
        ViewBuf buf(sizeof(VkDeviceAddress));
        VkDeviceAddress* pview = (VkDeviceAddress*)buf.data();
        *pview = m_data->get_device_address();
        return buf;
    }

    void SVBuffer::apply_barriers(const Internal::CommandBuffer& cmdbuf, unsigned dstFlags) const
    {
        m_data->apply_barrier(cmdbuf, VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT, dstFlags);
    }
}

--------------------------------------------------------------------------------
/python/test/test_rasterization.py:
--------------------------------------------------------------------------------
import VkInline as vki
import numpy as np
from PIL import Image

image_in = np.array(Image.open('fei.png').convert('RGBA'))

VK_FORMAT_R8G8B8A8_SRGB = 43

width = image_in.shape[1]
height = image_in.shape[0]

tex2d = vki.Texture2D(width, height, VK_FORMAT_R8G8B8A8_SRGB)
tex2d.upload(image_in)

colorBuf = vki.Texture2D(width, height, VK_FORMAT_R8G8B8A8_SRGB)

positions = np.array([ [0.0, -0.5, 0.5], [0.5, 0.5, 0.5], [-0.5, 0.5, 0.5] ], dtype = np.float32)
gpuPos = vki.device_vector_from_numpy(positions)

colors = np.array([ [0.0, 1.0, 0.0], [1.0, 0.0, 0.0], [0.0, 0.0, 1.0]], dtype = np.float32)
gpuColors = vki.device_vector_from_numpy(colors)

indices = np.array([0, 1, 2], dtype = np.uint32)
gpuIndices = vki.device_vector_from_numpy(indices)

rp = vki.Rasterizer(['pos', 'col'])

rp.add_draw_call(vki.DrawCall(
'''
layout (location = 0) out vec2 vUV;
void main()
{
    vec2 grid = vec2((gl_VertexIndex << 1) & 2, gl_VertexIndex & 2);
    vec2 vpos = grid * vec2(2.0f, 2.0f) + vec2(-1.0f, -1.0f);
    gl_Position = vec4(vpos, 1.0f, 1.0f);
    vUV = grid;
}
''',
'''
layout (location = 0) in vec2 vUV;
layout (location = 0) out vec4 outColor;

void main()
{
    outColor = texture(arr_tex2d[0], vUV);
}
'''))

rp.add_draw_call(vki.DrawCall(
'''
layout (location = 0) out vec3 vColor;
void main()
{
    gl_Position = vec4(get_value(pos, gl_VertexIndex), 1.0);
    vColor = get_value(col, gl_VertexIndex);
}
''',
'''
layout (location = 0) in vec3 vColor;
layout (location = 0) out vec4 outColor;

void main()
{
    outColor = vec4(vColor, 1.0);
}
'''))


rp.launch([3, gpuIndices], [colorBuf], None, [0.5, 0.5, 0.5, 1.0], 1.0, [gpuPos, gpuColors], [tex2d])


image_out = np.empty((height, width, 4), dtype=np.uint8)
colorBuf.download(image_out)

Image.fromarray(image_out, 'RGBA').save('output.png')

--------------------------------------------------------------------------------
/python/CMakeLists.txt:
--------------------------------------------------------------------------------
cmake_minimum_required (VERSION 3.0)

project(PyVkInline)

add_custom_target(Run_CFFIBuild
COMMAND python VkInline/cffi_build.py
WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
COMMENT "Running cffi_build")

set (SRC
api.h
api_utils.cpp
api_Context.cpp
api_ShaderViewable.cpp
api_SVBuffer.cpp
api_SVCombine.cpp
api_SVObjBuffer.cpp
api_Texture2D.cpp
api_Texture3D.cpp
api_Cubemap.cpp
api_ex.h
api_Context_ex.inl
)

set (INCLUDE_DIR
.
..
)


if (WIN32)
set (DEFINES ${DEFINES}
-D"_CRT_SECURE_NO_DEPRECATE"
-D"_SCL_SECURE_NO_DEPRECATE"
-D"_CRT_SECURE_NO_WARNINGS"
)
else()
add_definitions(-std=c++17)
add_compile_options(-fPIC)
endif()


include_directories(${INCLUDE_DIR})
add_definitions(${DEFINES})

add_library(PyVkInline SHARED ${SRC})
add_library(PyVkInlineEX SHARED ${SRC})
target_link_libraries(PyVkInline VkInline)
target_link_libraries(PyVkInlineEX VkInlineEX)


if (WIN32)
install(TARGETS PyVkInline RUNTIME DESTINATION test_python/VkInline)
install(TARGETS PyVkInlineEX RUNTIME DESTINATION test_python/VkInline)
else()
install(TARGETS PyVkInline DESTINATION test_python/VkInline)
install(TARGETS PyVkInlineEX DESTINATION test_python/VkInline)
endif()


set(PYTHON
VkInline/__init__.py
VkInline/cffi.py
VkInline/Native.py
VkInline/NativeEX.py
VkInline/utils.py
VkInline/Context.py
VkInline/ContextEX.py
VkInline/ShaderViewable.py
VkInline/SVBuffer.py
VkInline/SVCombine.py
VkInline/SVObjBuffer.py
VkInline/SVVector.py
VkInline/SVObjVector.py
VkInline/Texture2D.py
VkInline/Texture3D.py
VkInline/Cubemap.py
)

install(FILES ${PYTHON} DESTINATION test_python/VkInline)
install(FILES setup.py DESTINATION test_python)

set(VKINLINE_INCLUDE_PYTESTS false CACHE BOOL "Include tests")

if (VKINLINE_INCLUDE_PYTESTS)
add_subdirectory(test)
endif()

--------------------------------------------------------------------------------
/test/test.cpp:
--------------------------------------------------------------------------------
#include "Context.h"
#include "SVBuffer.h"
#include "SVCombine.h"
using namespace VkInline;

class SVVector : public SVCombine
{
public:
    const std::string& name_elem_type() const
    {
        return ((SVBuffer*)m_components[1])->name_elem_type();
    }
    size_t elem_size() const
    {
        return ((SVBuffer*)m_components[1])->elem_size();
    }
    size_t size() const
    {
        return ((SVBuffer*)m_components[1])->size();
    }

    SVVector(const char* elem_type, size_t size, void* hdata = nullptr)
        :SVCombine({
            {"size", new SVUInt32((unsigned)size)},
            {"data", new SVBuffer(elem_type, size, hdata)} },
            (std::string("") +
            "uint get_size(in Comb_#hash# vec)\n"
            "{\n"
            " return vec.size;\n"
            "}\n" +
            elem_type + " get_value(in Comb_#hash# vec, in uint id)\n"
            "{\n"
            " return vec.data[id].v;\n"
            "}\n"
            "void set_value(in Comb_#hash# vec, in uint id, in " + elem_type + " value)\n"
            "{\n"
            " vec.data[id].v = value;\n"
            "}\n").c_str()){}

    ~SVVector()
    {
        delete m_components[0];
        delete m_components[1];
    }

    void to_host(void* hdata, size_t begin = 0, size_t end = (size_t)(-1)) const
    {
        size_t _size = size();
        if (end == (size_t)(-1) || end > _size) end = _size;
        ((SVBuffer*)m_components[1])->to_host(hdata, begin, end);
    }

};

int main()
{
    Computer ker(
        { "arr_in", "arr_out", "k" },
        "void main()\n"
        "{\n"
        " uint id = gl_GlobalInvocationID.x;\n"
        " if (id >= get_size(arr_in)) return;\n"
        " set_value(arr_out, id, get_value(arr_in, id)*k);\n"
        "}\n"
    );

    float test_f[5] = { 1.0f, 2.0f, 3.0f, 4.0f, 5.0 };
    SVVector svbuf_in("float", 5, test_f);
    SVVector svbuf_out("float", 5);
    SVFloat k1(10.0);
    const ShaderViewable* args_f[] = { &svbuf_in, &svbuf_out, &k1 };
    ker.launch({ 1,1,1 }, { 128, 1, 1 }, args_f, {}, {}, {});
    svbuf_out.to_host(test_f);
    printf("%f %f %f %f %f\n", test_f[0], test_f[1], test_f[2], test_f[3], test_f[4]);



    return 0;
}

--------------------------------------------------------------------------------
/SVObjBuffer.cpp:
--------------------------------------------------------------------------------
#include <stdio.h>
#include "internal_context.h"
#include "SVObjBuffer.h"
#include "Context.h"

namespace VkInline
{
    SVObjBuffer::SVObjBuffer(const std::vector<const ShaderViewable*>& elems)
    {
        if (elems.size() < 1)
        {
            printf("SVObjBuffer: cannot create with empty input.\n");
            exit(0);
        }
        m_elem_type = elems[0]->name_view_type();
        for (size_t i = 1; i < elems.size(); i++)
            if (elems[i]->name_view_type() != m_elem_type)
            {
                printf("SVObjBuffer: input elements must be the same type.\n");
                exit(0);
            }
        m_elems = elems;
        m_elem_size = SizeOf(m_elem_type.c_str());
        m_size = elems.size();

        unsigned alignment = 4;
        if (m_elem_size % 8 == 0) alignment = 8;
        if (m_elem_size % 16 == 0) alignment = 16;

        char line[1024];
        sprintf(line, "layout(buffer_reference, scalar, buffer_reference_align = %u) buffer Buf_#hash#\n", alignment);
        std::string code = std::string(line) +
            "{\n " + m_elem_type +
            " v;\n"
            "};\n";

        m_name_view_type = std::string("Buf_") + Add_Dynamic_Code(code.c_str());

        m_data = new Internal::UploadBuffer(m_elem_size*m_size, VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT);

        update();
    }

    SVObjBuffer::~SVObjBuffer()
    {
        delete m_data;
    }

    void SVObjBuffer::update()
    {
        ViewBuf whole(m_elem_size*m_size);
        for (size_t i = 0; i < m_elems.size(); i++)
        {
            ViewBuf elem_view = m_elems[i]->view();
            memcpy(whole.data() + m_elem_size * i, elem_view.data(), elem_view.size());
        }
        m_data->upload(whole.data());
    }

    ViewBuf SVObjBuffer::view() const
    {
        ViewBuf buf(sizeof(VkDeviceAddress));
        VkDeviceAddress* pview = (VkDeviceAddress*)buf.data();
        *pview = m_data->get_device_address();
        return buf;
    }

    void SVObjBuffer::apply_barriers(const Internal::CommandBuffer& cmdbuf, unsigned dstFlags) const
    {
        for (size_t i = 0; i < m_elems.size(); i++)
        {
            m_elems[i]->apply_barriers(cmdbuf, dstFlags);
        }
        m_data->apply_barrier(cmdbuf, VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT, dstFlags);
    }

}
--------------------------------------------------------------------------------
/Context_ex.h:
--------------------------------------------------------------------------------
namespace VkInline
{
    namespace Internal
    {
        class BaseLevelAS;
        class TopLevelAS;
    }

    class BaseLevelAS
    {
    public:
        BaseLevelAS(SVBuffer* indBuf, SVBuffer* posBuf);
        BaseLevelAS(SVBuffer* aabbBuf);
        ~BaseLevelAS();

        Internal::BaseLevelAS* internal() { return m_blas; }
        const Internal::BaseLevelAS* internal() const { return m_blas; }

    private:
        Internal::BaseLevelAS* m_blas;

    };

    class Mat4
    {
    public:
        Mat4(const float* data);
        const float* data() const { return m_data; }

    private:
        float m_data[16];
    };

    struct BLAS_EX
    {
        const BaseLevelAS* blas;
        const Mat4* trans;
    };

    class TopLevelAS
    {
    public:
        TopLevelAS(const std::vector<std::vector<BLAS_EX>>& blases);
        ~TopLevelAS();

        Internal::TopLevelAS* internal() { return m_tlas; }
        const Internal::TopLevelAS* internal() const { return m_tlas; }

    private:
        Internal::TopLevelAS* m_tlas;
    };

    class BodyHitShaders
    {
    public:
        BodyHitShaders(const char* body_closest_hit, const char* body_intersection);

        const char* body_closest_hit() const;
        const char* body_intersection() const;

    private:
        std::string m_body_closest_hit;
        std::string m_body_intersection;
    };

    class RayTracer
    {
    public:
        size_t num_params() const { return m_param_names.size(); }
        RayTracer(const std::vector<const char*>& param_names, const char* body_raygen, const std::vector<const char*>& body_miss, const std::vector<const BodyHitShaders*>& body_hit, unsigned maxRecursionDepth, bool type_locked = false);
        bool launch(dim_type glbDim, const ShaderViewable** args, const std::vector<const TopLevelAS*>& arr_tlas,
            const std::vector<Texture2D*>& tex2ds, const std::vector<Texture3D*>& tex3ds, const std::vector<Cubemap*>& cubemaps, size_t times_submission = 1);

    private:
        std::vector<std::string> m_param_names;
        std::string m_body_raygen;
        std::vector<std::string> m_body_miss;
        std::vector<const BodyHitShaders*> m_body_hit;
        unsigned m_maxRecursionDepth;

        bool m_type_locked;
        unsigned m_kid;
        std::vector<size_t> m_offsets;
        std::mutex m_mu_type_lock;

    };




}
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
Copyright (c) 2019 Fei Yang

"Anti 996" License Version 1.0 (Draft)

Permission is hereby granted to any individual or legal entity
obtaining a copy of this licensed work (including the source code,
documentation and/or related items, hereinafter collectively referred
to as the "licensed work"), free of charge, to deal with the licensed
work for any purpose, including without limitation, the rights to use,
reproduce, modify, prepare derivative works of, distribute, publish
and sublicense the licensed work, subject to the following conditions:

1. The individual or the legal entity must conspicuously display,
without modification, this License and the notice on each redistributed
or derivative copy of the Licensed Work.

2. The individual or the legal entity must strictly comply with all
applicable laws, regulations, rules and standards of the jurisdiction
relating to labor and employment where the individual is physically
located or where the individual was born or naturalized; or where the
legal entity is registered or is operating (whichever is stricter). In
case that the jurisdiction has no such laws, regulations, rules and
standards or its laws, regulations, rules and standards are
unenforceable, the individual or the legal entity are required to
comply with Core International Labor Standards.

3. The individual or the legal entity shall not induce, metaphor or force
its employee(s), whether full-time or part-time, or its independent
contractor(s), in any methods, to agree in oral or written form, to
directly or indirectly restrict, weaken or relinquish his or her
rights or remedies under such laws, regulations, rules and standards
relating to labor and employment as mentioned above, no matter whether
such written or oral agreement are enforceable under the laws of the
said jurisdiction, nor shall such individual or the legal entity
limit, in any methods, the rights of its employee(s) or independent
contractor(s) from reporting or complaining to the copyright holder or
relevant authorities monitoring the compliance of the license about
its violation(s) of the said license.

THE LICENSED WORK IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE FOR ANY CLAIM,
DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
OTHERWISE, ARISING FROM, OUT OF OR IN ANY WAY CONNECTION WITH THE
LICENSED WORK OR THE USE OR OTHER DEALINGS IN THE LICENSED WORK.
--------------------------------------------------------------------------------
/CMakeLists.txt:
--------------------------------------------------------------------------------
cmake_minimum_required (VERSION 3.0)

set (SKIP_GLSLANG_INSTALL true)
set (SPIRV_SKIP_TESTS true)
set (SPIRV_CROSS_ENABLE_TESTS false)
set (SPIRV_CROSS_SKIP_INSTALL true)
set (SPIRV_CROSS_STATIC true)

project(VkInline)

set (INCLUDE_DIR
thirdparty
thirdparty/volk
thirdparty/glslang
thirdparty/SPIRV-Cross
thirdparty/Vulkan-Headers/include
thirdparty/crc64
thirdparty/unqlite
thirdparty/Vulkan_utils
.
./internal
)


if (WIN32)
set (DEFINES ${DEFINES}
-D"_CRT_SECURE_NO_DEPRECATE"
-D"_SCL_SECURE_NO_DEPRECATE"
-D"_CRT_SECURE_NO_WARNINGS"
)
else()
add_definitions(-std=c++17)
add_compile_options(-fPIC)
endif()

add_definitions(-D"VK_ENABLE_BETA_EXTENSIONS")
include_directories(${INCLUDE_DIR})
add_subdirectory(thirdparty/volk)
add_subdirectory(thirdparty/glslang)
add_subdirectory(thirdparty/SPIRV-Cross)

set (LIB_SOURCES
thirdparty/crc64/crc64.cpp
thirdparty/Vulkan_utils/vk_format_utils.cpp
internal/internal_context.cpp
glslc.cpp
Context.cpp
SVBuffer.cpp
SVCombine.cpp
SVObjBuffer.cpp
)

set (LIB_HEADERS
ShaderViewable.h
Context.h
SVBuffer.h
SVCombine.h
SVObjBuffer.h
Context_ex.h
Context_ex.inl
)


set(INTERNAL_HEADERS
thirdparty/crc64/crc64.h
internal/internal_context.h
internal/impl_context.inl
internal/impl_context_ex.inl
internal/internal_context_ex.h
internal/internal_context_ex.inl
)

add_definitions(${DEFINES})
add_library(unqlite STATIC thirdparty/unqlite/unqlite.h thirdparty/unqlite/unqlite.c)

add_library(VkInline ${LIB_SOURCES} ${LIB_HEADERS} ${INTERNAL_HEADERS})
target_link_libraries(VkInline volk glslang SPIRV spirv-cross-glsl unqlite)

add_library(VkInlineEX ${LIB_SOURCES} ${LIB_HEADERS} ${INTERNAL_HEADERS})
target_compile_definitions(VkInlineEX PUBLIC _VkInlineEX)
target_link_libraries(VkInlineEX volk glslang SPIRV spirv-cross-glsl unqlite)


if (WIN32)
else()
target_link_libraries(VkInline dl)
target_link_libraries(VkInlineEX dl)
endif()


IF(CMAKE_INSTALL_PREFIX_INITIALIZED_TO_DEFAULT)
SET(CMAKE_INSTALL_PREFIX ../install CACHE PATH "Install path" FORCE)
ENDIF(CMAKE_INSTALL_PREFIX_INITIALIZED_TO_DEFAULT)

install(FILES ${LIB_HEADERS} DESTINATION include)
install(TARGETS VkInline DESTINATION lib)
install(TARGETS VkInlineEX DESTINATION lib)

set(VKINLINE_BUILD_TESTS false CACHE BOOL "Build tests")

if (VKINLINE_BUILD_TESTS)
add_subdirectory(test)
endif()

set(BUILD_PYTHON_BINDINGS true CACHE BOOL "Build Python Bindings")

if (BUILD_PYTHON_BINDINGS)
add_subdirectory(python)
endif()
--------------------------------------------------------------------------------
/python/test/test_obj.py:
--------------------------------------------------------------------------------
import VkInline as vki
import numpy as np
import tinyobjloader
from PIL import Image
import glm

reader = tinyobjloader.ObjReader()
reader.ParseFromFile('spider.obj')
attrib = reader.GetAttrib()

positions = np.array(attrib.vertices, dtype=np.float32)
positions = np.reshape(positions, (-1, 3))

normals = np.array(attrib.normals, dtype=np.float32)
normals = np.reshape(normals, (-1, 3))

shapes = reader.GetShapes()
lst_vertex_inds = []
lst_normal_inds = []
for shape in shapes:
    for ind in shape.mesh.indices:
        lst_vertex_inds += [ind.vertex_index]
        lst_normal_inds += [ind.normal_index]

vertex_inds = np.array(lst_vertex_inds, dtype=np.uint32)
normal_inds = np.array(lst_normal_inds, dtype=np.uint32)

VK_FORMAT_R8G8B8A8_SRGB = 43
VK_FORMAT_D32_SFLOAT = 126

width = 640 32 | height = 480 33 | 34 | colorBuf = vki.Texture2D(width, height, VK_FORMAT_R8G8B8A8_SRGB) 35 | depthBuf = vki.Texture2D(width, height, VK_FORMAT_D32_SFLOAT, isDepth=True) 36 | gpuPos = vki.device_vector_from_numpy(positions) 37 | gpuNormals = vki.device_vector_from_numpy(normals) 38 | gpuInd_pos = vki.device_vector_from_numpy(vertex_inds) 39 | gpuInd_norm = vki.device_vector_from_numpy(normal_inds) 40 | 41 | 42 | rp = vki.Rasterizer(['arr_pos', 'arr_norm', 'ind_pos', 'ind_norm', 'mat_pos', 'mat_norm']) 43 | 44 | rp.add_draw_call(vki.DrawCall( 45 | ''' 46 | layout (location = 0) out vec3 vNorm; 47 | void main() 48 | { 49 | vec3 pos = get_value(arr_pos, get_value(ind_pos, gl_VertexIndex)); 50 | vec3 norm = get_value(arr_norm, get_value(ind_norm, gl_VertexIndex)); 51 | vec4 pos_trans = mat_pos*vec4(pos, 1.0); 52 | pos_trans.y = -pos_trans.y; 53 | pos_trans.z = (pos_trans.z + pos_trans.w) / 2.0; 54 | gl_Position = pos_trans; 55 | vec4 norm_trans = mat_norm*vec4(norm, 0.0); 56 | vNorm = norm_trans.xyz; 57 | } 58 | ''', 59 | ''' 60 | layout (location = 0) in vec3 vNorm; 61 | layout (location = 0) out vec4 outColor; 62 | 63 | void main() 64 | { 65 | outColor = vec4(0.0, 0.0, 0.0, 1.0); 66 | 67 | vec3 norm = normalize(vNorm); 68 | vec3 light_dir = normalize(vec3(0.2, 0.5, -1.0)); 69 | light_dir = reflect(light_dir, norm); 70 | 71 | if (light_dir.z>0.0) 72 | { 73 | float intensity = pow(light_dir.z, 5.0)*0.8; 74 | outColor += vec4(intensity, intensity, intensity, 0.0); 75 | } 76 | 77 | outColor = clamp(outColor, 0.0, 1.0); 78 | 79 | } 80 | ''')) 81 | 82 | proj = glm.perspective(glm.radians(45.0), width/height, 0.1, 2000.0) 83 | modelView = glm.lookAt(glm.vec3(-100.0, 200.0, 200.0), glm.vec3(0.0,0.0,0.0), glm.vec3(0.0, 1.0, 0.0)) 84 | mat_pos = vki.SVMat4x4(proj*modelView) 85 | mat_norm = vki.SVMat4x4(glm.transpose(glm.inverse(modelView))) 86 | 87 | rp.launch([len(vertex_inds)], [colorBuf], depthBuf, [0.5, 0.5, 0.5, 1.0], 1.0, [gpuPos, gpuNormals, gpuInd_pos, gpuInd_norm, mat_pos, mat_norm]) 88 | 89 | image_out = np.empty((height, width, 4), dtype=np.uint8) 90 | colorBuf.download(image_out) 91 | Image.fromarray(image_out, 'RGBA').save('output.png') 92 | 93 | 94 | 95 | -------------------------------------------------------------------------------- /ShaderViewable.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | #include 5 | #include 6 | #include 7 | 8 | namespace VkInline 9 | { 10 | namespace Internal 11 | { 12 | class CommandBuffer; 13 | } 14 | 15 | typedef std::vector ViewBuf; 16 | 17 | // root class of all shader-viewable objects 18 | class ShaderViewable 19 | { 20 | public: 21 | ShaderViewable() {} 22 | virtual ~ShaderViewable() {} 23 | virtual ViewBuf view() const = 0; 24 | const std::string& name_view_type() const { return m_name_view_type; } 25 | virtual void apply_barriers(const Internal::CommandBuffer& cmdbuf, unsigned dstFlags) const {} 26 | 27 | protected: 28 | std::string m_name_view_type; 29 | }; 30 | 31 | struct CapturedShaderViewable 32 | { 33 | const char* obj_name; 34 | const ShaderViewable* obj; 35 | }; 36 | 37 | class SomeShaderViewable : public ShaderViewable 38 | { 39 | public: 40 | SomeShaderViewable(const char* name_view_type, const void* data_view = "", size_t size_view = 1) 41 | { 42 | m_name_view_type = name_view_type; 43 | m_view_buf.resize(size_view); 44 | memcpy(m_view_buf.data(), data_view, size_view); 45 | } 46 | 47 | virtual ViewBuf view() const 48 | { 49 | return m_view_buf; 50 | 
} 51 | 52 | private: 53 | ViewBuf m_view_buf; 54 | }; 55 | 56 | #define DECLAR_SV_BASIC(clsname, type_host, type_dev)\ 57 | class clsname : public SomeShaderViewable\ 58 | {\ 59 | public:\ 60 | clsname(type_host in) : SomeShaderViewable(#type_dev, &in, sizeof(type_host)) {}\ 61 | }; 62 | DECLAR_SV_BASIC(SVInt32, int32_t, int) 63 | DECLAR_SV_BASIC(SVUInt32, uint32_t, uint) 64 | DECLAR_SV_BASIC(SVFloat, float, float) 65 | DECLAR_SV_BASIC(SVDouble, double, double) 66 | 67 | #define DECLAR_SV_VEC(clsname, type_elem_host, type_dev, num_elem)\ 68 | class clsname : public SomeShaderViewable\ 69 | {\ 70 | public:\ 71 | clsname(const type_elem_host* in) : SomeShaderViewable(#type_dev, in, sizeof(type_elem_host)*num_elem){}\ 72 | }; 73 | DECLAR_SV_VEC(SVIVec2, int32_t, ivec2, 2) 74 | DECLAR_SV_VEC(SVIVec3, int32_t, ivec3, 3) 75 | DECLAR_SV_VEC(SVIVec4, int32_t, ivec4, 4) 76 | 77 | DECLAR_SV_VEC(SVUVec2, uint32_t, uvec2, 2) 78 | DECLAR_SV_VEC(SVUVec3, uint32_t, uvec3, 3) 79 | DECLAR_SV_VEC(SVUVec4, uint32_t, uvec4, 4) 80 | 81 | DECLAR_SV_VEC(SVVec2, float, vec2, 2) 82 | DECLAR_SV_VEC(SVVec3, float, vec3, 3) 83 | DECLAR_SV_VEC(SVVec4, float, vec4, 4) 84 | 85 | DECLAR_SV_VEC(SVDVec2, double, dvec2, 2) 86 | DECLAR_SV_VEC(SVDVec3, double, dvec3, 3) 87 | DECLAR_SV_VEC(SVDVec4, double, dvec4, 4) 88 | 89 | DECLAR_SV_VEC(SVMat2x2, float, mat2x2, 2 * 2) 90 | DECLAR_SV_VEC(SVMat2x3, float, mat2x3, 2 * 3) 91 | DECLAR_SV_VEC(SVMat2x4, float, mat2x4, 2 * 4) 92 | DECLAR_SV_VEC(SVMat3x2, float, mat3x2, 3 * 2) 93 | DECLAR_SV_VEC(SVMat3x3, float, mat3x3, 3 * 3) 94 | DECLAR_SV_VEC(SVMat3x4, float, mat3x4, 3 * 4) 95 | DECLAR_SV_VEC(SVMat4x2, float, mat4x2, 4 * 2) 96 | DECLAR_SV_VEC(SVMat4x3, float, mat4x3, 4 * 3) 97 | DECLAR_SV_VEC(SVMat4x4, float, mat4x4, 4 * 4) 98 | 99 | DECLAR_SV_VEC(SVDMat2x2, double, dmat2x2, 2 * 2) 100 | DECLAR_SV_VEC(SVDMat2x3, double, dmat2x3, 2 * 3) 101 | DECLAR_SV_VEC(SVDMat2x4, double, dmat2x4, 2 * 4) 102 | DECLAR_SV_VEC(SVDMat3x2, double, dmat3x2, 3 * 2) 103 | DECLAR_SV_VEC(SVDMat3x3, double, dmat3x3, 3 * 3) 104 | DECLAR_SV_VEC(SVDMat3x4, double, dmat3x4, 3 * 4) 105 | DECLAR_SV_VEC(SVDMat4x2, double, dmat4x2, 4 * 2) 106 | DECLAR_SV_VEC(SVDMat4x3, double, dmat4x3, 4 * 3) 107 | DECLAR_SV_VEC(SVDMat4x4, double, dmat4x4, 4 * 4) 108 | } 109 | 110 | -------------------------------------------------------------------------------- /python/test/test_raytrace.py: -------------------------------------------------------------------------------- 1 | import VkInline as vki 2 | import numpy as np 3 | from PIL import Image 4 | import glm 5 | 6 | width = 800 7 | height = 400 8 | 9 | aabb_unit_sphere = np.array([-1.0, -1.0, -1.0, 1.0, 1.0, 1.0], dtype = np.float32) 10 | d_aabb_unit_sphere = vki.device_vector_from_numpy(aabb_unit_sphere) 11 | blas_unit_sphere = vki.BaseLevelAS(gpuAABB = d_aabb_unit_sphere) 12 | transform = glm.identity(glm.mat4) 13 | transform = glm.translate(transform, glm.vec3(0.0, 0.0, -1.0)) 14 | transform = glm.scale(transform, glm.vec3(0.5, 0.5, 0.5)) 15 | tlas = vki.TopLevelAS([[(blas_unit_sphere, transform)]]) 16 | 17 | darr_out = vki.SVVector('vec3', width*height) 18 | 19 | raytracer = vki.RayTracer(['arr_out', 'width', 'height'], 20 | ''' 21 | struct Payload 22 | { 23 | float t; 24 | vec3 color; 25 | }; 26 | 27 | layout(location = 0) rayPayloadEXT Payload payload; 28 | 29 | void main() 30 | { 31 | int x = int(gl_LaunchIDEXT.x); 32 | int y = int(gl_LaunchIDEXT.y); 33 | if (x>=width || y>height) return; 34 | 35 | vec3 lower_left_corner = vec3(-2.0, -1.0, -1.0); 36 | vec3 horizontal = 
vec3(4.0, 0.0, 0.0); 37 | vec3 vertical = vec3(0.0, 2.0, 0.0); 38 | vec3 origin = vec3(0.0, 0.0, 0.0); 39 | 40 | float u = (float(x)+0.5)/float(width); 41 | float v = 1.0 - (float(y)+0.5)/float(height); 42 | 43 | vec3 direction = normalize(lower_left_corner + u * horizontal + v * vertical); 44 | 45 | uint rayFlags = gl_RayFlagsOpaqueEXT; 46 | uint cullMask = 0xff; 47 | float tmin = 0.001; 48 | float tmax = 1000000.0; 49 | 50 | traceRayEXT(arr_tlas[0], rayFlags, cullMask, 0, 0, 0, origin, tmin, direction, tmax, 0); 51 | 52 | set_value(arr_out, x+y*width, payload.color); 53 | } 54 | 55 | ''', [ 56 | ''' 57 | struct Payload 58 | { 59 | float t; 60 | vec3 color; 61 | }; 62 | 63 | layout(location = 0) rayPayloadInEXT Payload payload; 64 | 65 | void main() 66 | { 67 | payload.t = -1.0; 68 | vec3 direction = gl_WorldRayDirectionEXT; 69 | float t = 0.5 * (direction.y + 1.0); 70 | payload.color = (1.0 - t)*vec3(1.0, 1.0, 1.0) + t * vec3(0.5, 0.7, 1.0); 71 | } 72 | '''], [vki.HitShaders( 73 | closest_hit = ''' 74 | struct Payload 75 | { 76 | float t; 77 | vec3 color; 78 | }; 79 | 80 | layout(location = 0) rayPayloadInEXT Payload payload; 81 | hitAttributeEXT vec3 hitpoint; 82 | 83 | void main() 84 | { 85 | vec3 normal = normalize(hitpoint); 86 | payload.t = gl_HitTEXT; 87 | payload.color = (normal+vec3(1.0, 1.0, 1.0))*0.5; 88 | } 89 | 90 | ''', 91 | intersection = ''' 92 | hitAttributeEXT vec3 hitpoint; 93 | 94 | void main() 95 | { 96 | vec3 origin = gl_ObjectRayOriginEXT; 97 | vec3 direction = gl_ObjectRayDirectionEXT; 98 | float tMin = gl_RayTminEXT; 99 | float tMax = gl_RayTmaxEXT; 100 | 101 | const float a = dot(direction, direction); 102 | const float b = dot(origin, direction); 103 | const float c = dot(origin, origin) - 1.0; 104 | const float discriminant = b * b - a * c; 105 | 106 | if (discriminant >= 0) 107 | { 108 | const float t1 = (-b - sqrt(discriminant)) / a; 109 | const float t2 = (-b + sqrt(discriminant)) / a; 110 | 111 | if ((tMin <= t1 && t1 < tMax) || (tMin <= t2 && t2 < tMax)) 112 | { 113 | float t = t1; 114 | if (tMin <= t1 && t1 < tMax) 115 | { 116 | hitpoint = origin + direction * t1; 117 | } 118 | else 119 | { 120 | t = t2; 121 | hitpoint = origin + direction * t2; 122 | } 123 | reportIntersectionEXT(t, 0); 124 | } 125 | } 126 | 127 | } 128 | ''' 129 | )]) 130 | 131 | svwidth = vki.SVInt32(width) 132 | svheight = vki.SVInt32(height) 133 | 134 | raytracer.launch((width, height), [darr_out, svwidth, svheight], [tlas]) 135 | 136 | out = darr_out.to_host() 137 | out = out.reshape((height,width,3))*255.0 138 | out = out.astype(np.uint8) 139 | Image.fromarray(out, 'RGB').save('output.png') 140 | 141 | 142 | 143 | 144 | -------------------------------------------------------------------------------- /python/VkInline/ContextEX.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from .Native import ffi, native 3 | from .ShaderViewable import * 4 | from .utils import * 5 | 6 | class BaseLevelAS: 7 | def __init__(self, gpuInd = None, gpuPos = None, gpuAABB = None): 8 | if gpuAABB == None: 9 | if gpuInd == None: 10 | self.m_buf_ind = None 11 | p_buf_ind = ffi.NULL 12 | else: 13 | self.m_buf_ind = gpuInd.m_buf 14 | p_buf_ind = self.m_buf_ind.m_cptr 15 | self.m_buf_pos = gpuPos.m_buf 16 | p_buf_pos = self.m_buf_pos.m_cptr 17 | self.m_cptr = native.n_blas_create_triangles(p_buf_ind, p_buf_pos) 18 | else: 19 | self.m_buf_aabb = gpuAABB.m_buf 20 | p_buf_aabb = self.m_buf_aabb.m_cptr 21 | self.m_cptr = 
native.n_blas_create_procedure(p_buf_aabb) 22 | 23 | def __del__(self): 24 | native.n_blas_destroy(self.m_cptr) 25 | 26 | class Mat4: 27 | def __init__(self, value): 28 | self.m_cptr = native.n_mat4_create((value[0].x, value[0].y, value[0].z, value[0].w, value[1].x, value[1].y, value[1].z, value[1].w, value[2].x, value[2].y, value[2].z, value[2].w, value[3].x, value[3].y, value[3].z, value[3].w)) 29 | 30 | def __del__(self): 31 | native.n_mat4_destroy(self.m_cptr) 32 | 33 | 34 | class TopLevelAS: 35 | def __init__(self, l2_lst_blas_trans_pairs): 36 | self.m_lst_lst_blases = [[p[0] for p in lst_i] for lst_i in l2_lst_blas_trans_pairs] 37 | self.m_lst_lst_transes = [[Mat4(p[1]) for p in lst_i] for lst_i in l2_lst_blas_trans_pairs] 38 | lst_obj_arr_blases = [ObjArray(lst_i) for lst_i in self.m_lst_lst_blases] 39 | lst_obj_arr_transes = [ObjArray(lst_i) for lst_i in self.m_lst_lst_transes] 40 | obj_arr_obj_arr_blases = ObjArray(lst_obj_arr_blases) 41 | obj_arr_obj_arr_transes = ObjArray(lst_obj_arr_transes) 42 | self.m_cptr = native.n_tlas_create(obj_arr_obj_arr_blases.m_cptr, obj_arr_obj_arr_transes.m_cptr) 43 | 44 | def __del__(self): 45 | native.n_tlas_destroy(self.m_cptr) 46 | 47 | 48 | class HitShaders: 49 | def __init__(self, closest_hit, intersection=None): 50 | p_closest_hit = closest_hit.encode('utf-8') 51 | if intersection == None: 52 | p_intersection = ffi.NULL 53 | else: 54 | p_intersection = intersection.encode('utf-8') 55 | self.m_cptr = native.n_hit_shaders_create(p_closest_hit, p_intersection) 56 | 57 | def __del__(self): 58 | native.n_hit_shaders_destroy(self.m_cptr) 59 | 60 | 61 | class RayTracer: 62 | def __init__(self, param_names, body_raygen, lst_body_miss, lst_body_hit, max_recursion_depth=1, type_locked=False): 63 | o_param_names = StrArray(param_names) 64 | o_body_miss = StrArray(lst_body_miss) 65 | self.m_lst_body_hit = lst_body_hit 66 | o_body_hit = ObjArray(lst_body_hit) 67 | self.m_cptr = native.n_raytracer_create(o_param_names.m_cptr, body_raygen.encode('utf-8'), o_body_miss.m_cptr, o_body_hit.m_cptr, max_recursion_depth, type_locked) 68 | 69 | def __del__(self): 70 | native.n_raytracer_destroy(self.m_cptr) 71 | 72 | def num_params(self): 73 | return native.n_raytracer_num_params(self.m_cptr) 74 | 75 | def launch(self, glbDim, args, lst_tlas, tex2ds=[], tex3ds=[], cubemaps=[], times_submission = 1): 76 | d_glbDim = Dim3(glbDim) 77 | arg_list = ObjArray(args) 78 | tlas_list = ObjArray(lst_tlas) 79 | tex2d_list = ObjArray(tex2ds) 80 | tex3d_list = ObjArray(tex3ds) 81 | cubemap_list = ObjArray(cubemaps) 82 | native.n_raytracer_launch( 83 | self.m_cptr, 84 | d_glbDim.m_cptr, 85 | arg_list.m_cptr, 86 | tlas_list.m_cptr, 87 | tex2d_list.m_cptr, 88 | tex3d_list.m_cptr, 89 | cubemap_list.m_cptr, 90 | times_submission) 91 | 92 | 93 | -------------------------------------------------------------------------------- /python/api_Context_ex.inl: -------------------------------------------------------------------------------- 1 | void* n_blas_create_triangles(void* indBuf, void* posBuf) 2 | { 3 | return new BaseLevelAS((SVBuffer*)indBuf, (SVBuffer*)posBuf); 4 | } 5 | 6 | void* n_blas_create_procedure(void* aabbBuf) 7 | { 8 | return new BaseLevelAS((SVBuffer*)aabbBuf); 9 | } 10 | 11 | void n_blas_destroy(void* ptr_blas) 12 | { 13 | delete (BaseLevelAS*)ptr_blas; 14 | } 15 | 16 | void* n_mat4_create(const float* v) 17 | { 18 | return new Mat4(v); 19 | } 20 | 21 | void n_mat4_destroy(void* ptr) 22 | { 23 | delete (Mat4*)ptr; 24 | } 25 | 26 | void* n_tlas_create(void* ptr_blases, 
void* ptr_transes) 27 | { 28 | std::vector*>* blases = (std::vector*>*)ptr_blases; 29 | std::vector*>* transes = (std::vector*>*)ptr_transes; 30 | size_t num_hitgroups = blases->size(); 31 | std::vector> blasex(num_hitgroups); 32 | for (size_t i = 0; i < num_hitgroups; i++) 33 | { 34 | std::vector* pblases = (*blases)[i]; 35 | std::vector* ptranses = (*transes)[i]; 36 | size_t num_blases = pblases->size(); 37 | blasex[i].resize(num_blases); 38 | for (size_t j = 0; j < num_blases; j++) 39 | { 40 | blasex[i][j].blas = (*pblases)[j]; 41 | blasex[i][j].trans = (*ptranses)[j]; 42 | } 43 | } 44 | return new TopLevelAS(blasex); 45 | } 46 | 47 | void n_tlas_destroy(void* ptr) 48 | { 49 | delete (TopLevelAS*)ptr; 50 | } 51 | 52 | void* n_hit_shaders_create(const char* closest_hit, const char* intersection) 53 | { 54 | return new BodyHitShaders(closest_hit, intersection); 55 | } 56 | 57 | void n_hit_shaders_destroy(void* ptr) 58 | { 59 | delete (BodyHitShaders*)ptr; 60 | } 61 | 62 | void* n_raytracer_create(void* ptr_param_list, const char* body_raygen, void* ptr_body_miss, void* ptr_body_hit, unsigned maxRecursionDepth, unsigned type_locked) 63 | { 64 | StrArray* param_list = (StrArray*)ptr_param_list; 65 | size_t num_params = param_list->size(); 66 | StrArray* body_miss = (StrArray*)ptr_body_miss; 67 | size_t num_miss = body_miss->size(); 68 | std::vector* body_hit = (std::vector*)ptr_body_hit; 69 | 70 | std::vector params(num_params); 71 | for (size_t i = 0; i < num_params; i++) 72 | params[i] = (*param_list)[i].c_str(); 73 | 74 | std::vector body_misses(num_miss); 75 | for (size_t i = 0; i < num_miss; i++) 76 | body_misses[i] = (*body_miss)[i].c_str(); 77 | 78 | return new RayTracer(params, body_raygen, body_misses, *body_hit, maxRecursionDepth, type_locked != 0); 79 | } 80 | 81 | void n_raytracer_destroy(void* cptr) 82 | { 83 | delete (RayTracer*)cptr; 84 | } 85 | 86 | int n_raytracer_num_params(void* cptr) 87 | { 88 | RayTracer* raytracer = (RayTracer*)cptr; 89 | return (int)raytracer->num_params(); 90 | } 91 | 92 | int n_raytracer_launch(void* ptr_raytracer, void* ptr_glbDim, void* ptr_arg_list, void* ptr_tlas_list, void* ptr_tex2d_list, void* ptr_tex3d_list, void* ptr_cubemap_list, unsigned times_submission) 93 | { 94 | RayTracer* raytracer = (RayTracer*)ptr_raytracer; 95 | size_t num_params = raytracer->num_params(); 96 | 97 | dim_type* glbDim = (dim_type*)ptr_glbDim; 98 | 99 | PtrArray* arg_list = (PtrArray*)ptr_arg_list; 100 | std::vector* tlas_list = (std::vector*)ptr_tlas_list; 101 | Tex2DArray* tex2d_list = (Tex2DArray*)ptr_tex2d_list; 102 | Tex3DArray* tex3d_list = (Tex3DArray*)ptr_tex3d_list; 103 | CubemapArray* cubemap_list = (CubemapArray*)ptr_cubemap_list; 104 | 105 | size_t size = arg_list->size(); 106 | if (num_params != size) 107 | { 108 | printf("Wrong number of arguments received. %d required, %d received.", (int)num_params, (int)size); 109 | return -1; 110 | } 111 | 112 | if (raytracer->launch(*glbDim, arg_list->data(), *tlas_list, *tex2d_list, *tex3d_list, *cubemap_list, times_submission)) 113 | return 0; 114 | else 115 | return -1; 116 | } 117 | 118 | 119 | 120 | -------------------------------------------------------------------------------- /internal/internal_context_ex.h: -------------------------------------------------------------------------------- 1 | /*************** 2 | Known-limitations: 3 | * Objects are opaque. No anyhit-shader for now. 
4 | ******************/ 5 | namespace VkInline 6 | { 7 | namespace Internal 8 | { 9 | class AS 10 | { 11 | public: 12 | const VkAccelerationStructureKHR & structure() const { return m_structure; } 13 | 14 | protected: 15 | AS(); 16 | virtual ~AS(); 17 | VkAccelerationStructureKHR m_structure; 18 | }; 19 | 20 | class BaseLevelAS : public AS 21 | { 22 | public: 23 | BaseLevelAS( 24 | const VkAccelerationStructureBuildGeometryInfoKHR& geoBuildInfo, 25 | const VkAccelerationStructureGeometryKHR* pGeometries, 26 | const VkAccelerationStructureBuildRangeInfoKHR** ranges); 27 | 28 | virtual ~BaseLevelAS(); 29 | 30 | private: 31 | DeviceBuffer* m_scratchBuffer; 32 | DeviceBuffer* m_resultBuffer; 33 | }; 34 | 35 | class TopLevelAS : public AS 36 | { 37 | public: 38 | TopLevelAS(size_t num_hitgroups, size_t* num_instances, const VkAccelerationStructureKHR** pblases, const float*** ptransforms); 39 | virtual ~TopLevelAS(); 40 | 41 | private: 42 | DeviceBuffer* m_scratchBuffer; 43 | DeviceBuffer* m_resultBuffer; 44 | DeviceBuffer* m_instancesBuffer; 45 | }; 46 | 47 | struct HitShaders 48 | { 49 | const std::vector* closest_hit; 50 | const std::vector* intersection; 51 | // const std::vector* any_hit; 52 | }; 53 | 54 | class RayTracePipeline 55 | { 56 | public: 57 | RayTracePipeline(const std::vector& spv_raygen, 58 | const std::vector*>& spv_miss, 59 | const std::vector& spv_hit, 60 | unsigned maxRecursionDepth, size_t num_tlas, size_t num_tex2d, size_t num_tex3d, size_t num_cubemap); 61 | 62 | ~RayTracePipeline(); 63 | 64 | const VkDescriptorSetLayout& layout_desc() const { return m_descriptorSetLayout; } 65 | const VkPipelineLayout& layout_pipeline() const { return m_pipelineLayout; } 66 | const VkPipeline& pipeline() const { return m_pipeline; } 67 | const VkStridedDeviceAddressRegionKHR& sbt_entry_raygen() const { return m_sbt_entry_raygen; } 68 | const VkStridedDeviceAddressRegionKHR& sbt_entry_miss() const { return m_sbt_entry_miss; } 69 | const VkStridedDeviceAddressRegionKHR& sbt_entry_hit() const { return m_sbt_entry_hit; } 70 | const VkStridedDeviceAddressRegionKHR& sbt_entry_callable() const { return m_sbt_entry_callable; } 71 | 72 | size_t num_tlas() const { return m_num_tlas; } 73 | size_t num_tex2d() const { return m_num_tex2d; } 74 | size_t num_tex3d() const { return m_num_tex3d; } 75 | size_t num_cubemap() const { return m_num_cubemap; } 76 | Sampler* sampler() const { return m_sampler; } 77 | CommandBufferRecycler* recycler() const; 78 | 79 | 80 | private: 81 | VkDescriptorSetLayout m_descriptorSetLayout; 82 | VkPipelineLayout m_pipelineLayout; 83 | VkPipeline m_pipeline; 84 | DeviceBuffer* m_shaderBindingTableBuffer; 85 | VkStridedDeviceAddressRegionKHR m_sbt_entry_raygen; 86 | VkStridedDeviceAddressRegionKHR m_sbt_entry_miss; 87 | VkStridedDeviceAddressRegionKHR m_sbt_entry_hit; 88 | VkStridedDeviceAddressRegionKHR m_sbt_entry_callable; 89 | 90 | size_t m_num_tlas; 91 | size_t m_num_tex2d; 92 | size_t m_num_tex3d; 93 | size_t m_num_cubemap; 94 | Sampler* m_sampler; 95 | 96 | mutable std::unordered_map m_recyclers; 97 | mutable std::shared_mutex m_mu_streams; 98 | 99 | }; 100 | 101 | class RayTraceCommandBuffer : public CommandBuffer 102 | { 103 | public: 104 | RayTraceCommandBuffer(const RayTracePipeline* pipeline, size_t ubo_size); 105 | ~RayTraceCommandBuffer(); 106 | 107 | virtual void Recycle(); 108 | void trace(void* param_data, TopLevelAS** arr_tlas, Texture2D** tex2ds, Texture3D** tex3ds, TextureCube** cubemaps, unsigned dim_x, unsigned dim_y, unsigned dim_z); 109 | 110 | 
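		// Sketch of what trace() is expected to do (implementation presumably in
		// internal/internal_context_ex.inl, listed in the root CMakeLists.txt): copy
		// 'param_data' into m_ubo, bind the TLAS and texture arrays to m_descriptorSet, and
		// record a dim_x * dim_y * dim_z ray dispatch against the pipeline's SBT regions.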
private: 111 | const RayTracePipeline* m_pipeline; 112 | DeviceBuffer* m_ubo; 113 | VkDescriptorPool m_descriptorPool; 114 | VkDescriptorSet m_descriptorSet; 115 | 116 | }; 117 | } 118 | } 119 | 120 | -------------------------------------------------------------------------------- /Context.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include "ShaderViewable.h" 9 | 10 | namespace VkInline 11 | { 12 | namespace Internal 13 | { 14 | class Texture2D; 15 | class Texture3D; 16 | class TextureCube; 17 | class CommandBufferRecycler; 18 | struct GraphicsPipelineStates; 19 | } 20 | 21 | struct dim_type 22 | { 23 | unsigned int x, y, z; 24 | }; 25 | 26 | bool TryInit(); 27 | void SetVerbose(bool verbose = true); 28 | 29 | // reflection 30 | size_t SizeOf(const char* cls); 31 | bool QueryStruct(const char* name_struct, size_t* offsets); 32 | 33 | void AddBuiltInHeader(const char* name, const char* content); 34 | void AddCodeBlock(const char* code); 35 | void AddInlcudeFilename(const char* fn); 36 | std::string Add_Dynamic_Code(const char* code); 37 | 38 | void Wait(); 39 | 40 | class Texture2D 41 | { 42 | public: 43 | int width() const; 44 | int height() const; 45 | unsigned pixel_size() const; 46 | unsigned channel_count() const; 47 | unsigned sample_count() const; 48 | unsigned vkformat() const; 49 | 50 | Internal::Texture2D* internal() { return m_tex; } 51 | const Internal::Texture2D* internal() const { return m_tex; } 52 | 53 | Texture2D(int width, int height, unsigned vkformat, bool isDepth = false, bool isStencil = false, unsigned sampleCount = 1); 54 | ~Texture2D(); 55 | 56 | void upload(const void* hdata); 57 | void download(void* hdata) const; 58 | 59 | private: 60 | Internal::Texture2D* m_tex; 61 | }; 62 | 63 | class Texture3D 64 | { 65 | public: 66 | int dimX() const; 67 | int dimY() const; 68 | int dimZ() const; 69 | unsigned pixel_size() const; 70 | unsigned channel_count() const; 71 | unsigned vkformat() const; 72 | 73 | Internal::Texture3D* internal() { return m_tex; } 74 | const Internal::Texture3D* internal() const { return m_tex; } 75 | 76 | Texture3D(int dimX, int dimY, int dimZ, unsigned vkformat); 77 | ~Texture3D(); 78 | 79 | void upload(const void* hdata); 80 | void download(void* hdata) const; 81 | 82 | private: 83 | Internal::Texture3D* m_tex; 84 | }; 85 | 86 | class Cubemap 87 | { 88 | public: 89 | int width() const; 90 | int height() const; 91 | unsigned pixel_size() const; 92 | unsigned channel_count() const; 93 | unsigned vkformat() const; 94 | 95 | Internal::TextureCube* internal() { return m_tex; } 96 | const Internal::TextureCube* internal() const { return m_tex; } 97 | 98 | Cubemap(int width, int height, unsigned vkformat); 99 | ~Cubemap(); 100 | 101 | void upload(const void* hdata); 102 | void download(void* hdata) const; 103 | 104 | private: 105 | Internal::TextureCube* m_tex; 106 | }; 107 | 108 | class Computer 109 | { 110 | public: 111 | size_t num_params() const { return m_param_names.size(); } 112 | Computer(const std::vector& param_names, const char* code_body, bool type_locked = false); 113 | bool launch(dim_type gridDim, dim_type blockDim, const ShaderViewable** args, 114 | const std::vector& tex2ds, const std::vector& tex3ds, const std::vector& cubemaps, size_t times_submission = 1); 115 | 116 | private: 117 | std::vector m_param_names; 118 | std::string m_code_body; 119 | 120 | bool m_type_locked; 121 | unsigned m_kid; 122 | 
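		// m_kid / m_offsets (assumed, by analogy with RayTracer::launch in Context_ex.inl):
		// cached kernel id and per-parameter offsets, filled on the first launch when
		// type_locked is set and reused for subsequent launches.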
std::vector m_offsets; 123 | std::mutex m_mu_type_lock; 124 | 125 | }; 126 | 127 | class DrawCall 128 | { 129 | public: 130 | DrawCall(const char* code_body_vert, const char* code_body_frag); 131 | ~DrawCall(); 132 | 133 | void set_primitive_topology(unsigned topo); 134 | void set_primitive_restart(bool enable); 135 | 136 | void set_polygon_mode(unsigned mode); 137 | void set_cull_mode(unsigned mode); 138 | void set_front_face(unsigned mode); 139 | void set_line_width(float width); 140 | 141 | void set_depth_enable(bool enable); 142 | void set_depth_write(bool enable); 143 | void set_depth_comapre_op(unsigned op); 144 | 145 | void set_color_write(bool enable) { m_color_write_r = m_color_write_g = m_color_write_b = enable; } 146 | void set_color_write_r(bool enable) { m_color_write_r = enable; } 147 | void set_color_write_g(bool enable) { m_color_write_g = enable; } 148 | void set_color_write_b(bool enable) { m_color_write_b = enable; } 149 | void set_alpha_write(bool enable) { m_alpha_write = enable; } 150 | void set_blend_enable(bool enable) { m_blend_enable = enable; } 151 | void set_src_color_blend_factor(unsigned factor) { m_src_color_blend_factor = factor; } 152 | void set_dst_color_blend_factor(unsigned factor) { m_dst_color_blend_factor = factor; } 153 | void set_color_blend_op(unsigned op) { m_color_blend_op = op; } 154 | void set_src_alpha_blend_factor(unsigned factor) { m_src_alpha_blend_factor = factor; } 155 | void set_dst_alpha_blend_factor(unsigned factor) { m_dst_alpha_blend_factor = factor; } 156 | void set_alpha_blend_op(unsigned op) { m_alpha_blend_op = op; } 157 | 158 | void set_blend_constants(float r, float g, float b, float a); 159 | 160 | void set_ith_color_write(int i, bool enable); 161 | void set_ith_color_write_r(int i, bool enable); 162 | void set_ith_color_write_g(int i, bool enable); 163 | void set_ith_color_write_b(int i, bool enable); 164 | void set_ith_alpha_write(int i, bool enable); 165 | void set_ith_blend_enable(int i, bool enable); 166 | void set_ith_src_color_blend_factor(int i, unsigned factor); 167 | void set_ith_dst_color_blend_factor(int i, unsigned factor); 168 | void set_ith_color_blend_op(int i, unsigned op); 169 | void set_ith_src_alpha_blend_factor(int i, unsigned factor); 170 | void set_ith_dst_alpha_blend_factor(int i, unsigned factor); 171 | void set_ith_alpha_blend_op(int i, unsigned op); 172 | 173 | const char* code_body_vert() const { return m_code_body_vert.c_str(); } 174 | const char* code_body_frag() const { return m_code_body_frag.c_str(); } 175 | 176 | const Internal::GraphicsPipelineStates& get_states(int num_color_att) const; 177 | 178 | private: 179 | std::string m_code_body_vert; 180 | std::string m_code_body_frag; 181 | 182 | bool m_color_write_r; 183 | bool m_color_write_g; 184 | bool m_color_write_b; 185 | bool m_alpha_write; 186 | bool m_blend_enable; 187 | unsigned m_src_color_blend_factor; 188 | unsigned m_dst_color_blend_factor; 189 | unsigned m_color_blend_op; 190 | unsigned m_src_alpha_blend_factor; 191 | unsigned m_dst_alpha_blend_factor; 192 | unsigned m_alpha_blend_op; 193 | 194 | Internal::GraphicsPipelineStates* m_states; 195 | mutable std::mutex m_mu_colorBlendAttachments; 196 | void _resize_color_att(int num_color_att) const; 197 | 198 | }; 199 | 200 | class SVBuffer; 201 | 202 | class Rasterizer 203 | { 204 | public: 205 | size_t num_params() const { return m_param_names.size(); } 206 | Rasterizer(const std::vector& param_names, bool type_locked = false); 207 | 208 | void set_clear_color_buf(int i, bool clear); 
209 | void set_clear_depth_buf(bool clear); 210 | 211 | void add_draw_call(const DrawCall* draw_call); 212 | 213 | struct LaunchParam 214 | { 215 | unsigned count; 216 | SVBuffer* indBuf; 217 | }; 218 | 219 | bool launch(const std::vector& colorBufs, Texture2D* depthBuf, const std::vector& resolveBufs, float* clear_colors, float clear_depth, const ShaderViewable** args, 220 | const std::vector& tex2ds, const std::vector& tex3ds, const std::vector& cubemaps, 221 | Rasterizer::LaunchParam** launch_params, size_t times_submission = 1); 222 | 223 | private: 224 | std::vector m_param_names; 225 | std::vector m_clear_color_buf; 226 | bool m_clear_depth_buf; 227 | std::vector m_draw_calls; 228 | 229 | bool m_type_locked; 230 | unsigned m_rpid; 231 | std::vector m_offsets; 232 | std::mutex m_mu_type_lock; 233 | }; 234 | } 235 | 236 | #ifdef _VkInlineEX 237 | #include "Context_ex.h" 238 | #endif 239 | 240 | -------------------------------------------------------------------------------- /Context_ex.inl: -------------------------------------------------------------------------------- 1 | namespace VkInline 2 | { 3 | BaseLevelAS::BaseLevelAS(SVBuffer* indBuf, SVBuffer* posBuf) 4 | { 5 | VkAccelerationStructureGeometryKHR acceleration_geometry = {}; 6 | acceleration_geometry.sType = VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_GEOMETRY_KHR; 7 | acceleration_geometry.flags = VK_GEOMETRY_OPAQUE_BIT_KHR; 8 | acceleration_geometry.geometryType = VK_GEOMETRY_TYPE_TRIANGLES_KHR; 9 | acceleration_geometry.geometry.triangles.sType = VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_GEOMETRY_TRIANGLES_DATA_KHR; 10 | acceleration_geometry.geometry.triangles.vertexData.deviceAddress = posBuf->internal()->get_device_address(); 11 | acceleration_geometry.geometry.triangles.maxVertex = (unsigned)posBuf->size(); 12 | acceleration_geometry.geometry.triangles.vertexStride = posBuf->elem_size(); 13 | acceleration_geometry.geometry.triangles.indexData.deviceAddress = indBuf == nullptr ? 0 : indBuf->internal()->get_device_address(); 14 | 15 | if (posBuf->name_elem_type() == "vec2") 16 | acceleration_geometry.geometry.triangles.vertexFormat = VK_FORMAT_R32G32_SFLOAT; 17 | else if (posBuf->name_elem_type() == "vec3") 18 | acceleration_geometry.geometry.triangles.vertexFormat = VK_FORMAT_R32G32B32_SFLOAT; 19 | 20 | acceleration_geometry.geometry.triangles.indexType = VK_INDEX_TYPE_NONE_KHR; 21 | if (indBuf != nullptr) 22 | acceleration_geometry.geometry.triangles.indexType = indBuf->elem_size() > 2 ? VK_INDEX_TYPE_UINT32 : VK_INDEX_TYPE_UINT16; 23 | 24 | VkAccelerationStructureBuildGeometryInfoKHR geoBuildInfo{}; 25 | geoBuildInfo.sType = VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_BUILD_GEOMETRY_INFO_KHR; 26 | geoBuildInfo.type = VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_KHR; 27 | geoBuildInfo.flags = VK_BUILD_ACCELERATION_STRUCTURE_PREFER_FAST_TRACE_BIT_KHR; 28 | geoBuildInfo.geometryCount = 1; 29 | geoBuildInfo.pGeometries = &acceleration_geometry; 30 | 31 | VkAccelerationStructureBuildRangeInfoKHR range{}; 32 | range.primitiveCount = (unsigned)(indBuf == nullptr ? 
posBuf->size() / 3 : indBuf->size() / 3); 33 | range.primitiveOffset = 0; 34 | range.firstVertex = 0; 35 | range.transformOffset = 0; 36 | const VkAccelerationStructureBuildRangeInfoKHR* ranges = ⦥ 37 | 38 | m_blas = new Internal::BaseLevelAS(geoBuildInfo, &acceleration_geometry, &ranges); 39 | } 40 | 41 | BaseLevelAS::BaseLevelAS(SVBuffer* aabbBuf) 42 | { 43 | VkAccelerationStructureGeometryKHR acceleration_geometry = {}; 44 | acceleration_geometry.sType = VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_GEOMETRY_KHR; 45 | acceleration_geometry.flags = VK_GEOMETRY_OPAQUE_BIT_KHR; 46 | acceleration_geometry.geometryType = VK_GEOMETRY_TYPE_AABBS_KHR; 47 | acceleration_geometry.geometry.aabbs.sType = VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_GEOMETRY_AABBS_DATA_KHR; 48 | acceleration_geometry.geometry.aabbs.data.deviceAddress = aabbBuf->internal()->get_device_address(); 49 | acceleration_geometry.geometry.aabbs.stride = 0; 50 | 51 | VkAccelerationStructureBuildGeometryInfoKHR geoBuildInfo{}; 52 | geoBuildInfo.sType = VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_BUILD_GEOMETRY_INFO_KHR; 53 | geoBuildInfo.type = VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_KHR; 54 | geoBuildInfo.flags = VK_BUILD_ACCELERATION_STRUCTURE_PREFER_FAST_TRACE_BIT_KHR; 55 | geoBuildInfo.geometryCount = 1; 56 | geoBuildInfo.pGeometries = &acceleration_geometry; 57 | 58 | VkAccelerationStructureBuildRangeInfoKHR range{}; 59 | range.primitiveCount = 1; 60 | range.primitiveOffset = 0; 61 | range.firstVertex = 0; 62 | range.transformOffset = 0; 63 | const VkAccelerationStructureBuildRangeInfoKHR* ranges = ⦥ 64 | 65 | m_blas = new Internal::BaseLevelAS(geoBuildInfo, &acceleration_geometry, &ranges); 66 | } 67 | 68 | BaseLevelAS::~BaseLevelAS() 69 | { 70 | delete m_blas; 71 | } 72 | 73 | Mat4::Mat4(const float* data) 74 | { 75 | memcpy(m_data, data, sizeof(float) * 16); 76 | } 77 | 78 | TopLevelAS::TopLevelAS(const std::vector>& blases) 79 | { 80 | size_t num_hitgroups = blases.size(); 81 | std::vector num_instances(num_hitgroups); 82 | std::vector> vblases(num_hitgroups); 83 | std::vector pblases(num_hitgroups); 84 | std::vector> vtrans(num_hitgroups); 85 | std::vector ptrans(num_hitgroups); 86 | 87 | for (size_t i = 0; i < num_hitgroups; i++) 88 | { 89 | size_t num_blases = blases[i].size(); 90 | num_instances[i] = num_blases; 91 | vblases[i].resize(num_blases); 92 | vtrans[i].resize(num_blases); 93 | for (size_t j = 0; j < num_blases; j++) 94 | { 95 | vblases[i][j] = blases[i][j].blas->internal()->structure(); 96 | vtrans[i][j] = blases[i][j].trans->data(); 97 | } 98 | pblases[i] = vblases[i].data(); 99 | ptrans[i] = vtrans[i].data(); 100 | } 101 | 102 | m_tlas = new Internal::TopLevelAS(num_hitgroups, num_instances.data(), pblases.data(), ptrans.data()); 103 | } 104 | 105 | TopLevelAS::~TopLevelAS() 106 | { 107 | delete m_tlas; 108 | } 109 | 110 | BodyHitShaders::BodyHitShaders(const char* body_closest_hit, const char* body_intersection) 111 | { 112 | m_body_closest_hit = body_closest_hit; 113 | if (body_intersection!=nullptr) 114 | m_body_intersection = body_intersection; 115 | } 116 | 117 | const char* BodyHitShaders::body_closest_hit() const 118 | { 119 | return m_body_closest_hit.c_str(); 120 | } 121 | 122 | const char* BodyHitShaders::body_intersection() const 123 | { 124 | if (m_body_intersection.empty()) return nullptr; 125 | return m_body_intersection.c_str(); 126 | } 127 | 128 | RayTracer::RayTracer(const std::vector& param_names, const char* body_raygen, const std::vector& body_miss, const std::vector& body_hit, unsigned 
maxRecursionDepth, bool type_locked) 129 | : m_param_names(param_names.size()), m_body_raygen(body_raygen), m_body_miss(body_miss.size()), m_body_hit(body_hit), m_maxRecursionDepth(maxRecursionDepth), m_type_locked(type_locked) 130 | { 131 | for (size_t i = 0; i < param_names.size(); i++) 132 | m_param_names[i] = param_names[i]; 133 | 134 | for (size_t i = 0; i < body_miss.size(); i++) 135 | m_body_miss[i] = body_miss[i]; 136 | 137 | m_kid = (unsigned)(-1); 138 | } 139 | 140 | bool RayTracer::launch(dim_type glbDim, const ShaderViewable** args, 141 | const std::vector& arr_tlas, const std::vector& tex2ds, const std::vector& tex3ds, const std::vector& cubemaps, size_t times_submission) 142 | { 143 | Context& ctx = Context::get_context(); 144 | if (!m_type_locked) 145 | { 146 | std::vector arg_map(m_param_names.size()); 147 | for (size_t i = 0; i < m_param_names.size(); i++) 148 | { 149 | arg_map[i].obj_name = m_param_names[i].c_str(); 150 | arg_map[i].obj = args[i]; 151 | } 152 | 153 | std::vector p_body_miss(m_body_miss.size()); 154 | for (size_t i = 0; i < m_body_miss.size(); i++) 155 | p_body_miss[i] = m_body_miss[i].c_str(); 156 | 157 | return ctx.launch_raytrace(glbDim, arg_map, m_maxRecursionDepth, 158 | arr_tlas, tex2ds, tex3ds, cubemaps, m_body_raygen.c_str(), p_body_miss, m_body_hit, times_submission); 159 | } 160 | else 161 | { 162 | std::unique_lock locker(m_mu_type_lock); 163 | if (m_kid == (unsigned)(-1)) 164 | { 165 | std::vector arg_map(m_param_names.size()); 166 | for (size_t i = 0; i < m_param_names.size(); i++) 167 | { 168 | arg_map[i].obj_name = m_param_names[i].c_str(); 169 | arg_map[i].obj = args[i]; 170 | } 171 | std::vector p_body_miss(m_body_miss.size()); 172 | for (size_t i = 0; i < m_body_miss.size(); i++) 173 | p_body_miss[i] = m_body_miss[i].c_str(); 174 | 175 | m_offsets.resize(m_param_names.size() + 1); 176 | return ctx.launch_raytrace(glbDim, arg_map, m_maxRecursionDepth, 177 | arr_tlas, tex2ds, tex3ds, cubemaps, m_body_raygen.c_str(), p_body_miss, m_body_hit, m_kid, m_offsets.data(), times_submission); 178 | } 179 | else 180 | { 181 | locker.unlock(); 182 | return ctx.launch_raytrace(glbDim, m_param_names.size(), args, 183 | arr_tlas.data(), tex2ds.data(), tex3ds.data(), cubemaps.data(), m_kid, m_offsets.data(), times_submission); 184 | } 185 | } 186 | } 187 | 188 | } 189 | -------------------------------------------------------------------------------- /glslc.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include 7 | 8 | const TBuiltInResource DefaultTBuiltInResource = 9 | { 10 | /* .MaxLights = */ 32, 11 | /* .MaxClipPlanes = */ 6, 12 | /* .MaxTextureUnits = */ 32, 13 | /* .MaxTextureCoords = */ 32, 14 | /* .MaxVertexAttribs = */ 64, 15 | /* .MaxVertexUniformComponents = */ 4096, 16 | /* .MaxVaryingFloats = */ 64, 17 | /* .MaxVertexTextureImageUnits = */ 32, 18 | /* .MaxCombinedTextureImageUnits = */ 80, 19 | /* .MaxTextureImageUnits = */ 32, 20 | /* .MaxFragmentUniformComponents = */ 4096, 21 | /* .MaxDrawBuffers = */ 32, 22 | /* .MaxVertexUniformVectors = */ 128, 23 | /* .MaxVaryingVectors = */ 8, 24 | /* .MaxFragmentUniformVectors = */ 16, 25 | /* .MaxVertexOutputVectors = */ 16, 26 | /* .MaxFragmentInputVectors = */ 15, 27 | /* .MinProgramTexelOffset = */ -8, 28 | /* .MaxProgramTexelOffset = */ 7, 29 | /* .MaxClipDistances = */ 8, 30 | /* .MaxComputeWorkGroupCountX = */ 65535, 31 | /* .MaxComputeWorkGroupCountY = */ 65535, 32 | /* 
.MaxComputeWorkGroupCountZ = */ 65535, 33 | /* .MaxComputeWorkGroupSizeX = */ 1024, 34 | /* .MaxComputeWorkGroupSizeY = */ 1024, 35 | /* .MaxComputeWorkGroupSizeZ = */ 64, 36 | /* .MaxComputeUniformComponents = */ 1024, 37 | /* .MaxComputeTextureImageUnits = */ 16, 38 | /* .MaxComputeImageUniforms = */ 8, 39 | /* .MaxComputeAtomicCounters = */ 8, 40 | /* .MaxComputeAtomicCounterBuffers = */ 1, 41 | /* .MaxVaryingComponents = */ 60, 42 | /* .MaxVertexOutputComponents = */ 64, 43 | /* .MaxGeometryInputComponents = */ 64, 44 | /* .MaxGeometryOutputComponents = */ 128, 45 | /* .MaxFragmentInputComponents = */ 128, 46 | /* .MaxImageUnits = */ 8, 47 | /* .MaxCombinedImageUnitsAndFragmentOutputs = */ 8, 48 | /* .MaxCombinedShaderOutputResources = */ 8, 49 | /* .MaxImageSamples = */ 0, 50 | /* .MaxVertexImageUniforms = */ 0, 51 | /* .MaxTessControlImageUniforms = */ 0, 52 | /* .MaxTessEvaluationImageUniforms = */ 0, 53 | /* .MaxGeometryImageUniforms = */ 0, 54 | /* .MaxFragmentImageUniforms = */ 8, 55 | /* .MaxCombinedImageUniforms = */ 8, 56 | /* .MaxGeometryTextureImageUnits = */ 16, 57 | /* .MaxGeometryOutputVertices = */ 256, 58 | /* .MaxGeometryTotalOutputComponents = */ 1024, 59 | /* .MaxGeometryUniformComponents = */ 1024, 60 | /* .MaxGeometryVaryingComponents = */ 64, 61 | /* .MaxTessControlInputComponents = */ 128, 62 | /* .MaxTessControlOutputComponents = */ 128, 63 | /* .MaxTessControlTextureImageUnits = */ 16, 64 | /* .MaxTessControlUniformComponents = */ 1024, 65 | /* .MaxTessControlTotalOutputComponents = */ 4096, 66 | /* .MaxTessEvaluationInputComponents = */ 128, 67 | /* .MaxTessEvaluationOutputComponents = */ 128, 68 | /* .MaxTessEvaluationTextureImageUnits = */ 16, 69 | /* .MaxTessEvaluationUniformComponents = */ 1024, 70 | /* .MaxTessPatchComponents = */ 120, 71 | /* .MaxPatchVertices = */ 32, 72 | /* .MaxTessGenLevel = */ 64, 73 | /* .MaxViewports = */ 16, 74 | /* .MaxVertexAtomicCounters = */ 0, 75 | /* .MaxTessControlAtomicCounters = */ 0, 76 | /* .MaxTessEvaluationAtomicCounters = */ 0, 77 | /* .MaxGeometryAtomicCounters = */ 0, 78 | /* .MaxFragmentAtomicCounters = */ 8, 79 | /* .MaxCombinedAtomicCounters = */ 8, 80 | /* .MaxAtomicCounterBindings = */ 1, 81 | /* .MaxVertexAtomicCounterBuffers = */ 0, 82 | /* .MaxTessControlAtomicCounterBuffers = */ 0, 83 | /* .MaxTessEvaluationAtomicCounterBuffers = */ 0, 84 | /* .MaxGeometryAtomicCounterBuffers = */ 0, 85 | /* .MaxFragmentAtomicCounterBuffers = */ 1, 86 | /* .MaxCombinedAtomicCounterBuffers = */ 1, 87 | /* .MaxAtomicCounterBufferSize = */ 16384, 88 | /* .MaxTransformFeedbackBuffers = */ 4, 89 | /* .MaxTransformFeedbackInterleavedComponents = */ 64, 90 | /* .MaxCullDistances = */ 8, 91 | /* .MaxCombinedClipAndCullDistances = */ 8, 92 | /* .MaxSamples = */ 4, 93 | /* .maxMeshOutputVerticesNV = */0, 94 | /* .maxMeshOutputPrimitivesNV = */0, 95 | /* .maxMeshWorkGroupSizeX_NV = */0, 96 | /* .maxMeshWorkGroupSizeY_NV = */0, 97 | /* .maxMeshWorkGroupSizeZ_NV = */0, 98 | /* .maxTaskWorkGroupSizeX_NV = */0, 99 | /* .maxTaskWorkGroupSizeY_NV = */0, 100 | /* .maxTaskWorkGroupSizeZ_NV = */0, 101 | /* .maxMeshViewCountNV = */0, 102 | /* .maxDualSourceDrawBuffersEXT = */ 0, 103 | /* .limits = */ 104 | { 105 | /* .nonInductiveForLoops = */ 1, 106 | /* .whileLoops = */ 1, 107 | /* .doWhileLoops = */ 1, 108 | /* .generalUniformIndexing = */ 1, 109 | /* .generalAttributeMatrixVectorIndexing = */ 1, 110 | /* .generalVaryingIndexing = */ 1, 111 | /* .generalSamplerIndexing = */ 1, 112 | /* .generalVariableIndexing = */ 1, 113 | /* 
.generalConstantMatrixVectorIndexing = */ 1, 114 | } 115 | }; 116 | 117 | class BuiltInIncluder : public glslang::TShader::Includer 118 | { 119 | const std::unordered_map* m_headers; 120 | public: 121 | BuiltInIncluder(const std::unordered_map* headers = nullptr) 122 | { 123 | m_headers = headers; 124 | } 125 | 126 | virtual IncludeResult* includeSystem(const char* headerName, const char* includerName, size_t inclusionDepth) 127 | { 128 | if (m_headers != nullptr) 129 | { 130 | auto iter = m_headers->find(headerName); 131 | if (iter != m_headers->end()) 132 | { 133 | IncludeResult* result = new IncludeResult(headerName, iter->second.c_str(), iter->second.length(), nullptr); 134 | return result; 135 | } 136 | } 137 | return nullptr; 138 | } 139 | 140 | virtual void releaseInclude(IncludeResult* result) override 141 | { 142 | if (result!=nullptr) delete result; 143 | } 144 | }; 145 | 146 | bool GLSL2SPV(const char* InputCString, const std::unordered_map* headers, std::vector& SpirV, EShLanguage ShaderType) 147 | { 148 | glslang::InitializeProcess(); 149 | glslang::TShader Shader(ShaderType); 150 | Shader.setStrings(&InputCString, 1); 151 | 152 | const int ClientInputSemanticsVersion = 100; 153 | #ifndef _VkInlineEX 154 | glslang::EShTargetClientVersion VulkanClientVersion = glslang::EShTargetVulkan_1_1; 155 | glslang::EShTargetLanguageVersion TargetVersion = glslang::EShTargetSpv_1_0; 156 | #else 157 | glslang::EShTargetClientVersion VulkanClientVersion = glslang::EShTargetVulkan_1_2; 158 | glslang::EShTargetLanguageVersion TargetVersion = glslang::EShTargetSpv_1_4; 159 | #endif 160 | 161 | Shader.setEnvInput(glslang::EShSourceGlsl, ShaderType, glslang::EShClientVulkan, ClientInputSemanticsVersion); 162 | Shader.setEnvClient(glslang::EShClientVulkan, VulkanClientVersion); 163 | Shader.setEnvTarget(glslang::EShTargetSpv, TargetVersion); 164 | 165 | TBuiltInResource Resources; 166 | Resources = DefaultTBuiltInResource; 167 | EShMessages messages = (EShMessages)(EShMsgSpvRules | EShMsgVulkanRules); 168 | 169 | const int DefaultVersion = 110; 170 | 171 | BuiltInIncluder Includer(headers); 172 | 173 | if (!Shader.parse(&Resources, DefaultVersion, false, messages, Includer)) 174 | { 175 | puts("GLSL Parsing Failed for: "); 176 | puts(Shader.getInfoLog()); 177 | puts(Shader.getInfoDebugLog()); 178 | return false; 179 | } 180 | 181 | glslang::TProgram Program; 182 | Program.addShader(&Shader); 183 | if (!Program.link(messages)) 184 | { 185 | puts("GLSL Linking Failed for:"); 186 | puts(Shader.getInfoLog()); 187 | puts(Shader.getInfoDebugLog()); 188 | return false; 189 | } 190 | 191 | spv::SpvBuildLogger logger; 192 | glslang::SpvOptions spvOptions; 193 | 194 | glslang::GlslangToSpv(*Program.getIntermediate(ShaderType), SpirV, &logger, &spvOptions); 195 | 196 | if (logger.getAllMessages().length() > 0) 197 | { 198 | puts(logger.getAllMessages().c_str()); 199 | return false; 200 | } 201 | 202 | return true; 203 | 204 | } 205 | 206 | bool GLSL2SPV_Compute(const char* InputCString, const std::unordered_map* headers, std::vector& SpirV) 207 | { 208 | return GLSL2SPV(InputCString, headers, SpirV, EShLangCompute); 209 | } 210 | 211 | bool GLSL2SPV_Vertex(const char* InputCString, const std::unordered_map* headers, std::vector& SpirV) 212 | { 213 | return GLSL2SPV(InputCString, headers, SpirV, EShLangVertex); 214 | } 215 | 216 | bool GLSL2SPV_Fragment(const char* InputCString, const std::unordered_map* headers, std::vector& SpirV) 217 | { 218 | return GLSL2SPV(InputCString, headers, SpirV, EShLangFragment); 
219 | } 220 | 221 | bool GLSL2SPV_Raygen(const char* InputCString, const std::unordered_map* headers, std::vector& SpirV) 222 | { 223 | return GLSL2SPV(InputCString, headers, SpirV, EShLangRayGen); 224 | } 225 | 226 | bool GLSL2SPV_Miss(const char* InputCString, const std::unordered_map* headers, std::vector& SpirV) 227 | { 228 | return GLSL2SPV(InputCString, headers, SpirV, EShLangMiss); 229 | } 230 | 231 | bool GLSL2SPV_ClosestHit(const char* InputCString, const std::unordered_map* headers, std::vector& SpirV) 232 | { 233 | return GLSL2SPV(InputCString, headers, SpirV, EShLangClosestHit); 234 | } 235 | 236 | bool GLSL2SPV_Intersect(const char* InputCString, const std::unordered_map* headers, std::vector& SpirV) 237 | { 238 | return GLSL2SPV(InputCString, headers, SpirV, EShLangIntersect); 239 | } 240 | -------------------------------------------------------------------------------- /python/VkInline/SVVector.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from .Context import * 3 | from .ShaderViewable import * 4 | from .SVBuffer import * 5 | from .SVCombine import SVCombine_Create 6 | 7 | class SVVector(ShaderViewable): 8 | def __init__(self, elem_type, size, ptr_host_data=None): 9 | self.m_size = SVUInt32(size) 10 | self.m_buf = SVBuffer(elem_type, size, ptr_host_data) 11 | self.m_cptr = SVCombine_Create({'size': self.m_size, 'data': self.m_buf}, 12 | ''' 13 | uint get_size(in Comb_#hash# vec) 14 | {{ 15 | return vec.size; 16 | }} 17 | 18 | {0} get_value(in Comb_#hash# vec, in uint id) 19 | {{ 20 | return vec.data[id].v; 21 | }} 22 | 23 | void set_value(in Comb_#hash# vec, in uint id, in {0} value) 24 | {{ 25 | vec.data[id].v = value; 26 | }} 27 | '''.format(elem_type)) 28 | 29 | def name_elem_type(self): 30 | return self.m_buf.name_elem_type() 31 | 32 | def elem_size(self): 33 | return self.m_buf.elem_size() 34 | 35 | def size(self): 36 | return self.m_buf.size() 37 | 38 | def to_host(self, begin = 0, end = -1): 39 | elem_type = self.name_elem_type() 40 | shape = [1, 1, 1] 41 | if elem_type=='int': 42 | nptype = np.int32 43 | elif elem_type=='uint': 44 | nptype = np.uint32 45 | elif elem_type=='float': 46 | nptype = np.float32 47 | elif elem_type=='double': 48 | nptype = np.float64 49 | 50 | elif elem_type=='ivec2': 51 | nptype = np.int32 52 | shape[2] = 2 53 | elif elem_type=='ivec3': 54 | nptype = np.int32 55 | shape[2] = 3 56 | elif elem_type=='ivec4': 57 | nptype = np.int32 58 | shape[2] = 4 59 | 60 | elif elem_type=='uvec2': 61 | nptype = np.uint32 62 | shape[2] = 2 63 | elif elem_type=='uvec3': 64 | nptype = np.uint32 65 | shape[2] = 3 66 | elif elem_type=='uvec4': 67 | nptype = np.uint32 68 | shape[2] = 4 69 | 70 | elif elem_type=='vec2': 71 | nptype = np.float32 72 | shape[2] = 2 73 | elif elem_type=='vec3': 74 | nptype = np.float32 75 | shape[2] = 3 76 | elif elem_type=='vec4': 77 | nptype = np.float32 78 | shape[2] = 4 79 | 80 | elif elem_type=='dvec2': 81 | nptype = np.float64 82 | shape[2] = 2 83 | elif elem_type=='dvec3': 84 | nptype = np.float64 85 | shape[2] = 3 86 | elif elem_type=='dvec4': 87 | nptype = np.float64 88 | shape[2] = 4 89 | 90 | elif elem_type=='mat2x2': 91 | nptype = np.float32 92 | shape[1] = 2 93 | shape[2] = 2 94 | elif elem_type=='mat2x3': 95 | nptype = np.float32 96 | shape[1] = 2 97 | shape[2] = 3 98 | elif elem_type=='mat2x4': 99 | nptype = np.float32 100 | shape[1] = 2 101 | shape[2] = 4 102 | elif elem_type=='mat3x2': 103 | nptype = np.float32 104 | shape[1] = 3 105 | shape[2] = 2 
106 | elif elem_type=='mat3x3': 107 | nptype = np.float32 108 | shape[1] = 3 109 | shape[2] = 3 110 | elif elem_type=='mat3x4': 111 | nptype = np.float32 112 | shape[1] = 3 113 | shape[2] = 4 114 | elif elem_type=='mat4x2': 115 | nptype = np.float32 116 | shape[1] = 4 117 | shape[2] = 2 118 | elif elem_type=='mat4x3': 119 | nptype = np.float32 120 | shape[1] = 4 121 | shape[2] = 3 122 | elif elem_type=='mat4x4': 123 | nptype = np.float32 124 | shape[1] = 4 125 | shape[2] = 4 126 | 127 | elif elem_type=='dmat2x2': 128 | nptype = np.float64 129 | shape[1] = 2 130 | shape[2] = 2 131 | elif elem_type=='dmat2x3': 132 | nptype = np.float64 133 | shape[1] = 2 134 | shape[2] = 3 135 | elif elem_type=='dmat2x4': 136 | nptype = np.float64 137 | shape[1] = 2 138 | shape[2] = 4 139 | elif elem_type=='dmat3x2': 140 | nptype = np.float64 141 | shape[1] = 3 142 | shape[2] = 2 143 | elif elem_type=='dmat3x3': 144 | nptype = np.float64 145 | shape[1] = 3 146 | shape[2] = 3 147 | elif elem_type=='dmat3x4': 148 | nptype = np.float64 149 | shape[1] = 3 150 | shape[2] = 4 151 | elif elem_type=='dmat4x2': 152 | nptype = np.float64 153 | shape[1] = 4 154 | shape[2] = 2 155 | elif elem_type=='dmat4x3': 156 | nptype = np.float64 157 | shape[1] = 4 158 | shape[2] = 3 159 | elif elem_type=='dmat4x4': 160 | nptype = np.float64 161 | shape[1] = 4 162 | shape[2] = 4 163 | 164 | if end == -1: 165 | end = self.size() 166 | shape[0] = end - begin 167 | if shape[1] == 1: 168 | if shape[2] == 1: # scalar 169 | ret = np.empty(shape[0], dtype=nptype) 170 | else: # vec 171 | ret = np.empty((shape[0], shape[2]), dtype=nptype) 172 | else: # matrix 173 | ret = np.empty(shape, dtype=nptype) 174 | 175 | self.m_buf.to_host(ret.__array_interface__['data'][0], begin, end) 176 | return ret 177 | 178 | def device_vector_from_numpy(nparr): 179 | shape = nparr.shape 180 | if len(shape)<2: 181 | shape = [shape[0], 1, 1] 182 | elif len(shape)<3: 183 | shape = [shape[0], 1, shape[1]] 184 | 185 | if nparr.dtype == np.int32: 186 | if shape[1]==1: 187 | if shape[2] == 1: 188 | elem_type = 'int' 189 | elif shape[2] == 2: 190 | elem_type = 'ivec2' 191 | elif shape[2] == 3: 192 | elem_type = 'ivec3' 193 | elif shape[2] == 4: 194 | elem_type = 'ivec4' 195 | 196 | elif nparr.dtype == np.uint32: 197 | if shape[1]==1: 198 | if shape[2] == 1: 199 | elem_type = 'uint' 200 | elif shape[2] == 2: 201 | elem_type = 'uvec2' 202 | elif shape[2] == 3: 203 | elem_type = 'uvec3' 204 | elif shape[2] == 4: 205 | elem_type = 'uvec4' 206 | 207 | elif nparr.dtype == np.float32: 208 | if shape[1]==1: 209 | if shape[2] == 1: 210 | elem_type = 'float' 211 | elif shape[2] == 2: 212 | elem_type = 'vec2' 213 | elif shape[2] == 3: 214 | elem_type = 'vec3' 215 | elif shape[2] == 4: 216 | elem_type = 'vec4' 217 | 218 | elif shape[1]==2: 219 | if shape[2] == 2: 220 | elem_type = 'mat2x2' 221 | elif shape[2] == 3: 222 | elem_type = 'mat2x3' 223 | elif shape[2] == 4: 224 | elem_type = 'mat2x4' 225 | 226 | elif shape[1]==3: 227 | if shape[2] == 2: 228 | elem_type = 'mat3x2' 229 | elif shape[2] == 3: 230 | elem_type = 'mat3x3' 231 | elif shape[2] == 4: 232 | elem_type = 'mat3x4' 233 | 234 | elif shape[1]==4: 235 | if shape[2] == 2: 236 | elem_type = 'mat4x2' 237 | elif shape[2] == 3: 238 | elem_type = 'mat4x3' 239 | elif shape[2] == 4: 240 | elem_type = 'mat4x4' 241 | 242 | elif nparr.dtype == np.float64: 243 | if shape[1]==1: 244 | if shape[2] == 1: 245 | elem_type = 'double' 246 | elif shape[2] == 2: 247 | elem_type = 'dvec2' 248 | elif shape[2] == 3: 249 | elem_type = 'dvec3' 
250 | elif shape[2] == 4: 251 | elem_type = 'dvec4' 252 | 253 | elif shape[1]==2: 254 | if shape[2] == 2: 255 | elem_type = 'dmat2x2' 256 | elif shape[2] == 3: 257 | elem_type = 'dmat2x3' 258 | elif shape[2] == 4: 259 | elem_type = 'dmat2x4' 260 | 261 | elif shape[1]==3: 262 | if shape[2] == 2: 263 | elem_type = 'dmat3x2' 264 | elif shape[2] == 3: 265 | elem_type = 'dmat3x3' 266 | elif shape[2] == 4: 267 | elem_type = 'dmat3x4' 268 | 269 | elif shape[1]==4: 270 | if shape[2] == 2: 271 | elem_type = 'dmat4x2' 272 | elif shape[2] == 3: 273 | elem_type = 'dmat4x3' 274 | elif shape[2] == 4: 275 | elem_type = 'dmat4x4' 276 | 277 | ptr_host_data = nparr.__array_interface__['data'][0] 278 | return SVVector(elem_type, shape[0], ptr_host_data) 279 | 280 | def device_vector_from_list(lst, elem_type): 281 | if elem_type=='int': 282 | nptype = np.int32 283 | elif elem_type=='uint': 284 | nptype = np.uint32 285 | elif elem_type=='float': 286 | nptype = np.float32 287 | elif elem_type=='double': 288 | nptype = np.float64 289 | nparr = np.array(lst, dtype=nptype) 290 | return device_vector_from_numpy(nparr) 291 | -------------------------------------------------------------------------------- /python/api_ShaderViewable.cpp: -------------------------------------------------------------------------------- 1 | #include "api.h" 2 | #include "ShaderViewable.h" 3 | using namespace VkInline; 4 | 5 | const char* n_sv_name_view_type(void* cptr) 6 | { 7 | ShaderViewable* sv = (ShaderViewable*)cptr; 8 | return sv->name_view_type().c_str(); 9 | } 10 | 11 | void n_sv_destroy(void* cptr) 12 | { 13 | ShaderViewable* sv = (ShaderViewable*)cptr; 14 | delete sv; 15 | } 16 | 17 | 18 | void* n_svint32_create(int v) 19 | { 20 | return new SVInt32(v); 21 | } 22 | 23 | int n_svint32_value(void* cptr) 24 | { 25 | ShaderViewable* sv = (ShaderViewable*)cptr; 26 | return *(int32_t*)sv->view().data(); 27 | } 28 | 29 | void* n_svuint32_create(unsigned v) 30 | { 31 | return new SVUInt32(v); 32 | } 33 | 34 | unsigned n_svuint32_value(void* cptr) 35 | { 36 | ShaderViewable* dv = (ShaderViewable*)cptr; 37 | return *(uint32_t*)dv->view().data(); 38 | } 39 | 40 | void* n_svfloat_create(float v) 41 | { 42 | return new SVFloat(v); 43 | } 44 | 45 | float n_svfloat_value(void* cptr) 46 | { 47 | ShaderViewable* dv = (ShaderViewable*)cptr; 48 | return *(float*)dv->view().data(); 49 | } 50 | 51 | void* n_svdouble_create(double v) 52 | { 53 | return new SVDouble(v); 54 | } 55 | 56 | double n_svdouble_value(void* cptr) 57 | { 58 | ShaderViewable* dv = (ShaderViewable*)cptr; 59 | return *(double*)dv->view().data(); 60 | } 61 | 62 | 63 | void* n_svivec2_create(const int* v) 64 | { 65 | return new SVIVec2(v); 66 | } 67 | 68 | void n_svivec2_value(void* cptr, int* v) 69 | { 70 | ShaderViewable* dv = (ShaderViewable*)cptr; 71 | auto view = dv->view(); 72 | memcpy(v, view.data(), sizeof(int) * 2); 73 | } 74 | 75 | void* n_svivec3_create(const int* v) 76 | { 77 | return new SVIVec3(v); 78 | } 79 | 80 | void n_svivec3_value(void* cptr, int* v) 81 | { 82 | ShaderViewable* dv = (ShaderViewable*)cptr; 83 | auto view = dv->view(); 84 | memcpy(v, view.data(), sizeof(int) * 3); 85 | } 86 | 87 | void* n_svivec4_create(const int* v) 88 | { 89 | return new SVIVec4(v); 90 | } 91 | 92 | void n_svivec4_value(void* cptr, int* v) 93 | { 94 | ShaderViewable* dv = (ShaderViewable*)cptr; 95 | auto view = dv->view(); 96 | memcpy(v, view.data(), sizeof(int) * 4); 97 | } 98 | 99 | 100 | void* n_svuvec2_create(const unsigned* v) 101 | { 102 | return new SVUVec2(v); 103 | } 
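// The n_sv*_create / n_sv*_value pairs above and below repeat one pattern for every GLSL
// scalar, vector and matrix type: box a host value into the matching SV* wrapper, or copy its
// view() buffer back into caller-provided storage. Together they form the flat C surface that
// the Python side reaches through cffi (the native.n_* calls seen in python/VkInline/*.py).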
104 | 105 | void n_svuvec2_value(void* cptr, unsigned* v) 106 | { 107 | ShaderViewable* dv = (ShaderViewable*)cptr; 108 | auto view = dv->view(); 109 | memcpy(v, view.data(), sizeof(unsigned) * 2); 110 | } 111 | 112 | void* n_svuvec3_create(const unsigned* v) 113 | { 114 | return new SVUVec3(v); 115 | } 116 | 117 | void n_svuvec3_value(void* cptr, unsigned* v) 118 | { 119 | ShaderViewable* dv = (ShaderViewable*)cptr; 120 | auto view = dv->view(); 121 | memcpy(v, view.data(), sizeof(unsigned) * 3); 122 | } 123 | 124 | void* n_svuvec4_create(const unsigned* v) 125 | { 126 | return new SVUVec4(v); 127 | } 128 | 129 | void n_svuvec4_value(void* cptr, unsigned* v) 130 | { 131 | ShaderViewable* dv = (ShaderViewable*)cptr; 132 | auto view = dv->view(); 133 | memcpy(v, view.data(), sizeof(unsigned) * 4); 134 | } 135 | 136 | void* n_svvec2_create(const float* v) 137 | { 138 | return new SVVec2(v); 139 | } 140 | 141 | void n_svvec2_value(void* cptr, float* v) 142 | { 143 | ShaderViewable* dv = (ShaderViewable*)cptr; 144 | auto view = dv->view(); 145 | memcpy(v, view.data(), sizeof(float) * 2); 146 | } 147 | 148 | void* n_svvec3_create(const float* v) 149 | { 150 | return new SVVec3(v); 151 | } 152 | 153 | void n_svvec3_value(void* cptr, float* v) 154 | { 155 | ShaderViewable* dv = (ShaderViewable*)cptr; 156 | auto view = dv->view(); 157 | memcpy(v, view.data(), sizeof(float) * 3); 158 | } 159 | 160 | void* n_svvec4_create(const float* v) 161 | { 162 | return new SVVec4(v); 163 | } 164 | 165 | void n_svvec4_value(void* cptr, float* v) 166 | { 167 | ShaderViewable* dv = (ShaderViewable*)cptr; 168 | auto view = dv->view(); 169 | memcpy(v, view.data(), sizeof(float) * 4); 170 | } 171 | 172 | 173 | void* n_svdvec2_create(const double* v) 174 | { 175 | return new SVDVec2(v); 176 | } 177 | 178 | void n_svdvec2_value(void* cptr, double* v) 179 | { 180 | ShaderViewable* dv = (ShaderViewable*)cptr; 181 | auto view = dv->view(); 182 | memcpy(v, view.data(), sizeof(double) * 2); 183 | } 184 | 185 | void* n_svdvec3_create(const double* v) 186 | { 187 | return new SVDVec3(v); 188 | } 189 | 190 | void n_svdvec3_value(void* cptr, double* v) 191 | { 192 | ShaderViewable* dv = (ShaderViewable*)cptr; 193 | auto view = dv->view(); 194 | memcpy(v, view.data(), sizeof(double) * 3); 195 | } 196 | 197 | void* n_svdvec4_create(const double* v) 198 | { 199 | return new SVDVec4(v); 200 | } 201 | 202 | void n_svdvec4_value(void* cptr, double* v) 203 | { 204 | ShaderViewable* dv = (ShaderViewable*)cptr; 205 | auto view = dv->view(); 206 | memcpy(v, view.data(), sizeof(double) * 4); 207 | } 208 | 209 | void* n_svmat2x2_create(const float* v) 210 | { 211 | return new SVMat2x2(v); 212 | } 213 | 214 | void n_svmat2x2_value(void* cptr, float* v) 215 | { 216 | ShaderViewable* dv = (ShaderViewable*)cptr; 217 | auto view = dv->view(); 218 | memcpy(v, view.data(), sizeof(float) * 2 * 2); 219 | } 220 | 221 | 222 | void* n_svmat2x3_create(const float* v) 223 | { 224 | return new SVMat2x3(v); 225 | } 226 | 227 | void n_svmat2x3_value(void* cptr, float* v) 228 | { 229 | ShaderViewable* dv = (ShaderViewable*)cptr; 230 | auto view = dv->view(); 231 | memcpy(v, view.data(), sizeof(float) * 2 * 3); 232 | } 233 | 234 | void* n_svmat2x4_create(const float* v) 235 | { 236 | return new SVMat2x4(v); 237 | } 238 | 239 | void n_svmat2x4_value(void* cptr, float* v) 240 | { 241 | ShaderViewable* dv = (ShaderViewable*)cptr; 242 | auto view = dv->view(); 243 | memcpy(v, view.data(), sizeof(float) * 2 * 4); 244 | } 245 | 246 | void* n_svmat3x2_create(const 
float* v) 247 | { 248 | return new SVMat3x2(v); 249 | } 250 | 251 | void n_svmat3x2_value(void* cptr, float* v) 252 | { 253 | ShaderViewable* dv = (ShaderViewable*)cptr; 254 | auto view = dv->view(); 255 | memcpy(v, view.data(), sizeof(float) * 3 * 2); 256 | } 257 | 258 | void* n_svmat3x3_create(const float* v) 259 | { 260 | return new SVMat3x3(v); 261 | } 262 | 263 | void n_svmat3x3_value(void* cptr, float* v) 264 | { 265 | ShaderViewable* dv = (ShaderViewable*)cptr; 266 | auto view = dv->view(); 267 | memcpy(v, view.data(), sizeof(float) * 3 * 3); 268 | } 269 | 270 | void* n_svmat3x4_create(const float* v) 271 | { 272 | return new SVMat3x4(v); 273 | } 274 | 275 | void n_svmat3x4_value(void* cptr, float* v) 276 | { 277 | ShaderViewable* dv = (ShaderViewable*)cptr; 278 | auto view = dv->view(); 279 | memcpy(v, view.data(), sizeof(float) * 3 * 4); 280 | } 281 | 282 | void* n_svmat4x2_create(const float* v) 283 | { 284 | return new SVMat4x2(v); 285 | } 286 | 287 | void n_svmat4x2_value(void* cptr, float* v) 288 | { 289 | ShaderViewable* dv = (ShaderViewable*)cptr; 290 | auto view = dv->view(); 291 | memcpy(v, view.data(), sizeof(float) * 4 * 2); 292 | } 293 | 294 | 295 | void* n_svmat4x3_create(const float* v) 296 | { 297 | return new SVMat4x3(v); 298 | } 299 | 300 | void n_svmat4x3_value(void* cptr, float* v) 301 | { 302 | ShaderViewable* dv = (ShaderViewable*)cptr; 303 | auto view = dv->view(); 304 | memcpy(v, view.data(), sizeof(float) * 4 * 3); 305 | } 306 | 307 | void* n_svmat4x4_create(const float* v) 308 | { 309 | return new SVMat4x4(v); 310 | } 311 | 312 | void n_svmat4x4_value(void* cptr, float* v) 313 | { 314 | ShaderViewable* dv = (ShaderViewable*)cptr; 315 | auto view = dv->view(); 316 | memcpy(v, view.data(), sizeof(float) * 4 * 4); 317 | } 318 | 319 | void* n_svdmat2x2_create(const double* v) 320 | { 321 | return new SVDMat2x2(v); 322 | } 323 | 324 | void n_svdmat2x2_value(void* cptr, double* v) 325 | { 326 | ShaderViewable* dv = (ShaderViewable*)cptr; 327 | auto view = dv->view(); 328 | memcpy(v, view.data(), sizeof(double) * 2 * 2); 329 | } 330 | 331 | 332 | void* n_svdmat2x3_create(const double* v) 333 | { 334 | return new SVDMat2x3(v); 335 | } 336 | 337 | void n_svdmat2x3_value(void* cptr, double* v) 338 | { 339 | ShaderViewable* dv = (ShaderViewable*)cptr; 340 | auto view = dv->view(); 341 | memcpy(v, view.data(), sizeof(double) * 2 * 3); 342 | } 343 | 344 | void* n_svdmat2x4_create(const double* v) 345 | { 346 | return new SVDMat2x4(v); 347 | } 348 | 349 | void n_svdmat2x4_value(void* cptr, double* v) 350 | { 351 | ShaderViewable* dv = (ShaderViewable*)cptr; 352 | auto view = dv->view(); 353 | memcpy(v, view.data(), sizeof(double) * 2 * 4); 354 | } 355 | 356 | void* n_svdmat3x2_create(const double* v) 357 | { 358 | return new SVDMat3x2(v); 359 | } 360 | 361 | void n_svdmat3x2_value(void* cptr, double* v) 362 | { 363 | ShaderViewable* dv = (ShaderViewable*)cptr; 364 | auto view = dv->view(); 365 | memcpy(v, view.data(), sizeof(double) * 3 * 2); 366 | } 367 | 368 | void* n_svdmat3x3_create(const double* v) 369 | { 370 | return new SVDMat3x3(v); 371 | } 372 | 373 | void n_svdmat3x3_value(void* cptr, double* v) 374 | { 375 | ShaderViewable* dv = (ShaderViewable*)cptr; 376 | auto view = dv->view(); 377 | memcpy(v, view.data(), sizeof(double) * 3 * 3); 378 | } 379 | 380 | void* n_svdmat3x4_create(const double* v) 381 | { 382 | return new SVDMat3x4(v); 383 | } 384 | 385 | void n_svdmat3x4_value(void* cptr, double* v) 386 | { 387 | ShaderViewable* dv = (ShaderViewable*)cptr; 388 
| auto view = dv->view(); 389 | memcpy(v, view.data(), sizeof(double) * 3 * 4); 390 | } 391 | 392 | void* n_svdmat4x2_create(const double* v) 393 | { 394 | return new SVDMat4x2(v); 395 | } 396 | 397 | void n_svdmat4x2_value(void* cptr, double* v) 398 | { 399 | ShaderViewable* dv = (ShaderViewable*)cptr; 400 | auto view = dv->view(); 401 | memcpy(v, view.data(), sizeof(double) * 4 * 2); 402 | } 403 | 404 | 405 | void* n_svdmat4x3_create(const double* v) 406 | { 407 | return new SVDMat4x3(v); 408 | } 409 | 410 | void n_svdmat4x3_value(void* cptr, double* v) 411 | { 412 | ShaderViewable* dv = (ShaderViewable*)cptr; 413 | auto view = dv->view(); 414 | memcpy(v, view.data(), sizeof(double) * 4 * 3); 415 | } 416 | 417 | void* n_svdmat4x4_create(const double* v) 418 | { 419 | return new SVDMat4x4(v); 420 | } 421 | 422 | void n_svdmat4x4_value(void* cptr, double* v) 423 | { 424 | ShaderViewable* dv = (ShaderViewable*)cptr; 425 | auto view = dv->view(); 426 | memcpy(v, view.data(), sizeof(double) * 4 * 4); 427 | } 428 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # VkInline 2 | 3 | Trying to develop another "easy way" to program GPUs using a non-C++ host language. 4 | 5 | Previously, I did [ThrustRTC](https://github.com/fynv/ThrustRTC) and [CUDAInline](https://github.com/fynv/CUDAInline), 6 | both of which are based on CUDA (NVRTC). 7 | 8 | This time, however, I'm trying to do similar things based on Vulkan. I found it more challenging than using CUDA (NVRTC), 9 | because the language features of GLSL are less friendly than those of CUDA C, and the host API structure is more complicated. 10 | However, I still found it attractive to do, because: 11 | 12 | * Vulkan is neutral to GPU vendors 13 | * Vulkan exposes more GPU features. Computing is only a small part; there are also rasterization and ray-tracing pipelines. 14 | 15 | ## Progress 16 | 17 | ### Computation 18 | 19 | The computation part of VkInline has similar features to CUDAInline. You can easily launch a compute shader from Python, like: 20 | 21 | ```python 22 | 23 | import VkInline as vki 24 | import numpy as np 25 | 26 | harr = np.array([1.0, 2.0, 3.0, 4.0, 5.0], dtype='float32') 27 | darr = vki.device_vector_from_numpy(harr) 28 | 29 | kernel = vki.Computer(['arr_in', 'arr_out', 'k'], 30 | ''' 31 | void main() 32 | { 33 | uint id = gl_GlobalInvocationID.x; 34 | if (id >= get_size(arr_in)) return; 35 | set_value(arr_out, id, get_value(arr_in, id)*k); 36 | } 37 | ''') 38 | 39 | darr_out = vki.SVVector('float', 5) 40 | kernel.launch(1,128, [darr, darr_out, vki.SVFloat(10.0)]) 41 | print (darr_out.to_host()) 42 | 43 | ``` 44 | Result: 45 | ``` 46 | [10. 20. 30. 40. 50.] 47 | ``` 48 | 49 | GLSL lacks language features like "struct member functions" and "operator overloading". 50 | Therefore, array indexing doesn't look as nice as in CUDAInline. 51 | 52 | Native 2-, 3-, and 4-component vector types and matrix types are supported. For example: 53 | ```python 54 | v = vki.device_vector_from_list([[1, -1], [2, -3], [5, 1000]], 'double') 55 | print (v.name_elem_type(), v.elem_size()) 56 | print(v.to_host()) 57 | ``` 58 | 59 | You will get: 60 | ``` 61 | dvec2 16 62 | [[ 1. -1.] 63 | [ 2. -3.] 64 | [ 5. 1000.]] 65 | ``` 66 | 67 | ### Rasterization 68 | 69 | Rasterization is currently very much simplified in VkInline. The limitations are: 70 | 71 | * 1 vki.Rasterizer = 1 Vulkan render-pass with 1 subpass. 
The multi-subpass feature of Vulkan is mostly for tiled-caching applications and will not be implemented in VkInline. 72 | * Currently, only vertex-shader and fragment-shader programming are supported. 73 | * Currently, only a subset of pipeline options can be configured during the construction of a DrawCall object. Those not covered are set to default values. 74 | * Surfaces/Swapchains/Semaphores are not exposed. This is mainly for off-screen rendering and is not quite suitable for video games. 75 | 76 | Example: 77 | 78 | ```python 79 | import VkInline as vki 80 | import numpy as np 81 | from PIL import Image 82 | 83 | VK_FORMAT_R8G8B8A8_SRGB = 43 84 | 85 | width = 640 86 | height = 480 87 | 88 | colorBuf = vki.Texture2D(width, height, VK_FORMAT_R8G8B8A8_SRGB) 89 | 90 | positions = np.array([ [0.0, -0.5, 0.5], [0.5, 0.5, 0.5], [-0.5, 0.5, 0.5] ], dtype = np.float32) 91 | gpuPos = vki.device_vector_from_numpy(positions) 92 | 93 | colors = np.array([ [0.0, 1.0, 0.0], [1.0, 0.0, 0.0], [0.0, 0.0, 1.0]], dtype = np.float32) 94 | gpuColors = vki.device_vector_from_numpy(colors) 95 | 96 | rp = vki.Rasterizer(['pos', 'col']) 97 | 98 | rp.add_draw_call(vki.DrawCall( 99 | ''' 100 | layout (location = 0) out vec3 vColor; 101 | void main() 102 | { 103 | gl_Position = vec4(get_value(pos, gl_VertexIndex), 1.0); 104 | vColor = get_value(col, gl_VertexIndex); 105 | } 106 | ''', 107 | ''' 108 | layout (location = 0) in vec3 vColor; 109 | layout (location = 0) out vec4 outColor; 110 | 111 | void main() 112 | { 113 | outColor = vec4(vColor, 1.0); 114 | } 115 | ''')) 116 | 117 | 118 | rp.launch([3], [colorBuf], None, [0.5, 0.5, 0.5, 1.0], 1.0, [gpuPos, gpuColors]) 119 | 120 | image_out = np.empty((height, width, 4), dtype=np.uint8) 121 | colorBuf.download(image_out) 122 | 123 | Image.fromarray(image_out, 'RGBA').save('output.png') 124 | 125 | ``` 126 | The code generates the following image: 127 | 128 | 129 | 130 | ### Ray-tracing 131 | 132 | In order to enable VK_KHR_ray_tracing, Vulkan 1.2 and a number of additional extensions are required. 133 | Currently, the feature has only been tested with the [Nvidia Beta driver](https://developer.nvidia.com/vulkan-driver). 
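At a glance, the ray-tracing workflow follows the usual Vulkan structure: build one or more bottom-level acceleration structures, instance them into a top-level structure, then create a ray-tracer from a ray-generation shader body plus lists of miss shaders and hit-shader groups. The sketch below is only a skeleton of the complete, runnable example that follows; the shader bodies (`raygen_body`, `miss_body`, `chit_body`, `isect_body`) and the `d_aabb`/`transform` variables are placeholders standing in for the ones defined there.

```python
# Skeleton only -- placeholder names; see the full example below for real values.
blas = vki.BaseLevelAS(gpuAABB = d_aabb)           # bottom-level AS built from an AABB buffer
tlas = vki.TopLevelAS([[(blas, transform)]])       # instances are (BLAS, glm.mat4) pairs
raytracer = vki.RayTracer(['arr_out'],
    raygen_body,                                   # ray-generation shader body (GLSL)
    [miss_body],                                   # list of miss shader bodies
    [vki.HitShaders(closest_hit = chit_body, intersection = isect_body)])
raytracer.launch((width, height), [arr_out], [tlas])
```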
134 | 135 | Example: 136 | 137 | ```python 138 | import VkInline as vki 139 | import numpy as np 140 | from PIL import Image 141 | import glm 142 | 143 | width = 800 144 | height = 400 145 | 146 | aabb_unit_sphere = np.array([-1.0, -1.0, -1.0, 1.0, 1.0, 1.0], dtype = np.float32) 147 | d_aabb_unit_sphere = vki.device_vector_from_numpy(aabb_unit_sphere) 148 | blas_unit_sphere = vki.BaseLevelAS(gpuAABB = d_aabb_unit_sphere) 149 | transform = glm.identity(glm.mat4) 150 | transform = glm.translate(transform, glm.vec3(0.0, 0.0, -1.0)) 151 | transform = glm.scale(transform, glm.vec3(0.5, 0.5, 0.5)) 152 | tlas = vki.TopLevelAS([[(blas_unit_sphere, transform)]]) 153 | 154 | darr_out = vki.SVVector('vec3', width*height) 155 | 156 | raytracer = vki.RayTracer(['arr_out', 'width', 'height'], 157 | ''' 158 | struct Payload 159 | { 160 | float t; 161 | vec3 color; 162 | }; 163 | 164 | layout(location = 0) rayPayloadEXT Payload payload; 165 | 166 | void main() 167 | { 168 | int x = int(gl_LaunchIDEXT.x); 169 | int y = int(gl_LaunchIDEXT.y); 170 | if (x>=width || y>height) return; 171 | 172 | vec3 lower_left_corner = vec3(-2.0, -1.0, -1.0); 173 | vec3 horizontal = vec3(4.0, 0.0, 0.0); 174 | vec3 vertical = vec3(0.0, 2.0, 0.0); 175 | vec3 origin = vec3(0.0, 0.0, 0.0); 176 | 177 | float u = (float(x)+0.5)/float(width); 178 | float v = 1.0 - (float(y)+0.5)/float(height); 179 | 180 | vec3 direction = normalize(lower_left_corner + u * horizontal + v * vertical); 181 | 182 | uint rayFlags = gl_RayFlagsOpaqueEXT; 183 | uint cullMask = 0xff; 184 | float tmin = 0.001; 185 | float tmax = 1000000.0; 186 | 187 | traceRayEXT(arr_tlas[0], rayFlags, cullMask, 0, 0, 0, origin, tmin, direction, tmax, 0); 188 | 189 | set_value(arr_out, x+y*width, payload.color); 190 | } 191 | 192 | ''', [ 193 | ''' 194 | struct Payload 195 | { 196 | float t; 197 | vec3 color; 198 | }; 199 | 200 | layout(location = 0) rayPayloadInEXT Payload payload; 201 | 202 | void main() 203 | { 204 | payload.t = -1.0; 205 | vec3 direction = gl_WorldRayDirectionEXT; 206 | float t = 0.5 * (direction.y + 1.0); 207 | payload.color = (1.0 - t)*vec3(1.0, 1.0, 1.0) + t * vec3(0.5, 0.7, 1.0); 208 | } 209 | '''], [vki.HitShaders( 210 | closest_hit = ''' 211 | struct Payload 212 | { 213 | float t; 214 | vec3 color; 215 | }; 216 | 217 | layout(location = 0) rayPayloadInEXT Payload payload; 218 | hitAttributeEXT vec3 hitpoint; 219 | 220 | void main() 221 | { 222 | vec3 normal = normalize(hitpoint); 223 | payload.t = gl_HitTEXT; 224 | payload.color = (normal+vec3(1.0, 1.0, 1.0))*0.5; 225 | } 226 | 227 | ''', 228 | intersection = ''' 229 | hitAttributeEXT vec3 hitpoint; 230 | 231 | void main() 232 | { 233 | vec3 origin = gl_ObjectRayOriginEXT; 234 | vec3 direction = gl_ObjectRayDirectionEXT; 235 | float tMin = gl_RayTminEXT; 236 | float tMax = gl_RayTmaxEXT; 237 | 238 | const float a = dot(direction, direction); 239 | const float b = dot(origin, direction); 240 | const float c = dot(origin, origin) - 1.0; 241 | const float discriminant = b * b - a * c; 242 | 243 | if (discriminant >= 0) 244 | { 245 | const float t1 = (-b - sqrt(discriminant)) / a; 246 | const float t2 = (-b + sqrt(discriminant)) / a; 247 | 248 | if ((tMin <= t1 && t1 < tMax) || (tMin <= t2 && t2 < tMax)) 249 | { 250 | float t = t1; 251 | if (tMin <= t1 && t1 < tMax) 252 | { 253 | hitpoint = origin + direction * t1; 254 | } 255 | else 256 | { 257 | t = t2; 258 | hitpoint = origin + direction * t2; 259 | } 260 | reportIntersectionEXT(t, 0); 261 | } 262 | } 263 | 264 | } 265 | ''' 266 | )]) 267 | 268 | 
svwidth = vki.SVInt32(width) 269 | svheight = vki.SVInt32(height) 270 | 271 | raytracer.launch((width, height), [darr_out, svwidth, svheight], [tlas]) 272 | 273 | out = darr_out.to_host() 274 | out = out.reshape((height,width,3))*255.0 275 | out = out.astype(np.uint8) 276 | Image.fromarray(out, 'RGB').save('output.png') 277 | ``` 278 | 279 | The code generates the following image: 280 | 281 | 282 | 283 | 284 | ## Installation 285 | 286 | ### Install from Source Code 287 | 288 | Source code of VkInline is available at: 289 | https://github.com/fynv/VkInline 290 | 291 | At build time, you will need: 292 | * UnQLite source code, as submodule: thirdparty/unqlite 293 | * glslang, as submodule: thirdparty/glslang 294 | * SPIRV-Cross, as submodule: thirdparty/SPIRV-Cross 295 | * Vulkan-Headers, as submodule: thirdparty/Vulkan-Headers 296 | * volk, as submodule: thirdparty/volk 297 | * CMake 3.x 298 | 299 | After cloning the repo from GitHub and resolving the submodules, you can build it 300 | with CMake. 301 | 302 | ``` 303 | $ mkdir build 304 | $ cd build 305 | $ cmake .. -DBUILD_PYTHON_BINDINGS=true -DVKINLINE_BUILD_TESTS=true -DVKINLINE_INCLUDE_PYTESTS=true 306 | $ make 307 | $ make install 308 | ``` 309 | You will get the library headers, binaries, and examples in the "install" directory. 310 | 311 | ### Install PyVkInline from PyPI 312 | 313 | Builds for Win64/Linux64 + Python 3.x are available from PyPI. If your 314 | environment matches, you can try: 315 | 316 | ``` 317 | $ pip3 install VkInline 318 | ``` 319 | 320 | ## Runtime Dependencies 321 | 322 | A Vulkan-capable GPU and a recent driver are needed at run-time. 323 | For ray-tracing, an [Nvidia Beta driver](https://developer.nvidia.com/vulkan-driver) might be needed. 324 | 325 | You may also need the Vulkan SDK at runtime on some platforms. 326 | 327 | On the Python side, VkInline depends on: 328 | * Python 3 329 | * cffi 330 | * numpy 331 | * pyglm 332 | 333 | ## License 334 | 335 | I've decided to license this project under the ['"Anti 996" License'](https://github.com/996icu/996.ICU/blob/master/LICENSE) 336 | 337 | Basically, you can use the code any way you like unless you are working for a 996 company. 338 | 339 | [![996.icu](https://img.shields.io/badge/link-996.icu-red.svg)](https://996.icu) 340 | 341 | 342 | -------------------------------------------------------------------------------- /thirdparty/Vulkan_utils/vk_format_utils.h: -------------------------------------------------------------------------------- 1 | /* Copyright (c) 2015-2020 The Khronos Group Inc. 2 | * Copyright (c) 2015-2020 Valve Corporation 3 | * Copyright (c) 2015-2020 LunarG, Inc. 4 | * 5 | * Licensed under the Apache License, Version 2.0 (the "License"); 6 | * you may not use this file except in compliance with the License. 7 | * You may obtain a copy of the License at 8 | * 9 | * http://www.apache.org/licenses/LICENSE-2.0 10 | * 11 | * Unless required by applicable law or agreed to in writing, software 12 | * distributed under the License is distributed on an "AS IS" BASIS, 13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | * See the License for the specific language governing permissions and 15 | * limitations under the License. 
16 | * 17 | * Author: Mark Lobodzinski 18 | * Author: Courtney Goeltzenleuchter 19 | * Author: Dave Houlton 20 | */ 21 | 22 | #pragma once 23 | #include 24 | #include 25 | #include "vulkan/vulkan.h" 26 | 27 | #if !defined(VK_LAYER_EXPORT) 28 | #if defined(__GNUC__) && __GNUC__ >= 4 29 | #define VK_LAYER_EXPORT __attribute__((visibility("default"))) 30 | #elif defined(__SUNPRO_C) && (__SUNPRO_C >= 0x590) 31 | #define VK_LAYER_EXPORT __attribute__((visibility("default"))) 32 | #else 33 | #define VK_LAYER_EXPORT 34 | #endif 35 | #endif 36 | 37 | #ifdef __cplusplus 38 | extern "C" { 39 | #endif 40 | 41 | #define VK_MULTIPLANE_FORMAT_MAX_PLANES 3 42 | 43 | typedef enum VkFormatCompatibilityClass { 44 | VK_FORMAT_COMPATIBILITY_CLASS_NONE_BIT = 0, 45 | VK_FORMAT_COMPATIBILITY_CLASS_8_BIT = 1, 46 | VK_FORMAT_COMPATIBILITY_CLASS_16_BIT = 2, 47 | VK_FORMAT_COMPATIBILITY_CLASS_24_BIT = 3, 48 | VK_FORMAT_COMPATIBILITY_CLASS_32_BIT = 4, 49 | VK_FORMAT_COMPATIBILITY_CLASS_48_BIT = 5, 50 | VK_FORMAT_COMPATIBILITY_CLASS_64_BIT = 6, 51 | VK_FORMAT_COMPATIBILITY_CLASS_96_BIT = 7, 52 | VK_FORMAT_COMPATIBILITY_CLASS_128_BIT = 8, 53 | VK_FORMAT_COMPATIBILITY_CLASS_192_BIT = 9, 54 | VK_FORMAT_COMPATIBILITY_CLASS_256_BIT = 10, 55 | VK_FORMAT_COMPATIBILITY_CLASS_BC1_RGB_BIT = 11, 56 | VK_FORMAT_COMPATIBILITY_CLASS_BC1_RGBA_BIT = 12, 57 | VK_FORMAT_COMPATIBILITY_CLASS_BC2_BIT = 13, 58 | VK_FORMAT_COMPATIBILITY_CLASS_BC3_BIT = 14, 59 | VK_FORMAT_COMPATIBILITY_CLASS_BC4_BIT = 15, 60 | VK_FORMAT_COMPATIBILITY_CLASS_BC5_BIT = 16, 61 | VK_FORMAT_COMPATIBILITY_CLASS_BC6H_BIT = 17, 62 | VK_FORMAT_COMPATIBILITY_CLASS_BC7_BIT = 18, 63 | VK_FORMAT_COMPATIBILITY_CLASS_ETC2_RGB_BIT = 19, 64 | VK_FORMAT_COMPATIBILITY_CLASS_ETC2_RGBA_BIT = 20, 65 | VK_FORMAT_COMPATIBILITY_CLASS_ETC2_EAC_RGBA_BIT = 21, 66 | VK_FORMAT_COMPATIBILITY_CLASS_EAC_R_BIT = 22, 67 | VK_FORMAT_COMPATIBILITY_CLASS_EAC_RG_BIT = 23, 68 | VK_FORMAT_COMPATIBILITY_CLASS_ASTC_4X4_BIT = 24, 69 | VK_FORMAT_COMPATIBILITY_CLASS_ASTC_5X4_BIT = 25, 70 | VK_FORMAT_COMPATIBILITY_CLASS_ASTC_5X5_BIT = 26, 71 | VK_FORMAT_COMPATIBILITY_CLASS_ASTC_6X5_BIT = 27, 72 | VK_FORMAT_COMPATIBILITY_CLASS_ASTC_6X6_BIT = 28, 73 | VK_FORMAT_COMPATIBILITY_CLASS_ASTC_8X5_BIT = 29, 74 | VK_FORMAT_COMPATIBILITY_CLASS_ASTC_8X6_BIT = 20, 75 | VK_FORMAT_COMPATIBILITY_CLASS_ASTC_8X8_BIT = 31, 76 | VK_FORMAT_COMPATIBILITY_CLASS_ASTC_10X5_BIT = 32, 77 | VK_FORMAT_COMPATIBILITY_CLASS_ASTC_10X6_BIT = 33, 78 | VK_FORMAT_COMPATIBILITY_CLASS_ASTC_10X8_BIT = 34, 79 | VK_FORMAT_COMPATIBILITY_CLASS_ASTC_10X10_BIT = 35, 80 | VK_FORMAT_COMPATIBILITY_CLASS_ASTC_12X10_BIT = 36, 81 | VK_FORMAT_COMPATIBILITY_CLASS_ASTC_12X12_BIT = 37, 82 | VK_FORMAT_COMPATIBILITY_CLASS_D16_BIT = 38, 83 | VK_FORMAT_COMPATIBILITY_CLASS_D24_BIT = 39, 84 | VK_FORMAT_COMPATIBILITY_CLASS_D32_BIT = 30, 85 | VK_FORMAT_COMPATIBILITY_CLASS_S8_BIT = 41, 86 | VK_FORMAT_COMPATIBILITY_CLASS_D16S8_BIT = 42, 87 | VK_FORMAT_COMPATIBILITY_CLASS_D24S8_BIT = 43, 88 | VK_FORMAT_COMPATIBILITY_CLASS_D32S8_BIT = 44, 89 | VK_FORMAT_COMPATIBILITY_CLASS_PVRTC1_2BPP_BIT = 45, 90 | VK_FORMAT_COMPATIBILITY_CLASS_PVRTC1_4BPP_BIT = 46, 91 | VK_FORMAT_COMPATIBILITY_CLASS_PVRTC2_2BPP_BIT = 47, 92 | VK_FORMAT_COMPATIBILITY_CLASS_PVRTC2_4BPP_BIT = 48, 93 | /* KHR_sampler_YCbCr_conversion */ 94 | VK_FORMAT_COMPATIBILITY_CLASS_32BIT_G8B8G8R8 = 49, 95 | VK_FORMAT_COMPATIBILITY_CLASS_32BIT_B8G8R8G8 = 50, 96 | VK_FORMAT_COMPATIBILITY_CLASS_64BIT_R10G10B10A10 = 51, 97 | VK_FORMAT_COMPATIBILITY_CLASS_64BIT_G10B10G10R10 = 52, 98 | 
VK_FORMAT_COMPATIBILITY_CLASS_64BIT_B10G10R10G10 = 53, 99 | VK_FORMAT_COMPATIBILITY_CLASS_64BIT_R12G12B12A12 = 54, 100 | VK_FORMAT_COMPATIBILITY_CLASS_64BIT_G12B12G12R12 = 55, 101 | VK_FORMAT_COMPATIBILITY_CLASS_64BIT_B12G12R12G12 = 56, 102 | VK_FORMAT_COMPATIBILITY_CLASS_64BIT_G16B16G16R16 = 57, 103 | VK_FORMAT_COMPATIBILITY_CLASS_64BIT_B16G16R16G16 = 58, 104 | VK_FORMAT_COMPATIBILITY_CLASS_8BIT_3PLANE_420 = 59, 105 | VK_FORMAT_COMPATIBILITY_CLASS_8BIT_2PLANE_420 = 60, 106 | VK_FORMAT_COMPATIBILITY_CLASS_8BIT_3PLANE_422 = 61, 107 | VK_FORMAT_COMPATIBILITY_CLASS_8BIT_2PLANE_422 = 62, 108 | VK_FORMAT_COMPATIBILITY_CLASS_8BIT_3PLANE_444 = 63, 109 | VK_FORMAT_COMPATIBILITY_CLASS_10BIT_3PLANE_420 = 64, 110 | VK_FORMAT_COMPATIBILITY_CLASS_10BIT_2PLANE_420 = 65, 111 | VK_FORMAT_COMPATIBILITY_CLASS_10BIT_3PLANE_422 = 66, 112 | VK_FORMAT_COMPATIBILITY_CLASS_10BIT_2PLANE_422 = 67, 113 | VK_FORMAT_COMPATIBILITY_CLASS_10BIT_3PLANE_444 = 68, 114 | VK_FORMAT_COMPATIBILITY_CLASS_12BIT_3PLANE_420 = 69, 115 | VK_FORMAT_COMPATIBILITY_CLASS_12BIT_2PLANE_420 = 70, 116 | VK_FORMAT_COMPATIBILITY_CLASS_12BIT_3PLANE_422 = 71, 117 | VK_FORMAT_COMPATIBILITY_CLASS_12BIT_2PLANE_422 = 72, 118 | VK_FORMAT_COMPATIBILITY_CLASS_12BIT_3PLANE_444 = 73, 119 | VK_FORMAT_COMPATIBILITY_CLASS_16BIT_3PLANE_420 = 74, 120 | VK_FORMAT_COMPATIBILITY_CLASS_16BIT_2PLANE_420 = 75, 121 | VK_FORMAT_COMPATIBILITY_CLASS_16BIT_3PLANE_422 = 76, 122 | VK_FORMAT_COMPATIBILITY_CLASS_16BIT_2PLANE_422 = 77, 123 | VK_FORMAT_COMPATIBILITY_CLASS_16BIT_3PLANE_444 = 78, 124 | VK_FORMAT_COMPATIBILITY_CLASS_MAX_ENUM = 79 125 | } VkFormatCompatibilityClass; 126 | 127 | typedef enum VkFormatNumericalType { 128 | VK_FORMAT_NUMERICAL_TYPE_NONE, 129 | VK_FORMAT_NUMERICAL_TYPE_UINT, 130 | VK_FORMAT_NUMERICAL_TYPE_SINT, 131 | VK_FORMAT_NUMERICAL_TYPE_UNORM, 132 | VK_FORMAT_NUMERICAL_TYPE_SNORM, 133 | VK_FORMAT_NUMERICAL_TYPE_USCALED, 134 | VK_FORMAT_NUMERICAL_TYPE_SSCALED, 135 | VK_FORMAT_NUMERICAL_TYPE_UFLOAT, 136 | VK_FORMAT_NUMERICAL_TYPE_SFLOAT, 137 | VK_FORMAT_NUMERICAL_TYPE_SRGB 138 | } VkFormatNumericalType; 139 | 140 | VK_LAYER_EXPORT bool FormatIsDepthOrStencil(VkFormat format); 141 | VK_LAYER_EXPORT bool FormatIsDepthAndStencil(VkFormat format); 142 | VK_LAYER_EXPORT bool FormatIsDepthOnly(VkFormat format); 143 | VK_LAYER_EXPORT bool FormatIsStencilOnly(VkFormat format); 144 | VK_LAYER_EXPORT bool FormatIsCompressed_ETC2_EAC(VkFormat format); 145 | VK_LAYER_EXPORT bool FormatIsCompressed_ASTC(VkFormat format); 146 | VK_LAYER_EXPORT bool FormatIsCompressed_ASTC_LDR(VkFormat format); 147 | VK_LAYER_EXPORT bool FormatIsCompressed_ASTC_HDR(VkFormat format); 148 | VK_LAYER_EXPORT bool FormatIsCompressed_BC(VkFormat format); 149 | VK_LAYER_EXPORT bool FormatIsCompressed_PVRTC(VkFormat format); 150 | VK_LAYER_EXPORT bool FormatIsSinglePlane_422(VkFormat format); 151 | VK_LAYER_EXPORT bool FormatIsNorm(VkFormat format); 152 | VK_LAYER_EXPORT bool FormatIsUNorm(VkFormat format); 153 | VK_LAYER_EXPORT bool FormatIsSNorm(VkFormat format); 154 | VK_LAYER_EXPORT bool FormatIsInt(VkFormat format); 155 | VK_LAYER_EXPORT bool FormatIsSInt(VkFormat format); 156 | VK_LAYER_EXPORT bool FormatIsUInt(VkFormat format); 157 | VK_LAYER_EXPORT bool FormatIsFloat(VkFormat format); 158 | VK_LAYER_EXPORT bool FormatIsSRGB(VkFormat format); 159 | VK_LAYER_EXPORT bool FormatIsUScaled(VkFormat format); 160 | VK_LAYER_EXPORT bool FormatIsSScaled(VkFormat format); 161 | VK_LAYER_EXPORT bool FormatIsSampledInt(VkFormat format); 162 | VK_LAYER_EXPORT bool FormatIsSampledFloat(VkFormat 
format); 163 | VK_LAYER_EXPORT bool FormatIsCompressed(VkFormat format); 164 | VK_LAYER_EXPORT bool FormatIsPacked(VkFormat format); 165 | VK_LAYER_EXPORT bool FormatElementIsTexel(VkFormat format); 166 | VK_LAYER_EXPORT bool FormatSizesAreEqual(VkFormat srcFormat, VkFormat dstFormat, uint32_t region_count, const VkImageCopy *regions); 167 | VK_LAYER_EXPORT bool FormatRequiresYcbcrConversion(VkFormat format); 168 | VK_LAYER_EXPORT bool FormatIsXChromaSubsampled(VkFormat format); 169 | VK_LAYER_EXPORT bool FormatIsYChromaSubsampled(VkFormat format); 170 | VK_LAYER_EXPORT VkDeviceSize GetIndexAlignment(VkIndexType indexType); 171 | 172 | VK_LAYER_EXPORT uint32_t FormatDepthSize(VkFormat format); 173 | VK_LAYER_EXPORT VkFormatNumericalType FormatDepthNumericalType(VkFormat format); 174 | VK_LAYER_EXPORT uint32_t FormatStencilSize(VkFormat format); 175 | VK_LAYER_EXPORT VkFormatNumericalType FormatStencilNumericalType(VkFormat format); 176 | VK_LAYER_EXPORT uint32_t FormatPlaneCount(VkFormat format); 177 | VK_LAYER_EXPORT uint32_t FormatChannelCount(VkFormat format); 178 | VK_LAYER_EXPORT VkExtent3D FormatTexelBlockExtent(VkFormat format); 179 | VK_LAYER_EXPORT uint32_t FormatElementSize(VkFormat format, VkImageAspectFlags aspectMask = VK_IMAGE_ASPECT_COLOR_BIT); 180 | VK_LAYER_EXPORT double FormatTexelSize(VkFormat format, VkImageAspectFlags aspectMask = VK_IMAGE_ASPECT_COLOR_BIT); 181 | VK_LAYER_EXPORT VkFormatCompatibilityClass FormatCompatibilityClass(VkFormat format); 182 | VK_LAYER_EXPORT VkDeviceSize SafeModulo(VkDeviceSize dividend, VkDeviceSize divisor); 183 | VK_LAYER_EXPORT VkDeviceSize SafeDivision(VkDeviceSize dividend, VkDeviceSize divisor); 184 | VK_LAYER_EXPORT uint32_t GetPlaneIndex(VkImageAspectFlags aspect); 185 | VK_LAYER_EXPORT VkFormat FindMultiplaneCompatibleFormat(VkFormat fmt, VkImageAspectFlags plane_aspect); 186 | VK_LAYER_EXPORT VkExtent2D FindMultiplaneExtentDivisors(VkFormat mp_fmt, VkImageAspectFlags plane_aspect); 187 | 188 | static inline bool FormatIsUndef(VkFormat format) { return (format == VK_FORMAT_UNDEFINED); } 189 | static inline bool FormatHasDepth(VkFormat format) { return (FormatIsDepthOnly(format) || FormatIsDepthAndStencil(format)); } 190 | static inline bool FormatHasStencil(VkFormat format) { return (FormatIsStencilOnly(format) || FormatIsDepthAndStencil(format)); } 191 | static inline bool FormatIsMultiplane(VkFormat format) { return ((FormatPlaneCount(format)) > 1u); } 192 | static inline bool FormatIsColor(VkFormat format) { 193 | return !(FormatIsUndef(format) || FormatIsDepthOrStencil(format) || FormatIsMultiplane(format)); 194 | } 195 | 196 | #ifdef __cplusplus 197 | } 198 | #endif 199 | -------------------------------------------------------------------------------- /python/VkInline/cffi.py: -------------------------------------------------------------------------------- 1 | # auto-generated file 2 | import _cffi_backend 3 | 4 | ffi = _cffi_backend.FFI('VkInline.cffi', 5 | _version = 0x2601, 6 | _types = 
b'\x00\x00\x2A\x0D\x00\x00\xCC\x03\x00\x00\x00\x0F\x00\x00\x3C\x0D\x00\x00\x01\x11\x00\x00\x00\x0F\x00\x00\x12\x0D\x00\x00\x01\x11\x00\x00\x00\x0F\x00\x00\x48\x0D\x00\x00\x01\x11\x00\x00\x00\x0F\x00\x00\x48\x0D\x00\x00\x01\x11\x00\x00\x01\x11\x00\x00\x01\x11\x00\x00\x01\x11\x00\x00\x12\x03\x00\x00\x0D\x01\x00\x00\x01\x11\x00\x00\x01\x11\x00\x00\x01\x11\x00\x00\x01\x11\x00\x00\x01\x03\x00\x00\x08\x01\x00\x00\x00\x0F\x00\x00\x48\x0D\x00\x00\x01\x11\x00\x00\x01\x11\x00\x00\x01\x11\x00\x00\x01\x11\x00\x00\x01\x11\x00\x00\x01\x11\x00\x00\x01\x11\x00\x00\x08\x01\x00\x00\x00\x0F\x00\x00\x48\x0D\x00\x00\x00\x0F\x00\x00\x18\x0D\x00\x00\x01\x11\x00\x00\x00\x0F\x00\x00\x35\x0D\x00\x00\xCA\x03\x00\x00\x00\x0F\x00\x00\x35\x0D\x00\x00\x01\x11\x00\x00\x00\x0F\x00\x00\x01\x0D\x00\x00\x2A\x11\x00\x00\x2A\x11\x00\x00\x00\x0F\x00\x00\x01\x0D\x00\x00\x2A\x11\x00\x00\x0C\x01\x00\x00\x01\x11\x00\x00\x00\x0F\x00\x00\x01\x0D\x00\x00\x3C\x03\x00\x00\x00\x0F\x00\x00\x01\x0D\x00\x00\x0E\x01\x00\x00\x00\x0F\x00\x00\x01\x0D\x00\x00\x12\x03\x00\x00\x00\x0F\x00\x00\x01\x0D\x00\x00\x0D\x01\x00\x00\x00\x0F\x00\x00\x01\x0D\x00\x00\x48\x03\x00\x00\x00\x0F\x00\x00\x01\x0D\x00\x00\x07\x01\x00\x00\x00\x0F\x00\x00\x01\x0D\x00\x00\x07\x01\x00\x00\x07\x01\x00\x00\x07\x01\x00\x00\x08\x01\x00\x00\x00\x0F\x00\x00\x01\x0D\x00\x00\x07\x01\x00\x00\x07\x01\x00\x00\x08\x01\x00\x00\x00\x0F\x00\x00\x01\x0D\x00\x00\x07\x01\x00\x00\x07\x01\x00\x00\x08\x01\x00\x00\x08\x01\x00\x00\x08\x01\x00\x00\x08\x01\x00\x00\x00\x0F\x00\x00\x01\x0D\x00\x00\x18\x03\x00\x00\x00\x0F\x00\x00\x01\x0D\x00\x00\x08\x01\x00\x00\x00\x0F\x00\x00\x01\x0D\x00\x00\x08\x01\x00\x00\x08\x01\x00\x00\x08\x01\x00\x00\x00\x0F\x00\x00\x01\x0D\x00\x00\x0C\x01\x00\x00\x2A\x03\x00\x00\x00\x0F\x00\x00\x01\x0D\x00\x00\x0C\x01\x00\x00\xCB\x03\x00\x00\x00\x0F\x00\x00\x01\x0D\x00\x00\x01\x11\x00\x00\x00\x0F\x00\x00\x01\x0D\x00\x00\x01\x11\x00\x00\x2A\x11\x00\x00\x08\x01\x00\x00\x00\x0F\x00\x00\x01\x0D\x00\x00\x01\x11\x00\x00\x2A\x11\x00\x00\x01\x11\x00\x00\x01\x11\x00\x00\x08\x01\x00\x00\x08\x01\x00\x00\x00\x0F\x00\x00\x01\x0D\x00\x00\x01\x11\x00\x00\x08\x01\x00\x00\x00\x0F\x00\x00\x01\x0D\x00\x00\x01\x11\x00\x00\x01\x11\x00\x00\x00\x0F\x00\x00\x01\x0D\x00\x00\x01\x11\x00\x00\x01\x11\x00\x00\x2A\x11\x00\x00\x00\x0F\x00\x00\xCC\x0D\x00\x00\x2A\x11\x00\x00\x00\x0F\x00\x00\xCC\x0D\x00\x00\x2A\x11\x00\x00\x2A\x11\x00\x00\x00\x0F\x00\x00\xCC\x0D\x00\x00\x08\x01\x00\x00\x00\x0F\x00\x00\xCC\x0D\x00\x00\x01\x11\x00\x00\x00\x0F\x00\x00\xCC\x0D\x00\x00\x01\x11\x00\x00\x3C\x03\x00\x00\x00\x0F\x00\x00\xCC\x0D\x00\x00\x01\x11\x00\x00\x11\x11\x00\x00\x00\x0F\x00\x00\xCC\x0D\x00\x00\x01\x11\x00\x00\x0D\x01\x00\x00\x00\x0F\x00\x00\xCC\x0D\x00\x00\x01\x11\x00\x00\x0D\x01\x00\x00\x0D\x01\x00\x00\x0D\x01\x00\x00\x0D\x01\x00\x00\x00\x0F\x00\x00\xCC\x0D\x00\x00\x01\x11\x00\x00\x48\x03\x00\x00\x00\x0F\x00\x00\xCC\x0D\x00\x00\x01\x11\x00\x00\x07\x01\x00\x00\x08\x01\x00\x00\x00\x0F\x00\x00\xCC\x0D\x00\x00\x01\x11\x00\x00\x18\x03\x00\x00\x00\x0F\x00\x00\xCC\x0D\x00\x00\x01\x11\x00\x00\x08\x01\x00\x00\x00\x0F\x00\x00\xCC\x0D\x00\x00\x01\x11\x00\x00\x01\x11\x00\x00\x00\x0F\x00\x00\xCC\x0D\x00\x00\x01\x11\x00\x00\x01\x11\x00\x00\x0C\x01\x00\x00\x0C\x01\x00\x00\x00\x0F\x00\x00\xCC\x0D\x00\x00\x00\x0F\x00\x00\x02\x01\x00\x00\xCC\x03\x00\x00\x00\x01', 7 | _globals = 
(b'\x00\x00\x90\x23n_add_built_in_header',0,b'\x00\x00\x8D\x23n_add_code_block',0,b'\x00\x00\x8D\x23n_add_inlcude_filename',0,b'\x00\x00\x70\x23n_blas_create_procedure',0,b'\x00\x00\x84\x23n_blas_create_triangles',0,b'\x00\x00\x97\x23n_blas_destroy',0,b'\x00\x00\x73\x23n_computer_create',0,b'\x00\x00\x97\x23n_computer_destroy',0,b'\x00\x00\x1A\x23n_computer_launch',0,b'\x00\x00\x09\x23n_computer_num_params',0,b'\x00\x00\x26\x23n_cubemap_channelcount',0,b'\x00\x00\x50\x23n_cubemap_create',0,b'\x00\x00\xBE\x23n_cubemap_download',0,b'\x00\x00\x09\x23n_cubemap_height',0,b'\x00\x00\x26\x23n_cubemap_pixelsize',0,b'\x00\x00\x97\x23n_cubemap_release',0,b'\x00\x00\xBE\x23n_cubemap_upload',0,b'\x00\x00\x26\x23n_cubemap_vkformat',0,b'\x00\x00\x09\x23n_cubemap_width',0,b'\x00\x00\x63\x23n_dim3_create',0,b'\x00\x00\x97\x23n_dim3_destroy',0,b'\x00\x00\x2F\x23n_drawcall_create',0,b'\x00\x00\x97\x23n_drawcall_destroy',0,b'\x00\x00\xBA\x23n_drawcall_set_alpha_blend_op',0,b'\x00\x00\xBA\x23n_drawcall_set_alpha_write',0,b'\x00\x00\xA6\x23n_drawcall_set_blend_constants',0,b'\x00\x00\xBA\x23n_drawcall_set_blend_enable',0,b'\x00\x00\xBA\x23n_drawcall_set_color_blend_op',0,b'\x00\x00\xBA\x23n_drawcall_set_color_write',0,b'\x00\x00\xBA\x23n_drawcall_set_color_write_b',0,b'\x00\x00\xBA\x23n_drawcall_set_color_write_g',0,b'\x00\x00\xBA\x23n_drawcall_set_color_write_r',0,b'\x00\x00\xBA\x23n_drawcall_set_cull_mode',0,b'\x00\x00\xBA\x23n_drawcall_set_depth_compare_op',0,b'\x00\x00\xBA\x23n_drawcall_set_depth_enable',0,b'\x00\x00\xBA\x23n_drawcall_set_depth_write',0,b'\x00\x00\xBA\x23n_drawcall_set_dst_alpha_blend_factor',0,b'\x00\x00\xBA\x23n_drawcall_set_dst_color_blend_factor',0,b'\x00\x00\xBA\x23n_drawcall_set_front_face',0,b'\x00\x00\xB1\x23n_drawcall_set_ith_alpha_blend_op',0,b'\x00\x00\xB1\x23n_drawcall_set_ith_alpha_write',0,b'\x00\x00\xB1\x23n_drawcall_set_ith_blend_enable',0,b'\x00\x00\xB1\x23n_drawcall_set_ith_color_blend_op',0,b'\x00\x00\xB1\x23n_drawcall_set_ith_color_write',0,b'\x00\x00\xB1\x23n_drawcall_set_ith_color_write_b',0,b'\x00\x00\xB1\x23n_drawcall_set_ith_color_write_g',0,b'\x00\x00\xB1\x23n_drawcall_set_ith_color_write_r',0,b'\x00\x00\xB1\x23n_drawcall_set_ith_dst_alpha_blend_factor',0,b'\x00\x00\xB1\x23n_drawcall_set_ith_dst_color_blend_factor',0,b'\x00\x00\xB1\x23n_drawcall_set_ith_src_alpha_blend_factor',0,b'\x00\x00\xB1\x23n_drawcall_set_ith_src_color_blend_factor',0,b'\x00\x00\xA2\x23n_drawcall_set_line_width',0,b'\x00\x00\xBA\x23n_drawcall_set_polygon_mode',0,b'\x00\x00\xBA\x23n_drawcall_set_primitive_restart',0,b'\x00\x00\xBA\x23n_drawcall_set_primitive_topology',0,b'\x00\x00\xBA\x23n_drawcall_set_src_alpha_blend_factor',0,b'\x00\x00\xBA\x23n_drawcall_set_src_color_blend_factor',0,b'\x00\x00\x2F\x23n_hit_shaders_create',0,b'\x00\x00\x97\x23n_hit_shaders_destroy',0,b'\x00\x00\x97\x23n_launch_param_destroy',0,b'\x00\x00\x70\x23n_launch_param_from_buffer',0,b'\x00\x00\x60\x23n_launch_param_from_count',0,b'\x00\x00\x3E\x23n_mat4_create',0,b'\x00\x00\x97\x23n_mat4_destroy',0,b'\x00\x00\x6C\x23n_pointer_array_create',0,b'\x00\x00\x97\x23n_pointer_array_destroy',0,b'\x00\x00\x2C\x23n_pointer_array_size',0,b'\x00\x00\xBE\x23n_rasterizer_add_draw_call',0,b'\x00\x00\x80\x23n_rasterizer_create',0,b'\x00\x00\x97\x23n_rasterizer_destroy',0,b'\x00\x00\x0C\x23n_rasterizer_launch',0,b'\x00\x00\x09\x23n_rasterizer_num_params',0,b'\x00\x00\xB1\x23n_rasterizer_set_clear_color_buf',0,b'\x00\x00\xBA\x23n_rasterizer_set_clear_depth_buf',0,b'\x00\x00\x78\x23n_raytracer_create',0,b'\x00\x00\x97\x23n_raytracer_de
stroy',0,b'\x00\x00\x1A\x23n_raytracer_launch',0,b'\x00\x00\x09\x23n_raytracer_num_params',0,b'\x00\x00\x94\x23n_set_verbose',0,b'\x00\x00\x29\x23n_size_of',0,b'\x00\x00\x68\x23n_string_array_create',0,b'\x00\x00\x97\x23n_string_array_destroy',0,b'\x00\x00\x2C\x23n_string_array_size',0,b'\x00\x00\x97\x23n_sv_destroy',0,b'\x00\x00\x00\x23n_sv_name_view_type',0,b'\x00\x00\x33\x23n_svbuffer_create',0,b'\x00\x00\x2C\x23n_svbuffer_elem_size',0,b'\x00\x00\xBE\x23n_svbuffer_from_host',0,b'\x00\x00\x00\x23n_svbuffer_name_elem_type',0,b'\x00\x00\x2C\x23n_svbuffer_size',0,b'\x00\x00\xC2\x23n_svbuffer_to_host',0,b'\x00\x00\x88\x23n_svcombine_create',0,b'\x00\x00\x38\x23n_svdmat2x2_create',0,b'\x00\x00\x9A\x23n_svdmat2x2_value',0,b'\x00\x00\x38\x23n_svdmat2x3_create',0,b'\x00\x00\x9A\x23n_svdmat2x3_value',0,b'\x00\x00\x38\x23n_svdmat2x4_create',0,b'\x00\x00\x9A\x23n_svdmat2x4_value',0,b'\x00\x00\x38\x23n_svdmat3x2_create',0,b'\x00\x00\x9A\x23n_svdmat3x2_value',0,b'\x00\x00\x38\x23n_svdmat3x3_create',0,b'\x00\x00\x9A\x23n_svdmat3x3_value',0,b'\x00\x00\x38\x23n_svdmat3x4_create',0,b'\x00\x00\x9A\x23n_svdmat3x4_value',0,b'\x00\x00\x38\x23n_svdmat4x2_create',0,b'\x00\x00\x9A\x23n_svdmat4x2_value',0,b'\x00\x00\x38\x23n_svdmat4x3_create',0,b'\x00\x00\x9A\x23n_svdmat4x3_value',0,b'\x00\x00\x38\x23n_svdmat4x4_create',0,b'\x00\x00\x9A\x23n_svdmat4x4_value',0,b'\x00\x00\x3B\x23n_svdouble_create',0,b'\x00\x00\x03\x23n_svdouble_value',0,b'\x00\x00\x38\x23n_svdvec2_create',0,b'\x00\x00\x9A\x23n_svdvec2_value',0,b'\x00\x00\x38\x23n_svdvec3_create',0,b'\x00\x00\x9A\x23n_svdvec3_value',0,b'\x00\x00\x38\x23n_svdvec4_create',0,b'\x00\x00\x9A\x23n_svdvec4_value',0,b'\x00\x00\x41\x23n_svfloat_create',0,b'\x00\x00\x06\x23n_svfloat_value',0,b'\x00\x00\x47\x23n_svint32_create',0,b'\x00\x00\x09\x23n_svint32_value',0,b'\x00\x00\x44\x23n_svivec2_create',0,b'\x00\x00\xAD\x23n_svivec2_value',0,b'\x00\x00\x44\x23n_svivec3_create',0,b'\x00\x00\xAD\x23n_svivec3_value',0,b'\x00\x00\x44\x23n_svivec4_create',0,b'\x00\x00\xAD\x23n_svivec4_value',0,b'\x00\x00\x3E\x23n_svmat2x2_create',0,b'\x00\x00\x9E\x23n_svmat2x2_value',0,b'\x00\x00\x3E\x23n_svmat2x3_create',0,b'\x00\x00\x9E\x23n_svmat2x3_value',0,b'\x00\x00\x3E\x23n_svmat2x4_create',0,b'\x00\x00\x9E\x23n_svmat2x4_value',0,b'\x00\x00\x3E\x23n_svmat3x2_create',0,b'\x00\x00\x9E\x23n_svmat3x2_value',0,b'\x00\x00\x3E\x23n_svmat3x3_create',0,b'\x00\x00\x9E\x23n_svmat3x3_value',0,b'\x00\x00\x3E\x23n_svmat3x4_create',0,b'\x00\x00\x9E\x23n_svmat3x4_value',0,b'\x00\x00\x3E\x23n_svmat4x2_create',0,b'\x00\x00\x9E\x23n_svmat4x2_value',0,b'\x00\x00\x3E\x23n_svmat4x3_create',0,b'\x00\x00\x9E\x23n_svmat4x3_value',0,b'\x00\x00\x3E\x23n_svmat4x4_create',0,b'\x00\x00\x9E\x23n_svmat4x4_value',0,b'\x00\x00\x70\x23n_svobjbuffer_create',0,b'\x00\x00\x2C\x23n_svobjbuffer_elem_size',0,b'\x00\x00\x00\x23n_svobjbuffer_name_elem_type',0,b'\x00\x00\x2C\x23n_svobjbuffer_size',0,b'\x00\x00\x97\x23n_svobjbuffer_update',0,b'\x00\x00\x60\x23n_svuint32_create',0,b'\x00\x00\x26\x23n_svuint32_value',0,b'\x00\x00\x5D\x23n_svuvec2_create',0,b'\x00\x00\xB6\x23n_svuvec2_value',0,b'\x00\x00\x5D\x23n_svuvec3_create',0,b'\x00\x00\xB6\x23n_svuvec3_value',0,b'\x00\x00\x5D\x23n_svuvec4_create',0,b'\x00\x00\xB6\x23n_svuvec4_value',0,b'\x00\x00\x3E\x23n_svvec2_create',0,b'\x00\x00\x9E\x23n_svvec2_value',0,b'\x00\x00\x3E\x23n_svvec3_create',0,b'\x00\x00\x9E\x23n_svvec3_value',0,b'\x00\x00\x3E\x23n_svvec4_create',0,b'\x00\x00\x9E\x23n_svvec4_value',0,b'\x00\x00\x26\x23n_texture2d_channelcount',0,b'\x00\x00\x55\x23n_texture2d_create',0
,b'\x00\x00\xBE\x23n_texture2d_download',0,b'\x00\x00\x09\x23n_texture2d_height',0,b'\x00\x00\x26\x23n_texture2d_pixelsize',0,b'\x00\x00\x97\x23n_texture2d_release',0,b'\x00\x00\x26\x23n_texture2d_samplecount',0,b'\x00\x00\xBE\x23n_texture2d_upload',0,b'\x00\x00\x26\x23n_texture2d_vkformat',0,b'\x00\x00\x09\x23n_texture2d_width',0,b'\x00\x00\x26\x23n_texture3d_channelcount',0,b'\x00\x00\x4A\x23n_texture3d_create',0,b'\x00\x00\x09\x23n_texture3d_dimX',0,b'\x00\x00\x09\x23n_texture3d_dimY',0,b'\x00\x00\x09\x23n_texture3d_dimZ',0,b'\x00\x00\xBE\x23n_texture3d_download',0,b'\x00\x00\x26\x23n_texture3d_pixelsize',0,b'\x00\x00\x97\x23n_texture3d_release',0,b'\x00\x00\xBE\x23n_texture3d_upload',0,b'\x00\x00\x26\x23n_texture3d_vkformat',0,b'\x00\x00\x84\x23n_tlas_create',0,b'\x00\x00\x97\x23n_tlas_destroy',0,b'\x00\x00\x24\x23n_vkinline_try_init',0,b'\x00\x00\xC8\x23n_wait',0), 8 | ) 9 | -------------------------------------------------------------------------------- /thirdparty/crc64/crc64.cpp: -------------------------------------------------------------------------------- 1 | /* Redis uses the CRC64 variant with "Jones" coefficients and init value of 0. 2 | * 3 | * Specification of this CRC64 variant follows: 4 | * Name: crc-64-jones 5 | * Width: 64 bites 6 | * Poly: 0xad93d23594c935a9 7 | * Reflected In: True 8 | * Xor_In: 0xffffffffffffffff 9 | * Reflected_Out: True 10 | * Xor_Out: 0x0 11 | * Check("123456789"): 0xe9c6d914c4b8d9ca 12 | * 13 | * Copyright (c) 2012, Salvatore Sanfilippo 14 | * All rights reserved. 15 | * 16 | * Redistribution and use in source and binary forms, with or without 17 | * modification, are permitted provided that the following conditions are met: 18 | * 19 | * * Redistributions of source code must retain the above copyright notice, 20 | * this list of conditions and the following disclaimer. 21 | * * Redistributions in binary form must reproduce the above copyright 22 | * notice, this list of conditions and the following disclaimer in the 23 | * documentation and/or other materials provided with the distribution. 24 | * * Neither the name of Redis nor the names of its contributors may be used 25 | * to endorse or promote products derived from this software without 26 | * specific prior written permission. 27 | * 28 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 29 | * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 30 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 31 | * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 32 | * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 33 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 34 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 35 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 36 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 37 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 38 | * POSSIBILITY OF SUCH DAMAGE. 
*/ 39 | 40 | #include 41 | 42 | static const uint64_t crc64_tab[256] = { 43 | UINT64_C(0x0000000000000000), UINT64_C(0x7ad870c830358979), 44 | UINT64_C(0xf5b0e190606b12f2), UINT64_C(0x8f689158505e9b8b), 45 | UINT64_C(0xc038e5739841b68f), UINT64_C(0xbae095bba8743ff6), 46 | UINT64_C(0x358804e3f82aa47d), UINT64_C(0x4f50742bc81f2d04), 47 | UINT64_C(0xab28ecb46814fe75), UINT64_C(0xd1f09c7c5821770c), 48 | UINT64_C(0x5e980d24087fec87), UINT64_C(0x24407dec384a65fe), 49 | UINT64_C(0x6b1009c7f05548fa), UINT64_C(0x11c8790fc060c183), 50 | UINT64_C(0x9ea0e857903e5a08), UINT64_C(0xe478989fa00bd371), 51 | UINT64_C(0x7d08ff3b88be6f81), UINT64_C(0x07d08ff3b88be6f8), 52 | UINT64_C(0x88b81eabe8d57d73), UINT64_C(0xf2606e63d8e0f40a), 53 | UINT64_C(0xbd301a4810ffd90e), UINT64_C(0xc7e86a8020ca5077), 54 | UINT64_C(0x4880fbd87094cbfc), UINT64_C(0x32588b1040a14285), 55 | UINT64_C(0xd620138fe0aa91f4), UINT64_C(0xacf86347d09f188d), 56 | UINT64_C(0x2390f21f80c18306), UINT64_C(0x594882d7b0f40a7f), 57 | UINT64_C(0x1618f6fc78eb277b), UINT64_C(0x6cc0863448deae02), 58 | UINT64_C(0xe3a8176c18803589), UINT64_C(0x997067a428b5bcf0), 59 | UINT64_C(0xfa11fe77117cdf02), UINT64_C(0x80c98ebf2149567b), 60 | UINT64_C(0x0fa11fe77117cdf0), UINT64_C(0x75796f2f41224489), 61 | UINT64_C(0x3a291b04893d698d), UINT64_C(0x40f16bccb908e0f4), 62 | UINT64_C(0xcf99fa94e9567b7f), UINT64_C(0xb5418a5cd963f206), 63 | UINT64_C(0x513912c379682177), UINT64_C(0x2be1620b495da80e), 64 | UINT64_C(0xa489f35319033385), UINT64_C(0xde51839b2936bafc), 65 | UINT64_C(0x9101f7b0e12997f8), UINT64_C(0xebd98778d11c1e81), 66 | UINT64_C(0x64b116208142850a), UINT64_C(0x1e6966e8b1770c73), 67 | UINT64_C(0x8719014c99c2b083), UINT64_C(0xfdc17184a9f739fa), 68 | UINT64_C(0x72a9e0dcf9a9a271), UINT64_C(0x08719014c99c2b08), 69 | UINT64_C(0x4721e43f0183060c), UINT64_C(0x3df994f731b68f75), 70 | UINT64_C(0xb29105af61e814fe), UINT64_C(0xc849756751dd9d87), 71 | UINT64_C(0x2c31edf8f1d64ef6), UINT64_C(0x56e99d30c1e3c78f), 72 | UINT64_C(0xd9810c6891bd5c04), UINT64_C(0xa3597ca0a188d57d), 73 | UINT64_C(0xec09088b6997f879), UINT64_C(0x96d1784359a27100), 74 | UINT64_C(0x19b9e91b09fcea8b), UINT64_C(0x636199d339c963f2), 75 | UINT64_C(0xdf7adabd7a6e2d6f), UINT64_C(0xa5a2aa754a5ba416), 76 | UINT64_C(0x2aca3b2d1a053f9d), UINT64_C(0x50124be52a30b6e4), 77 | UINT64_C(0x1f423fcee22f9be0), UINT64_C(0x659a4f06d21a1299), 78 | UINT64_C(0xeaf2de5e82448912), UINT64_C(0x902aae96b271006b), 79 | UINT64_C(0x74523609127ad31a), UINT64_C(0x0e8a46c1224f5a63), 80 | UINT64_C(0x81e2d7997211c1e8), UINT64_C(0xfb3aa75142244891), 81 | UINT64_C(0xb46ad37a8a3b6595), UINT64_C(0xceb2a3b2ba0eecec), 82 | UINT64_C(0x41da32eaea507767), UINT64_C(0x3b024222da65fe1e), 83 | UINT64_C(0xa2722586f2d042ee), UINT64_C(0xd8aa554ec2e5cb97), 84 | UINT64_C(0x57c2c41692bb501c), UINT64_C(0x2d1ab4dea28ed965), 85 | UINT64_C(0x624ac0f56a91f461), UINT64_C(0x1892b03d5aa47d18), 86 | UINT64_C(0x97fa21650afae693), UINT64_C(0xed2251ad3acf6fea), 87 | UINT64_C(0x095ac9329ac4bc9b), UINT64_C(0x7382b9faaaf135e2), 88 | UINT64_C(0xfcea28a2faafae69), UINT64_C(0x8632586aca9a2710), 89 | UINT64_C(0xc9622c4102850a14), UINT64_C(0xb3ba5c8932b0836d), 90 | UINT64_C(0x3cd2cdd162ee18e6), UINT64_C(0x460abd1952db919f), 91 | UINT64_C(0x256b24ca6b12f26d), UINT64_C(0x5fb354025b277b14), 92 | UINT64_C(0xd0dbc55a0b79e09f), UINT64_C(0xaa03b5923b4c69e6), 93 | UINT64_C(0xe553c1b9f35344e2), UINT64_C(0x9f8bb171c366cd9b), 94 | UINT64_C(0x10e3202993385610), UINT64_C(0x6a3b50e1a30ddf69), 95 | UINT64_C(0x8e43c87e03060c18), UINT64_C(0xf49bb8b633338561), 96 | UINT64_C(0x7bf329ee636d1eea), 
UINT64_C(0x012b592653589793), 97 | UINT64_C(0x4e7b2d0d9b47ba97), UINT64_C(0x34a35dc5ab7233ee), 98 | UINT64_C(0xbbcbcc9dfb2ca865), UINT64_C(0xc113bc55cb19211c), 99 | UINT64_C(0x5863dbf1e3ac9dec), UINT64_C(0x22bbab39d3991495), 100 | UINT64_C(0xadd33a6183c78f1e), UINT64_C(0xd70b4aa9b3f20667), 101 | UINT64_C(0x985b3e827bed2b63), UINT64_C(0xe2834e4a4bd8a21a), 102 | UINT64_C(0x6debdf121b863991), UINT64_C(0x1733afda2bb3b0e8), 103 | UINT64_C(0xf34b37458bb86399), UINT64_C(0x8993478dbb8deae0), 104 | UINT64_C(0x06fbd6d5ebd3716b), UINT64_C(0x7c23a61ddbe6f812), 105 | UINT64_C(0x3373d23613f9d516), UINT64_C(0x49aba2fe23cc5c6f), 106 | UINT64_C(0xc6c333a67392c7e4), UINT64_C(0xbc1b436e43a74e9d), 107 | UINT64_C(0x95ac9329ac4bc9b5), UINT64_C(0xef74e3e19c7e40cc), 108 | UINT64_C(0x601c72b9cc20db47), UINT64_C(0x1ac40271fc15523e), 109 | UINT64_C(0x5594765a340a7f3a), UINT64_C(0x2f4c0692043ff643), 110 | UINT64_C(0xa02497ca54616dc8), UINT64_C(0xdafce7026454e4b1), 111 | UINT64_C(0x3e847f9dc45f37c0), UINT64_C(0x445c0f55f46abeb9), 112 | UINT64_C(0xcb349e0da4342532), UINT64_C(0xb1eceec59401ac4b), 113 | UINT64_C(0xfebc9aee5c1e814f), UINT64_C(0x8464ea266c2b0836), 114 | UINT64_C(0x0b0c7b7e3c7593bd), UINT64_C(0x71d40bb60c401ac4), 115 | UINT64_C(0xe8a46c1224f5a634), UINT64_C(0x927c1cda14c02f4d), 116 | UINT64_C(0x1d148d82449eb4c6), UINT64_C(0x67ccfd4a74ab3dbf), 117 | UINT64_C(0x289c8961bcb410bb), UINT64_C(0x5244f9a98c8199c2), 118 | UINT64_C(0xdd2c68f1dcdf0249), UINT64_C(0xa7f41839ecea8b30), 119 | UINT64_C(0x438c80a64ce15841), UINT64_C(0x3954f06e7cd4d138), 120 | UINT64_C(0xb63c61362c8a4ab3), UINT64_C(0xcce411fe1cbfc3ca), 121 | UINT64_C(0x83b465d5d4a0eece), UINT64_C(0xf96c151de49567b7), 122 | UINT64_C(0x76048445b4cbfc3c), UINT64_C(0x0cdcf48d84fe7545), 123 | UINT64_C(0x6fbd6d5ebd3716b7), UINT64_C(0x15651d968d029fce), 124 | UINT64_C(0x9a0d8ccedd5c0445), UINT64_C(0xe0d5fc06ed698d3c), 125 | UINT64_C(0xaf85882d2576a038), UINT64_C(0xd55df8e515432941), 126 | UINT64_C(0x5a3569bd451db2ca), UINT64_C(0x20ed197575283bb3), 127 | UINT64_C(0xc49581ead523e8c2), UINT64_C(0xbe4df122e51661bb), 128 | UINT64_C(0x3125607ab548fa30), UINT64_C(0x4bfd10b2857d7349), 129 | UINT64_C(0x04ad64994d625e4d), UINT64_C(0x7e7514517d57d734), 130 | UINT64_C(0xf11d85092d094cbf), UINT64_C(0x8bc5f5c11d3cc5c6), 131 | UINT64_C(0x12b5926535897936), UINT64_C(0x686de2ad05bcf04f), 132 | UINT64_C(0xe70573f555e26bc4), UINT64_C(0x9ddd033d65d7e2bd), 133 | UINT64_C(0xd28d7716adc8cfb9), UINT64_C(0xa85507de9dfd46c0), 134 | UINT64_C(0x273d9686cda3dd4b), UINT64_C(0x5de5e64efd965432), 135 | UINT64_C(0xb99d7ed15d9d8743), UINT64_C(0xc3450e196da80e3a), 136 | UINT64_C(0x4c2d9f413df695b1), UINT64_C(0x36f5ef890dc31cc8), 137 | UINT64_C(0x79a59ba2c5dc31cc), UINT64_C(0x037deb6af5e9b8b5), 138 | UINT64_C(0x8c157a32a5b7233e), UINT64_C(0xf6cd0afa9582aa47), 139 | UINT64_C(0x4ad64994d625e4da), UINT64_C(0x300e395ce6106da3), 140 | UINT64_C(0xbf66a804b64ef628), UINT64_C(0xc5bed8cc867b7f51), 141 | UINT64_C(0x8aeeace74e645255), UINT64_C(0xf036dc2f7e51db2c), 142 | UINT64_C(0x7f5e4d772e0f40a7), UINT64_C(0x05863dbf1e3ac9de), 143 | UINT64_C(0xe1fea520be311aaf), UINT64_C(0x9b26d5e88e0493d6), 144 | UINT64_C(0x144e44b0de5a085d), UINT64_C(0x6e963478ee6f8124), 145 | UINT64_C(0x21c640532670ac20), UINT64_C(0x5b1e309b16452559), 146 | UINT64_C(0xd476a1c3461bbed2), UINT64_C(0xaeaed10b762e37ab), 147 | UINT64_C(0x37deb6af5e9b8b5b), UINT64_C(0x4d06c6676eae0222), 148 | UINT64_C(0xc26e573f3ef099a9), UINT64_C(0xb8b627f70ec510d0), 149 | UINT64_C(0xf7e653dcc6da3dd4), UINT64_C(0x8d3e2314f6efb4ad), 150 | 
UINT64_C(0x0256b24ca6b12f26), UINT64_C(0x788ec2849684a65f), 151 | UINT64_C(0x9cf65a1b368f752e), UINT64_C(0xe62e2ad306bafc57), 152 | UINT64_C(0x6946bb8b56e467dc), UINT64_C(0x139ecb4366d1eea5), 153 | UINT64_C(0x5ccebf68aecec3a1), UINT64_C(0x2616cfa09efb4ad8), 154 | UINT64_C(0xa97e5ef8cea5d153), UINT64_C(0xd3a62e30fe90582a), 155 | UINT64_C(0xb0c7b7e3c7593bd8), UINT64_C(0xca1fc72bf76cb2a1), 156 | UINT64_C(0x45775673a732292a), UINT64_C(0x3faf26bb9707a053), 157 | UINT64_C(0x70ff52905f188d57), UINT64_C(0x0a2722586f2d042e), 158 | UINT64_C(0x854fb3003f739fa5), UINT64_C(0xff97c3c80f4616dc), 159 | UINT64_C(0x1bef5b57af4dc5ad), UINT64_C(0x61372b9f9f784cd4), 160 | UINT64_C(0xee5fbac7cf26d75f), UINT64_C(0x9487ca0fff135e26), 161 | UINT64_C(0xdbd7be24370c7322), UINT64_C(0xa10fceec0739fa5b), 162 | UINT64_C(0x2e675fb4576761d0), UINT64_C(0x54bf2f7c6752e8a9), 163 | UINT64_C(0xcdcf48d84fe75459), UINT64_C(0xb71738107fd2dd20), 164 | UINT64_C(0x387fa9482f8c46ab), UINT64_C(0x42a7d9801fb9cfd2), 165 | UINT64_C(0x0df7adabd7a6e2d6), UINT64_C(0x772fdd63e7936baf), 166 | UINT64_C(0xf8474c3bb7cdf024), UINT64_C(0x829f3cf387f8795d), 167 | UINT64_C(0x66e7a46c27f3aa2c), UINT64_C(0x1c3fd4a417c62355), 168 | UINT64_C(0x935745fc4798b8de), UINT64_C(0xe98f353477ad31a7), 169 | UINT64_C(0xa6df411fbfb21ca3), UINT64_C(0xdc0731d78f8795da), 170 | UINT64_C(0x536fa08fdfd90e51), UINT64_C(0x29b7d047efec8728), 171 | }; 172 | 173 | uint64_t crc64(uint64_t crc, const unsigned char *s, uint64_t l) { 174 | uint64_t j; 175 | 176 | for (j = 0; j < l; j++) { 177 | uint8_t byte = s[j]; 178 | crc = crc64_tab[(uint8_t)crc ^ byte] ^ (crc >> 8); 179 | } 180 | return crc; 181 | } 182 | -------------------------------------------------------------------------------- /python/api_Context.cpp: -------------------------------------------------------------------------------- 1 | #include "api.h" 2 | #include "Context.h" 3 | using namespace VkInline; 4 | #include 5 | #include 6 | 7 | typedef std::vector StrArray; 8 | typedef std::vector PtrArray; 9 | typedef std::vector Tex2DArray; 10 | typedef std::vector Tex3DArray; 11 | typedef std::vector CubemapArray; 12 | 13 | int n_vkinline_try_init() 14 | { 15 | return TryInit() ? 
1 : 0; 16 | } 17 | 18 | void n_set_verbose(unsigned verbose) 19 | { 20 | SetVerbose(verbose != 0); 21 | } 22 | 23 | unsigned long long n_size_of(const char* cls) 24 | { 25 | return SizeOf(cls); 26 | } 27 | 28 | void n_add_built_in_header(const char* filename, const char* filecontent) 29 | { 30 | AddBuiltInHeader(filename, filecontent); 31 | } 32 | 33 | void n_add_inlcude_filename(const char* fn) 34 | { 35 | AddInlcudeFilename(fn); 36 | } 37 | 38 | void n_add_code_block(const char* line) 39 | { 40 | AddCodeBlock(line); 41 | } 42 | 43 | void n_wait() 44 | { 45 | Wait(); 46 | } 47 | 48 | void* n_computer_create(void* ptr_param_list, const char* body, unsigned type_locked) 49 | { 50 | StrArray* param_list = (StrArray*)ptr_param_list; 51 | size_t num_params = param_list->size(); 52 | std::vector params(num_params); 53 | for (size_t i = 0; i < num_params; i++) 54 | params[i] = (*param_list)[i].c_str(); 55 | return new Computer(params, body, type_locked!=0); 56 | } 57 | 58 | void n_computer_destroy(void* cptr) 59 | { 60 | delete (Computer*)cptr; 61 | } 62 | 63 | int n_computer_num_params(void* cptr) 64 | { 65 | Computer* kernel = (Computer*)cptr; 66 | return (int)kernel->num_params(); 67 | } 68 | 69 | 70 | int n_computer_launch(void* ptr_kernel, void* ptr_gridDim, void* ptr_blockDim, void* ptr_arg_list, 71 | void* ptr_tex2d_list, void* ptr_tex3d_list, void* ptr_cubemap_list, unsigned times_submission) 72 | { 73 | Computer* kernel = (Computer*)ptr_kernel; 74 | size_t num_params = kernel->num_params(); 75 | 76 | dim_type* gridDim = (dim_type*)ptr_gridDim; 77 | dim_type* blockDim = (dim_type*)ptr_blockDim; 78 | 79 | PtrArray* arg_list = (PtrArray*)ptr_arg_list; 80 | Tex2DArray* tex2d_list = (Tex2DArray*)ptr_tex2d_list; 81 | Tex3DArray* tex3d_list = (Tex3DArray*)ptr_tex3d_list; 82 | CubemapArray* cubemap_list = (CubemapArray*)ptr_cubemap_list; 83 | 84 | size_t size = arg_list->size(); 85 | if (num_params != size) 86 | { 87 | printf("Wrong number of arguments received. 
%d required, %d received.", (int)num_params, (int)size); 88 | return -1; 89 | } 90 | 91 | if (kernel->launch(*gridDim, *blockDim, arg_list->data(), *tex2d_list, *tex3d_list, *cubemap_list, times_submission)) 92 | return 0; 93 | else 94 | return -1; 95 | } 96 | 97 | void* n_drawcall_create(const char* code_body_vert, const char* code_body_frag) 98 | { 99 | return new DrawCall(code_body_vert, code_body_frag); 100 | } 101 | 102 | void n_drawcall_destroy(void* cptr) 103 | { 104 | delete (DrawCall*)cptr; 105 | } 106 | 107 | void n_drawcall_set_primitive_topology(void* cptr, unsigned topo) 108 | { 109 | DrawCall* dc = (DrawCall*)cptr; 110 | dc->set_primitive_topology(topo); 111 | } 112 | 113 | void n_drawcall_set_primitive_restart(void* cptr, unsigned enable) 114 | { 115 | DrawCall* dc = (DrawCall*)cptr; 116 | dc->set_primitive_restart(enable!=0); 117 | } 118 | 119 | void n_drawcall_set_polygon_mode(void* cptr, unsigned mode) 120 | { 121 | DrawCall* dc = (DrawCall*)cptr; 122 | dc->set_polygon_mode(mode); 123 | } 124 | 125 | void n_drawcall_set_cull_mode(void* cptr, unsigned mode) 126 | { 127 | DrawCall* dc = (DrawCall*)cptr; 128 | dc->set_cull_mode(mode); 129 | } 130 | 131 | void n_drawcall_set_front_face(void* cptr, unsigned mode) 132 | { 133 | DrawCall* dc = (DrawCall*)cptr; 134 | dc->set_front_face(mode); 135 | } 136 | 137 | void n_drawcall_set_line_width(void* cptr, float width) 138 | { 139 | DrawCall* dc = (DrawCall*)cptr; 140 | dc->set_line_width(width); 141 | } 142 | 143 | void n_drawcall_set_depth_enable(void* cptr, unsigned enable) 144 | { 145 | DrawCall* dc = (DrawCall*)cptr; 146 | dc->set_depth_enable(enable != 0); 147 | } 148 | 149 | void n_drawcall_set_depth_write(void* cptr, unsigned enable) 150 | { 151 | DrawCall* dc = (DrawCall*)cptr; 152 | dc->set_depth_write(enable != 0); 153 | } 154 | 155 | void n_drawcall_set_depth_compare_op(void* cptr, unsigned op) 156 | { 157 | DrawCall* dc = (DrawCall*)cptr; 158 | dc->set_depth_comapre_op(op); 159 | } 160 | 161 | void n_drawcall_set_color_write(void* cptr, unsigned enable) 162 | { 163 | DrawCall* dc = (DrawCall*)cptr; 164 | dc->set_color_write(enable != 0); 165 | } 166 | 167 | void n_drawcall_set_color_write_r(void* cptr, unsigned enable) 168 | { 169 | DrawCall* dc = (DrawCall*)cptr; 170 | dc->set_color_write_r(enable != 0); 171 | } 172 | 173 | void n_drawcall_set_color_write_g(void* cptr, unsigned enable) 174 | { 175 | DrawCall* dc = (DrawCall*)cptr; 176 | dc->set_color_write_g(enable != 0); 177 | } 178 | 179 | void n_drawcall_set_color_write_b(void* cptr, unsigned enable) 180 | { 181 | DrawCall* dc = (DrawCall*)cptr; 182 | dc->set_color_write_b(enable != 0); 183 | } 184 | 185 | void n_drawcall_set_alpha_write(void* cptr, unsigned enable) 186 | { 187 | DrawCall* dc = (DrawCall*)cptr; 188 | dc->set_alpha_write(enable != 0); 189 | } 190 | 191 | void n_drawcall_set_blend_enable(void* cptr, unsigned enable) 192 | { 193 | DrawCall* dc = (DrawCall*)cptr; 194 | dc->set_blend_enable(enable != 0); 195 | } 196 | 197 | void n_drawcall_set_src_color_blend_factor(void* cptr, unsigned factor) 198 | { 199 | DrawCall* dc = (DrawCall*)cptr; 200 | dc->set_src_color_blend_factor(factor); 201 | } 202 | 203 | void n_drawcall_set_dst_color_blend_factor(void* cptr, unsigned factor) 204 | { 205 | DrawCall* dc = (DrawCall*)cptr; 206 | dc->set_dst_color_blend_factor(factor); 207 | } 208 | 209 | void n_drawcall_set_color_blend_op(void* cptr, unsigned op) 210 | { 211 | DrawCall* dc = (DrawCall*)cptr; 212 | dc->set_color_blend_op(op); 213 | } 214 | 215 | void 
n_drawcall_set_src_alpha_blend_factor(void* cptr, unsigned factor) 216 | { 217 | DrawCall* dc = (DrawCall*)cptr; 218 | dc->set_src_alpha_blend_factor(factor); 219 | } 220 | 221 | void n_drawcall_set_dst_alpha_blend_factor(void* cptr, unsigned factor) 222 | { 223 | DrawCall* dc = (DrawCall*)cptr; 224 | dc->set_dst_alpha_blend_factor(factor); 225 | } 226 | 227 | void n_drawcall_set_alpha_blend_op(void* cptr, unsigned op) 228 | { 229 | DrawCall* dc = (DrawCall*)cptr; 230 | dc->set_alpha_blend_op(op); 231 | } 232 | 233 | void n_drawcall_set_blend_constants(void* cptr, float r, float g, float b, float a) 234 | { 235 | DrawCall* dc = (DrawCall*)cptr; 236 | dc->set_blend_constants(r, g, b, a); 237 | } 238 | 239 | void n_drawcall_set_ith_color_write(void* cptr, int i, unsigned enable) 240 | { 241 | DrawCall* dc = (DrawCall*)cptr; 242 | dc->set_ith_color_write(i, enable != 0); 243 | } 244 | 245 | void n_drawcall_set_ith_color_write_r(void* cptr, int i, unsigned enable) 246 | { 247 | DrawCall* dc = (DrawCall*)cptr; 248 | dc->set_ith_color_write_r(i, enable != 0); 249 | } 250 | 251 | void n_drawcall_set_ith_color_write_g(void* cptr, int i, unsigned enable) 252 | { 253 | DrawCall* dc = (DrawCall*)cptr; 254 | dc->set_ith_color_write_g(i, enable != 0); 255 | } 256 | 257 | void n_drawcall_set_ith_color_write_b(void* cptr, int i, unsigned enable) 258 | { 259 | DrawCall* dc = (DrawCall*)cptr; 260 | dc->set_ith_color_write_b(i, enable != 0); 261 | } 262 | 263 | void n_drawcall_set_ith_alpha_write(void* cptr, int i, unsigned enable) 264 | { 265 | DrawCall* dc = (DrawCall*)cptr; 266 | dc->set_ith_alpha_write(i, enable != 0); 267 | } 268 | 269 | void n_drawcall_set_ith_blend_enable(void* cptr, int i, unsigned enable) 270 | { 271 | DrawCall* dc = (DrawCall*)cptr; 272 | dc->set_ith_blend_enable(i, enable != 0); 273 | } 274 | 275 | void n_drawcall_set_ith_src_color_blend_factor(void* cptr, int i, unsigned factor) 276 | { 277 | DrawCall* dc = (DrawCall*)cptr; 278 | dc->set_ith_src_color_blend_factor(i, factor); 279 | } 280 | 281 | void n_drawcall_set_ith_dst_color_blend_factor(void* cptr, int i, unsigned factor) 282 | { 283 | DrawCall* dc = (DrawCall*)cptr; 284 | dc->set_ith_dst_color_blend_factor(i, factor); 285 | } 286 | 287 | void n_drawcall_set_ith_color_blend_op(void* cptr, int i, unsigned op) 288 | { 289 | DrawCall* dc = (DrawCall*)cptr; 290 | dc->set_ith_color_blend_op(i, op); 291 | } 292 | 293 | void n_drawcall_set_ith_src_alpha_blend_factor(void* cptr, int i, unsigned factor) 294 | { 295 | DrawCall* dc = (DrawCall*)cptr; 296 | dc->set_ith_src_alpha_blend_factor(i, factor); 297 | } 298 | 299 | void n_drawcall_set_ith_dst_alpha_blend_factor(void* cptr, int i, unsigned factor) 300 | { 301 | DrawCall* dc = (DrawCall*)cptr; 302 | dc->set_ith_dst_alpha_blend_factor(i, factor); 303 | } 304 | 305 | void n_drawcall_set_ith_alpha_blend_op(void* cptr, int i, unsigned op) 306 | { 307 | DrawCall* dc = (DrawCall*)cptr; 308 | dc->set_ith_alpha_blend_op(i, op); 309 | } 310 | 311 | void* n_rasterizer_create(void* ptr_param_list, unsigned type_locked) 312 | { 313 | StrArray* param_list = (StrArray*)ptr_param_list; 314 | size_t num_params = param_list->size(); 315 | std::vector params(num_params); 316 | for (size_t i = 0; i < num_params; i++) 317 | { 318 | params[i] = (*param_list)[i].c_str(); 319 | } 320 | return new Rasterizer(params, type_locked); 321 | } 322 | 323 | void n_rasterizer_destroy(void* cptr) 324 | { 325 | delete (Rasterizer*)cptr; 326 | } 327 | 328 | int n_rasterizer_num_params(void* cptr) 329 | { 330 | 
Rasterizer* rasterizer = (Rasterizer*)cptr; 331 | return (int)rasterizer->num_params(); 332 | } 333 | 334 | void n_rasterizer_set_clear_color_buf(void* cptr, int i, unsigned clear) 335 | { 336 | Rasterizer* rasterizer = (Rasterizer*)cptr; 337 | rasterizer->set_clear_color_buf(i, clear != 0); 338 | } 339 | 340 | void n_rasterizer_set_clear_depth_buf(void* cptr, unsigned clear) 341 | { 342 | Rasterizer* rasterizer = (Rasterizer*)cptr; 343 | rasterizer->set_clear_depth_buf(clear != 0); 344 | } 345 | 346 | void n_rasterizer_add_draw_call(void* cptr, void* draw_call) 347 | { 348 | Rasterizer* rasterizer = (Rasterizer*)cptr; 349 | rasterizer->add_draw_call((DrawCall*)draw_call); 350 | } 351 | 352 | int n_rasterizer_launch(void* cptr, void* ptr_colorBufs, void* _depthBuf, void* ptr_resolveBufs, 353 | float* clear_colors, float clear_depth, void* ptr_arg_list, void* ptr_tex2d_list, void* ptr_tex3d_list, void* ptr_cubemap_list, 354 | void** ptr_launch_params, unsigned times_submission) 355 | { 356 | Rasterizer* rasterizer = (Rasterizer*)cptr; 357 | Tex2DArray* colorBufs = (Tex2DArray*)ptr_colorBufs; 358 | Texture2D* depthBuf = (Texture2D*)_depthBuf; 359 | Tex2DArray* resolveBufs = (Tex2DArray*)ptr_resolveBufs; 360 | PtrArray* arg_list = (PtrArray*)ptr_arg_list; 361 | Tex2DArray* tex2d_list = (Tex2DArray*)ptr_tex2d_list; 362 | Tex3DArray* tex3d_list = (Tex3DArray*)ptr_tex3d_list; 363 | CubemapArray* cubemap_list = (CubemapArray*)ptr_cubemap_list; 364 | Rasterizer::LaunchParam** launch_params = (Rasterizer::LaunchParam**)ptr_launch_params; 365 | 366 | if (rasterizer->launch(*colorBufs, depthBuf, *resolveBufs, clear_colors, clear_depth, arg_list->data(), *tex2d_list, *tex3d_list, *cubemap_list, launch_params, times_submission)) 367 | return 0; 368 | else 369 | return -1; 370 | } 371 | 372 | 373 | #ifdef _VkInlineEX 374 | #include "api_Context_ex.inl" 375 | #endif 376 | 377 | 378 | -------------------------------------------------------------------------------- /python/VkInline/cffi_build.py: -------------------------------------------------------------------------------- 1 | import os 2 | if os.path.exists('VkInline/cffi.py'): 3 | os.remove('VkInline/cffi.py') 4 | 5 | import cffi 6 | ffibuilder = cffi.FFI() 7 | ffibuilder.set_source("VkInline.cffi", None) 8 | 9 | ffibuilder.cdef(""" 10 | // utils 11 | void* n_string_array_create(unsigned long long size, const char* const* strs); 12 | unsigned long long n_string_array_size(void* ptr_arr); 13 | void n_string_array_destroy(void* ptr_arr); 14 | void* n_pointer_array_create(unsigned long long size, const void* const* ptrs); 15 | unsigned long long n_pointer_array_size(void* ptr_arr); 16 | void n_pointer_array_destroy(void* ptr_arr); 17 | void* n_dim3_create(unsigned x, unsigned y, unsigned z); 18 | void n_dim3_destroy(void* cptr); 19 | void* n_launch_param_from_count(unsigned count); 20 | void* n_launch_param_from_buffer(void* buf); 21 | void n_launch_param_destroy(void* lp); 22 | 23 | // Context 24 | int n_vkinline_try_init(); 25 | void n_set_verbose(unsigned verbose); 26 | unsigned long long n_size_of(const char* cls); 27 | void n_add_built_in_header(const char* filename, const char* filecontent); 28 | void n_add_inlcude_filename(const char* fn); 29 | void n_add_code_block(const char* line); 30 | void n_wait(); 31 | 32 | void* n_computer_create(void* ptr_param_list, const char* body, unsigned type_locked); 33 | void n_computer_destroy(void* cptr); 34 | int n_computer_num_params(void* cptr); 35 | int n_computer_launch(void* ptr_kernel, void* ptr_gridDim, 
void* ptr_blockDim, void* ptr_arg_list, 36 | void* ptr_tex2d_list, void* ptr_tex3d_list, void* ptr_cubemap_list, unsigned times_submission); 37 | 38 | void* n_drawcall_create(const char* code_body_vert, const char* code_body_frag); 39 | void n_drawcall_destroy(void* cptr); 40 | 41 | void n_drawcall_set_primitive_topology(void* cptr, unsigned topo); 42 | void n_drawcall_set_primitive_restart(void* cptr, unsigned enable); 43 | void n_drawcall_set_polygon_mode(void* cptr, unsigned mode); 44 | void n_drawcall_set_cull_mode(void* cptr, unsigned mode); 45 | void n_drawcall_set_front_face(void* cptr, unsigned mode); 46 | void n_drawcall_set_line_width(void* cptr, float width); 47 | void n_drawcall_set_depth_enable(void* cptr, unsigned enable); 48 | void n_drawcall_set_depth_write(void* cptr, unsigned enable); 49 | void n_drawcall_set_depth_compare_op(void* cptr, unsigned op); 50 | 51 | void n_drawcall_set_color_write(void* cptr, unsigned enable); 52 | void n_drawcall_set_color_write_r(void* cptr, unsigned enable); 53 | void n_drawcall_set_color_write_g(void* cptr, unsigned enable); 54 | void n_drawcall_set_color_write_b(void* cptr, unsigned enable); 55 | void n_drawcall_set_alpha_write(void* cptr, unsigned enable); 56 | void n_drawcall_set_blend_enable(void* cptr, unsigned enable); 57 | void n_drawcall_set_src_color_blend_factor(void* cptr, unsigned factor); 58 | void n_drawcall_set_dst_color_blend_factor(void* cptr, unsigned factor); 59 | void n_drawcall_set_color_blend_op(void* cptr, unsigned op); 60 | void n_drawcall_set_src_alpha_blend_factor(void* cptr, unsigned factor); 61 | void n_drawcall_set_dst_alpha_blend_factor(void* cptr, unsigned factor); 62 | void n_drawcall_set_alpha_blend_op(void* cptr, unsigned op); 63 | 64 | void n_drawcall_set_blend_constants(void* cptr, float r, float g, float b, float a); 65 | 66 | void n_drawcall_set_ith_color_write(void* cptr, int i, unsigned enable); 67 | void n_drawcall_set_ith_color_write_r(void* cptr, int i, unsigned enable); 68 | void n_drawcall_set_ith_color_write_g(void* cptr, int i, unsigned enable); 69 | void n_drawcall_set_ith_color_write_b(void* cptr, int i, unsigned enable); 70 | void n_drawcall_set_ith_alpha_write(void* cptr, int i, unsigned enable); 71 | void n_drawcall_set_ith_blend_enable(void* cptr, int i, unsigned enable); 72 | void n_drawcall_set_ith_src_color_blend_factor(void* cptr, int i, unsigned factor); 73 | void n_drawcall_set_ith_dst_color_blend_factor(void* cptr, int i, unsigned factor); 74 | void n_drawcall_set_ith_color_blend_op(void* cptr, int i, unsigned op); 75 | void n_drawcall_set_ith_src_alpha_blend_factor(void* cptr, int i, unsigned factor); 76 | void n_drawcall_set_ith_dst_alpha_blend_factor(void* cptr, int i, unsigned factor); 77 | void n_drawcall_set_ith_alpha_blend_op(void* cptr, int i, unsigned op); 78 | 79 | void* n_rasterizer_create(void* ptr_param_list, unsigned type_locked); 80 | void n_rasterizer_destroy(void* cptr); 81 | int n_rasterizer_num_params(void* cptr); 82 | void n_rasterizer_set_clear_color_buf(void* cptr, int i, unsigned clear); 83 | void n_rasterizer_set_clear_depth_buf(void* cptr, unsigned clear); 84 | void n_rasterizer_add_draw_call(void* cptr, void* draw_call); 85 | int n_rasterizer_launch(void* cptr, void* ptr_colorBufs, void* _depthBuf, void* ptr_resolveBufs, 86 | float* clear_colors, float clear_depth, void* ptr_arg_list, void* ptr_tex2d_list, void* ptr_tex3d_list, void* ptr_cubemap_list, 87 | void** ptr_launch_params, unsigned times_submission); 88 | 89 | // ShaderViewable 90 | const char* 
n_sv_name_view_type(void* cptr); 91 | void n_sv_destroy(void* cptr); 92 | void* n_svint32_create(int v); 93 | int n_svint32_value(void* cptr); 94 | void* n_svuint32_create(unsigned v); 95 | unsigned n_svuint32_value(void* cptr); 96 | void* n_svfloat_create(float v); 97 | float n_svfloat_value(void* cptr); 98 | void* n_svdouble_create(double v); 99 | double n_svdouble_value(void* cptr); 100 | 101 | void* n_svivec2_create(const int* v); 102 | void n_svivec2_value(void* cptr, int* v); 103 | void* n_svivec3_create(const int* v); 104 | void n_svivec3_value(void* cptr, int* v); 105 | void* n_svivec4_create(const int* v); 106 | void n_svivec4_value(void* cptr, int* v); 107 | 108 | void* n_svuvec2_create(const unsigned* v); 109 | void n_svuvec2_value(void* cptr, unsigned* v); 110 | void* n_svuvec3_create(const unsigned* v); 111 | void n_svuvec3_value(void* cptr, unsigned* v); 112 | void* n_svuvec4_create(const unsigned* v); 113 | void n_svuvec4_value(void* cptr, unsigned* v); 114 | 115 | void* n_svvec2_create(const float* v); 116 | void n_svvec2_value(void* cptr, float* v); 117 | void* n_svvec3_create(const float* v); 118 | void n_svvec3_value(void* cptr, float* v); 119 | void* n_svvec4_create(const float* v); 120 | void n_svvec4_value(void* cptr, float* v); 121 | 122 | void* n_svdvec2_create(const double* v); 123 | void n_svdvec2_value(void* cptr, double* v); 124 | void* n_svdvec3_create(const double* v); 125 | void n_svdvec3_value(void* cptr, double* v); 126 | void* n_svdvec4_create(const double* v); 127 | void n_svdvec4_value(void* cptr, double* v); 128 | 129 | void* n_svmat2x2_create(const float* v); 130 | void n_svmat2x2_value(void* cptr, float* v); 131 | void* n_svmat2x3_create(const float* v); 132 | void n_svmat2x3_value(void* cptr, float* v); 133 | void* n_svmat2x4_create(const float* v); 134 | void n_svmat2x4_value(void* cptr, float* v); 135 | void* n_svmat3x2_create(const float* v); 136 | void n_svmat3x2_value(void* cptr, float* v); 137 | void* n_svmat3x3_create(const float* v); 138 | void n_svmat3x3_value(void* cptr, float* v); 139 | void* n_svmat3x4_create(const float* v); 140 | void n_svmat3x4_value(void* cptr, float* v); 141 | void* n_svmat4x2_create(const float* v); 142 | void n_svmat4x2_value(void* cptr, float* v); 143 | void* n_svmat4x3_create(const float* v); 144 | void n_svmat4x3_value(void* cptr, float* v); 145 | void* n_svmat4x4_create(const float* v); 146 | void n_svmat4x4_value(void* cptr, float* v); 147 | 148 | void* n_svdmat2x2_create(const double* v); 149 | void n_svdmat2x2_value(void* cptr, double* v); 150 | void* n_svdmat2x3_create(const double* v); 151 | void n_svdmat2x3_value(void* cptr, double* v); 152 | void* n_svdmat2x4_create(const double* v); 153 | void n_svdmat2x4_value(void* cptr, double* v); 154 | void* n_svdmat3x2_create(const double* v); 155 | void n_svdmat3x2_value(void* cptr, double* v); 156 | void* n_svdmat3x3_create(const double* v); 157 | void n_svdmat3x3_value(void* cptr, double* v); 158 | void* n_svdmat3x4_create(const double* v); 159 | void n_svdmat3x4_value(void* cptr, double* v); 160 | void* n_svdmat4x2_create(const double* v); 161 | void n_svdmat4x2_value(void* cptr, double* v); 162 | void* n_svdmat4x3_create(const double* v); 163 | void n_svdmat4x3_value(void* cptr, double* v); 164 | void* n_svdmat4x4_create(const double* v); 165 | void n_svdmat4x4_value(void* cptr, double* v); 166 | 167 | // SVBuffer 168 | void* n_svbuffer_create(const char* elem_type, unsigned long long size, void* hdata); 169 | const char* n_svbuffer_name_elem_type(void* 
cptr); 170 | unsigned long long n_svbuffer_elem_size(void* cptr); 171 | unsigned long long n_svbuffer_size(void* cptr); 172 | void n_svbuffer_from_host(void* cptr, void* hdata); 173 | void n_svbuffer_to_host(void* cptr, void* hdata, unsigned long long begin, unsigned long long end); 174 | 175 | // SVCombine 176 | void* n_svcombine_create(void* ptr_svs, void* ptr_names, const char* operations); 177 | 178 | // SVObjBuffer 179 | void* n_svobjbuffer_create(void* ptr_svs); 180 | const char* n_svobjbuffer_name_elem_type(void* cptr); 181 | unsigned long long n_svobjbuffer_elem_size(void* cptr); 182 | unsigned long long n_svobjbuffer_size(void* cptr); 183 | void n_svobjbuffer_update(void* cptr); 184 | 185 | // Texture2D 186 | void* n_texture2d_create(int width, int height, unsigned vkformat, unsigned isDepth, unsigned isStencil, unsigned sampleCount); 187 | void n_texture2d_release(void* tex2d); 188 | int n_texture2d_width(void* tex2d); 189 | int n_texture2d_height(void* tex2d); 190 | unsigned n_texture2d_pixelsize(void* tex2d); 191 | unsigned n_texture2d_channelcount(void* tex2d); 192 | unsigned n_texture2d_samplecount(void* tex2d); 193 | unsigned n_texture2d_vkformat(void* tex2d); 194 | void n_texture2d_upload(void* tex2d, void* hdata); 195 | void n_texture2d_download(void* tex2d, void* hdata); 196 | 197 | // Texture3D 198 | void* n_texture3d_create(int dimX, int dimY, int dimZ, unsigned vkformat); 199 | void n_texture3d_release(void* tex3d); 200 | int n_texture3d_dimX(void* tex3d); 201 | int n_texture3d_dimY(void* tex3d); 202 | int n_texture3d_dimZ(void* tex3d); 203 | unsigned n_texture3d_pixelsize(void* tex3d); 204 | unsigned n_texture3d_channelcount(void* tex3d); 205 | unsigned n_texture3d_vkformat(void* tex3d); 206 | void n_texture3d_upload(void* tex3d, void* hdata); 207 | void n_texture3d_download(void* tex3d, void* hdata); 208 | 209 | // Cubemap 210 | void* n_cubemap_create(int width, int height, unsigned vkformat); 211 | void n_cubemap_release(void* cubemap); 212 | int n_cubemap_width(void* cubemap); 213 | int n_cubemap_height(void* cubemap); 214 | unsigned n_cubemap_pixelsize(void* cubemap); 215 | unsigned n_cubemap_channelcount(void* cubemap); 216 | unsigned n_cubemap_vkformat(void* cubemap); 217 | void n_cubemap_upload(void* cubemap, void* hdata); 218 | void n_cubemap_download(void* cubemap, void* hdata); 219 | 220 | // Extensions 221 | void* n_blas_create_triangles(void* indBuf, void* posBuf); 222 | void* n_blas_create_procedure(void* aabbBuf); 223 | void n_blas_destroy(void* ptr_blas); 224 | void* n_mat4_create(const float* v); 225 | void n_mat4_destroy(void* ptr); 226 | void* n_tlas_create(void* ptr_blases, void* ptr_transes); 227 | void n_tlas_destroy(void* ptr); 228 | void* n_hit_shaders_create(const char* closest_hit, const char* intersection); 229 | void n_hit_shaders_destroy(void* ptr); 230 | void* n_raytracer_create(void* ptr_param_list, const char* body_raygen, void* ptr_body_miss, void* ptr_body_hit, unsigned maxRecursionDepth, unsigned type_locked); 231 | void n_raytracer_destroy(void* cptr); 232 | int n_raytracer_num_params(void* cptr); 233 | int n_raytracer_launch(void* ptr_raytracer, void* ptr_glbDim, void* ptr_arg_list, void* ptr_tlas_list, void* ptr_tex2d_list, void* ptr_tex3d_list, void* ptr_cubemap_list, unsigned times_submission); 234 | """) 235 | 236 | 237 | ffibuilder.compile() 238 | 239 | -------------------------------------------------------------------------------- /python/VkInline/Context.py: 
-------------------------------------------------------------------------------- 1 | import numpy as np 2 | from .Native import ffi, native 3 | from .ShaderViewable import * 4 | from .utils import * 5 | 6 | def Set_Verbose(verbose=True): 7 | native.n_set_verbose(verbose) 8 | 9 | def Size_Of(clsname): 10 | return native.n_size_of(clsname.encode('utf-8')) 11 | 12 | def Add_Built_In_Header(filename, filecontent): 13 | native.n_add_built_in_header(filename.encode('utf-8'), filecontent.encode('utf-8')) 14 | 15 | def Add_Inlcude_Filename(filename): 16 | native.n_add_inlcude_filename(filename.encode('utf-8')) 17 | 18 | def Add_Code_Block(code): 19 | native.n_add_code_block(code.encode('utf-8')) 20 | 21 | def Wait(): 22 | native.n_wait() 23 | 24 | class Computer: 25 | def __init__(self, param_names, body, type_locked=False): 26 | o_param_names = StrArray(param_names) 27 | self.m_cptr = native.n_computer_create(o_param_names.m_cptr, body.encode('utf-8'), type_locked) 28 | 29 | def __del__(self): 30 | native.n_computer_destroy(self.m_cptr) 31 | 32 | def num_params(self): 33 | return native.n_computer_num_params(self.m_cptr) 34 | 35 | def launch(self, gridDim, blockDim, args, tex2ds=[], tex3ds=[], cubemaps=[], times_submission = 1): 36 | d_gridDim = Dim3(gridDim) 37 | d_blockDim = Dim3(blockDim) 38 | arg_list = ObjArray(args) 39 | tex2d_list = ObjArray(tex2ds) 40 | tex3d_list = ObjArray(tex3ds) 41 | cubemap_list = ObjArray(cubemaps) 42 | native.n_computer_launch( 43 | self.m_cptr, 44 | d_gridDim.m_cptr, 45 | d_blockDim.m_cptr, 46 | arg_list.m_cptr, 47 | tex2d_list.m_cptr, 48 | tex3d_list.m_cptr, 49 | cubemap_list.m_cptr, 50 | times_submission) 51 | 52 | class For: 53 | def __init__(self, param_names, name_inner, body, block_size=128, type_locked=False): 54 | self.block_size = block_size 55 | param_names = param_names + ['_begin', '_end'] 56 | o_param_names = StrArray(param_names) 57 | whole_body = body + ''' 58 | void main() 59 | {{ 60 | uint id = gl_GlobalInvocationID.x + _begin; 61 | if(id>=_end) return; 62 | {0}(id); 63 | }} 64 | '''.format(name_inner) 65 | self.m_cptr = native.n_computer_create(o_param_names.m_cptr, whole_body.encode('utf-8'), type_locked) 66 | 67 | def __del__(self): 68 | native.n_computer_destroy(self.m_cptr) 69 | 70 | def num_params(self): 71 | return native.n_computer_num_params(self.m_cptr) - 2 72 | 73 | def launch(self, begin, end, args, tex2ds=[], tex3ds=[], cubemaps=[], times_submission = 1): 74 | svbegin = SVUInt32(begin) 75 | svend = SVUInt32(end) 76 | args = args + [svbegin, svend] 77 | numBlocks = int((end - begin + self.block_size - 1) / self.block_size) 78 | d_gridDim = Dim3(numBlocks) 79 | d_blockDim = Dim3(self.block_size) 80 | arg_list = ObjArray(args) 81 | tex2d_list = ObjArray(tex2ds) 82 | tex3d_list = ObjArray(tex3ds) 83 | cubemap_list = ObjArray(cubemaps) 84 | native.n_computer_launch( 85 | self.m_cptr, 86 | d_gridDim.m_cptr, 87 | d_blockDim.m_cptr, 88 | arg_list.m_cptr, 89 | tex2d_list.m_cptr, 90 | tex3d_list.m_cptr, 91 | cubemap_list.m_cptr, 92 | times_submission) 93 | 94 | def launch_n(self, n, args, tex2ds=[], tex3ds=[], cubemaps=[], times_submission = 1): 95 | svbegin = SVUInt32(0) 96 | svend = SVUInt32(n) 97 | args = args + [svbegin, svend] 98 | numBlocks = int((n + self.block_size - 1) / self.block_size) 99 | d_gridDim = Dim3(numBlocks) 100 | d_blockDim = Dim3(self.block_size) 101 | arg_list = ObjArray(args) 102 | tex2d_list = ObjArray(tex2ds) 103 | tex3d_list = ObjArray(tex3ds) 104 | cubemap_list = ObjArray(cubemaps) 105 | native.n_computer_launch( 
106 | self.m_cptr, 107 | d_gridDim.m_cptr, 108 | d_blockDim.m_cptr, 109 | arg_list.m_cptr, 110 | tex2d_list.m_cptr, 111 | tex3d_list.m_cptr, 112 | cubemap_list.m_cptr, 113 | times_submission) 114 | 115 | class DrawCall: 116 | def __init__(self, code_body_vert, code_body_frag, options={}): 117 | self.m_cptr = native.n_drawcall_create(code_body_vert.encode('utf-8'), code_body_frag.encode('utf-8')) 118 | 119 | if 'primitive_topology' in options: 120 | native.n_drawcall_set_primitive_topology(self.m_cptr, options['primitive_topology']) 121 | 122 | if 'primitive_restart' in options: 123 | native.n_drawcall_set_primitive_restart(self.m_cptr, options['primitive_restart']) 124 | 125 | if 'polygon_mode' in options: 126 | native.n_drawcall_set_polygon_mode(self.m_cptr, options['polygon_mode']) 127 | 128 | if 'cull_mode' in options: 129 | native.n_drawcall_set_cull_mode(self.m_cptr, options['cull_mode']) 130 | 131 | if 'front_face' in options: 132 | native.n_drawcall_set_front_face(self.m_cptr, options['front_face']) 133 | 134 | if 'line_width' in options: 135 | native.n_drawcall_set_line_width(self.m_cptr, options['line_width']) 136 | 137 | if 'depth_enable' in options: 138 | native.n_drawcall_set_depth_enable(self.m_cptr, options['depth_enable']) 139 | 140 | if 'depth_write' in options: 141 | native.n_drawcall_set_depth_write(self.m_cptr, options['depth_write']) 142 | 143 | if 'depth_compare_op' in options: 144 | native.n_drawcall_set_depth_compare_op(self.m_cptr, options['depth_compare_op']) 145 | 146 | if 'color_write' in options: 147 | native.n_drawcall_set_color_write(self.m_cptr, options['color_write']) 148 | 149 | if 'color_write_r' in options: 150 | native.n_drawcall_set_color_write_r(self.m_cptr, options['color_write_r']) 151 | 152 | if 'color_write_g' in options: 153 | native.n_drawcall_set_color_write_g(self.m_cptr, options['color_write_g']) 154 | 155 | if 'color_write_b' in options: 156 | native.n_drawcall_set_color_write_b(self.m_cptr, options['color_write_b']) 157 | 158 | if 'alpha_write' in options: 159 | native.n_drawcall_set_alpha_write(self.m_cptr, options['alpha_write']) 160 | 161 | if 'blend_enable' in options: 162 | native.n_drawcall_set_blend_enable(self.m_cptr, options['blend_enable']) 163 | 164 | # for compatibility with legacy code 165 | if 'alpha_blend' in options: 166 | native.n_drawcall_set_blend_enable(self.m_cptr, options['alpha_blend']) 167 | 168 | if 'src_color_blend_factor' in options: 169 | native.n_drawcall_set_src_color_blend_factor(self.m_cptr, options['src_color_blend_factor']) 170 | 171 | if 'dst_color_blend_factor' in options: 172 | native.n_drawcall_set_dst_color_blend_factor(self.m_cptr, options['dst_color_blend_factor']) 173 | 174 | if 'color_blend_op' in options: 175 | native.n_drawcall_set_color_blend_op(self.m_cptr, options['color_blend_op']) 176 | 177 | if 'src_alpha_blend_factor' in options: 178 | native.n_drawcall_set_src_alpha_blend_factor(self.m_cptr, options['src_alpha_blend_factor']) 179 | 180 | if 'dst_alpha_blend_factor' in options: 181 | native.n_drawcall_set_dst_alpha_blend_factor(self.m_cptr, options['dst_alpha_blend_factor']) 182 | 183 | if 'alpha_blend_op' in options: 184 | native.n_drawcall_set_alpha_blend_op(self.m_cptr, options['alpha_blend_op']) 185 | 186 | if 'blend_constants' in options: 187 | c = options['blend_constants'] 188 | native.n_drawcall_set_blend_constants(self.m_cptr, c[0], c[1], c[2], c[3]) 189 | 190 | if 'color_attachements' in options: 191 | lst = options['color_attachements'] 192 | for i in range(len(lst)): 193 | if
'color_write' in lst[i]: 194 | native.n_drawcall_set_ith_color_write(self.m_cptr, i, lst[i]['color_write']) 195 | if 'color_write_r' in lst[i]: 196 | native.n_drawcall_set_ith_color_write_r(self.m_cptr, i, lst[i]['color_write_r']) 197 | if 'color_write_g' in lst[i]: 198 | native.n_drawcall_set_ith_color_write_g(self.m_cptr, i, lst[i]['color_write_g']) 199 | if 'color_write_b' in lst[i]: 200 | native.n_drawcall_set_ith_color_write_b(self.m_cptr, i, lst[i]['color_write_b']) 201 | if 'alpha_write' in lst[i]: 202 | native.n_drawcall_set_ith_alpha_write(self.m_cptr, i, lst[i]['alpha_write']) 203 | if 'blend_enable' in lst[i]: 204 | native.n_drawcall_set_ith_blend_enable(self.m_cptr, i, lst[i]['blend_enable']) 205 | if 'src_color_blend_factor' in lst[i]: 206 | native.n_drawcall_set_ith_src_color_blend_factor(self.m_cptr, i, lst[i]['src_color_blend_factor']) 207 | if 'dst_color_blend_factor' in lst[i]: 208 | native.n_drawcall_set_ith_dst_color_blend_factor(self.m_cptr, i, lst[i]['dst_color_blend_factor']) 209 | if 'color_blend_op' in lst[i]: 210 | native.n_drawcall_set_ith_color_blend_op(self.m_cptr, i, lst[i]['color_blend_op']) 211 | if 'src_alpha_blend_factor' in lst[i]: 212 | native.n_drawcall_set_ith_src_alpha_blend_factor(self.m_cptr, i, lst[i]['src_alpha_blend_factor']) 213 | if 'dst_alpha_blend_factor' in lst[i]: 214 | native.n_drawcall_set_ith_dst_alpha_blend_factor(self.m_cptr, i, lst[i]['dst_alpha_blend_factor']) 215 | if 'alpha_blend_op' in lst[i]: 216 | native.n_drawcall_set_ith_alpha_blend_op(self.m_cptr, i, lst[i]['alpha_blend_op']) 217 | 218 | 219 | def __del__(self): 220 | native.n_drawcall_destroy(self.m_cptr) 221 | 222 | 223 | class Rasterizer: 224 | def __init__(self, param_names, type_locked=False): 225 | o_param_names = StrArray(param_names) 226 | self.m_cptr = native.n_rasterizer_create(o_param_names.m_cptr, type_locked) 227 | self.m_draw_calls = [] 228 | 229 | def __del__(self): 230 | native.n_rasterizer_destroy(self.m_cptr) 231 | 232 | def num_params(self): 233 | return native.n_rasterizer_num_params(self.m_cptr) 234 | 235 | def set_clear_color_buf(self, i, clear): 236 | native.n_rasterizer_set_clear_color_buf(self.m_cptr, i, clear) 237 | 238 | def set_clear_depth_buf(self, clear): 239 | native.n_rasterizer_set_clear_depth_buf(self.m_cptr, clear) 240 | 241 | def add_draw_call(self, draw_call): 242 | self.m_draw_calls += [draw_call] 243 | native.n_rasterizer_add_draw_call(self.m_cptr, draw_call.m_cptr) 244 | 245 | def launch(self, launch_params, colorBufs, depthBuf, clear_colors, clear_depth, args, tex2ds=[], tex3ds=[], cubemaps=[], resolveBufs=[], times_submission = 1): 246 | colorBuf_list = ObjArray(colorBufs) 247 | p_depthBuf = ffi.NULL 248 | if depthBuf!=None: 249 | p_depthBuf = depthBuf.m_cptr 250 | resolveBuf_list = ObjArray(resolveBufs) 251 | arg_list = ObjArray(args) 252 | tex2d_list = ObjArray(tex2ds) 253 | tex3d_list = ObjArray(tex3ds) 254 | cubemap_list = ObjArray(cubemaps) 255 | launch_param_list = [LaunchParam(obj) for obj in launch_params] 256 | ptrs_launch_param_list = [lp.m_cptr for lp in launch_param_list] 257 | native.n_rasterizer_launch( 258 | self.m_cptr, 259 | colorBuf_list.m_cptr, 260 | p_depthBuf, 261 | resolveBuf_list.m_cptr, 262 | clear_colors, 263 | clear_depth, 264 | arg_list.m_cptr, 265 | tex2d_list.m_cptr, 266 | tex3d_list.m_cptr, 267 | cubemap_list.m_cptr, 268 | ptrs_launch_param_list, 269 | times_submission) 270 | 271 | 272 | 273 | -------------------------------------------------------------------------------- /python/api.h: 
-------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #if defined(WIN32) || defined(_WIN32) || defined(__WIN32__) || defined(__NT__) 4 | #define PY_VkInline_API __declspec(dllexport) 5 | #else 6 | #define PY_VkInline_API 7 | #endif 8 | 9 | extern "C" 10 | { 11 | // utils 12 | PY_VkInline_API void* n_string_array_create(unsigned long long size, const char* const* strs); 13 | PY_VkInline_API unsigned long long n_string_array_size(void* ptr_arr); 14 | PY_VkInline_API void n_string_array_destroy(void* ptr_arr); 15 | PY_VkInline_API void* n_pointer_array_create(unsigned long long size, const void* const* ptrs); 16 | PY_VkInline_API unsigned long long n_pointer_array_size(void* ptr_arr); 17 | PY_VkInline_API void n_pointer_array_destroy(void* ptr_arr); 18 | PY_VkInline_API void* n_dim3_create(unsigned x, unsigned y, unsigned z); 19 | PY_VkInline_API void n_dim3_destroy(void* cptr); 20 | 21 | PY_VkInline_API void* n_launch_param_from_count(unsigned count); 22 | PY_VkInline_API void* n_launch_param_from_buffer(void* buf); 23 | PY_VkInline_API void n_launch_param_destroy(void* lp); 24 | 25 | // Context 26 | PY_VkInline_API int n_vkinline_try_init(); 27 | PY_VkInline_API void n_set_verbose(unsigned verbose); 28 | PY_VkInline_API unsigned long long n_size_of(const char* cls); 29 | PY_VkInline_API void n_add_built_in_header(const char* filename, const char* filecontent); 30 | PY_VkInline_API void n_add_inlcude_filename(const char* fn); 31 | PY_VkInline_API void n_add_code_block(const char* line); 32 | PY_VkInline_API void n_wait(); 33 | 34 | PY_VkInline_API void* n_computer_create(void* ptr_param_list, const char* body, unsigned type_locked); 35 | PY_VkInline_API void n_computer_destroy(void* cptr); 36 | PY_VkInline_API int n_computer_num_params(void* cptr); 37 | PY_VkInline_API int n_computer_launch(void* ptr_kernel, void* ptr_gridDim, void* ptr_blockDim, void* ptr_arg_list, 38 | void* ptr_tex2d_list, void* ptr_tex3d_list, void* ptr_cubemap_list, unsigned times_submission); 39 | 40 | PY_VkInline_API void* n_drawcall_create(const char* code_body_vert, const char* code_body_frag); 41 | PY_VkInline_API void n_drawcall_destroy(void* cptr); 42 | 43 | PY_VkInline_API void n_drawcall_set_primitive_topology(void* cptr, unsigned topo); 44 | PY_VkInline_API void n_drawcall_set_primitive_restart(void* cptr, unsigned enable); 45 | PY_VkInline_API void n_drawcall_set_polygon_mode(void* cptr, unsigned mode); 46 | PY_VkInline_API void n_drawcall_set_cull_mode(void* cptr, unsigned mode); 47 | PY_VkInline_API void n_drawcall_set_front_face(void* cptr, unsigned mode); 48 | PY_VkInline_API void n_drawcall_set_line_width(void* cptr, float width); 49 | PY_VkInline_API void n_drawcall_set_depth_enable(void* cptr, unsigned enable); 50 | PY_VkInline_API void n_drawcall_set_depth_write(void* cptr, unsigned enable); 51 | PY_VkInline_API void n_drawcall_set_depth_compare_op(void* cptr, unsigned op); 52 | 53 | PY_VkInline_API void n_drawcall_set_color_write(void* cptr, unsigned enable); 54 | PY_VkInline_API void n_drawcall_set_color_write_r(void* cptr, unsigned enable); 55 | PY_VkInline_API void n_drawcall_set_color_write_g(void* cptr, unsigned enable); 56 | PY_VkInline_API void n_drawcall_set_color_write_b(void* cptr, unsigned enable); 57 | PY_VkInline_API void n_drawcall_set_alpha_write(void* cptr, unsigned enable); 58 | PY_VkInline_API void n_drawcall_set_blend_enable(void* cptr, unsigned enable); 59 | PY_VkInline_API void n_drawcall_set_src_color_blend_factor(void* cptr, 
unsigned factor); 60 | PY_VkInline_API void n_drawcall_set_dst_color_blend_factor(void* cptr, unsigned factor); 61 | PY_VkInline_API void n_drawcall_set_color_blend_op(void* cptr, unsigned op); 62 | PY_VkInline_API void n_drawcall_set_src_alpha_blend_factor(void* cptr, unsigned factor); 63 | PY_VkInline_API void n_drawcall_set_dst_alpha_blend_factor(void* cptr, unsigned factor); 64 | PY_VkInline_API void n_drawcall_set_alpha_blend_op(void* cptr, unsigned op); 65 | 66 | PY_VkInline_API void n_drawcall_set_blend_constants(void* cptr, float r, float g, float b, float a); 67 | 68 | PY_VkInline_API void n_drawcall_set_ith_color_write(void* cptr, int i, unsigned enable); 69 | PY_VkInline_API void n_drawcall_set_ith_color_write_r(void* cptr, int i, unsigned enable); 70 | PY_VkInline_API void n_drawcall_set_ith_color_write_g(void* cptr, int i, unsigned enable); 71 | PY_VkInline_API void n_drawcall_set_ith_color_write_b(void* cptr, int i, unsigned enable); 72 | PY_VkInline_API void n_drawcall_set_ith_alpha_write(void* cptr, int i, unsigned enable); 73 | PY_VkInline_API void n_drawcall_set_ith_blend_enable(void* cptr, int i, unsigned enable); 74 | PY_VkInline_API void n_drawcall_set_ith_src_color_blend_factor(void* cptr, int i, unsigned factor); 75 | PY_VkInline_API void n_drawcall_set_ith_dst_color_blend_factor(void* cptr, int i, unsigned factor); 76 | PY_VkInline_API void n_drawcall_set_ith_color_blend_op(void* cptr, int i, unsigned op); 77 | PY_VkInline_API void n_drawcall_set_ith_src_alpha_blend_factor(void* cptr, int i, unsigned factor); 78 | PY_VkInline_API void n_drawcall_set_ith_dst_alpha_blend_factor(void* cptr, int i, unsigned factor); 79 | PY_VkInline_API void n_drawcall_set_ith_alpha_blend_op(void* cptr, int i, unsigned op); 80 | 81 | PY_VkInline_API void* n_rasterizer_create(void* ptr_param_list, unsigned type_locked); 82 | PY_VkInline_API void n_rasterizer_destroy(void* cptr); 83 | PY_VkInline_API int n_rasterizer_num_params(void* cptr); 84 | PY_VkInline_API void n_rasterizer_set_clear_color_buf(void* cptr, int i, unsigned clear); 85 | PY_VkInline_API void n_rasterizer_set_clear_depth_buf(void* cptr, unsigned clear); 86 | PY_VkInline_API void n_rasterizer_add_draw_call(void* cptr, void* draw_call); 87 | PY_VkInline_API int n_rasterizer_launch(void* cptr, void* ptr_colorBufs, void* _depthBuf, void* ptr_resolveBufs, 88 | float* clear_colors, float clear_depth, void* ptr_arg_list, void* ptr_tex2d_list, void* ptr_tex3d_list, void* ptr_cubemap_list, 89 | void** ptr_launch_params, unsigned times_submission); 90 | 91 | // ShaderViewable 92 | PY_VkInline_API const char* n_sv_name_view_type(void* cptr); 93 | PY_VkInline_API void n_sv_destroy(void* cptr); 94 | PY_VkInline_API void* n_svint32_create(int v); 95 | PY_VkInline_API int n_svint32_value(void* cptr); 96 | PY_VkInline_API void* n_svuint32_create(unsigned v); 97 | PY_VkInline_API unsigned n_svuint32_value(void* cptr); 98 | PY_VkInline_API void* n_svfloat_create(float v); 99 | PY_VkInline_API float n_svfloat_value(void* cptr); 100 | PY_VkInline_API void* n_svdouble_create(double v); 101 | PY_VkInline_API double n_svdouble_value(void* cptr); 102 | 103 | PY_VkInline_API void* n_svivec2_create(const int* v); 104 | PY_VkInline_API void n_svivec2_value(void* cptr, int* v); 105 | PY_VkInline_API void* n_svivec3_create(const int* v); 106 | PY_VkInline_API void n_svivec3_value(void* cptr, int* v); 107 | PY_VkInline_API void* n_svivec4_create(const int* v); 108 | PY_VkInline_API void n_svivec4_value(void* cptr, int* v); 109 | 110 | PY_VkInline_API 
void* n_svuvec2_create(const unsigned* v); 111 | PY_VkInline_API void n_svuvec2_value(void* cptr, unsigned* v); 112 | PY_VkInline_API void* n_svuvec3_create(const unsigned* v); 113 | PY_VkInline_API void n_svuvec3_value(void* cptr, unsigned* v); 114 | PY_VkInline_API void* n_svuvec4_create(const unsigned* v); 115 | PY_VkInline_API void n_svuvec4_value(void* cptr, unsigned* v); 116 | 117 | PY_VkInline_API void* n_svvec2_create(const float* v); 118 | PY_VkInline_API void n_svvec2_value(void* cptr, float* v); 119 | PY_VkInline_API void* n_svvec3_create(const float* v); 120 | PY_VkInline_API void n_svvec3_value(void* cptr, float* v); 121 | PY_VkInline_API void* n_svvec4_create(const float* v); 122 | PY_VkInline_API void n_svvec4_value(void* cptr, float* v); 123 | 124 | PY_VkInline_API void* n_svdvec2_create(const double* v); 125 | PY_VkInline_API void n_svdvec2_value(void* cptr, double* v); 126 | PY_VkInline_API void* n_svdvec3_create(const double* v); 127 | PY_VkInline_API void n_svdvec3_value(void* cptr, double* v); 128 | PY_VkInline_API void* n_svdvec4_create(const double* v); 129 | PY_VkInline_API void n_svdvec4_value(void* cptr, double* v); 130 | 131 | PY_VkInline_API void* n_svmat2x2_create(const float* v); 132 | PY_VkInline_API void n_svmat2x2_value(void* cptr, float* v); 133 | PY_VkInline_API void* n_svmat2x3_create(const float* v); 134 | PY_VkInline_API void n_svmat2x3_value(void* cptr, float* v); 135 | PY_VkInline_API void* n_svmat2x4_create(const float* v); 136 | PY_VkInline_API void n_svmat2x4_value(void* cptr, float* v); 137 | PY_VkInline_API void* n_svmat3x2_create(const float* v); 138 | PY_VkInline_API void n_svmat3x2_value(void* cptr, float* v); 139 | PY_VkInline_API void* n_svmat3x3_create(const float* v); 140 | PY_VkInline_API void n_svmat3x3_value(void* cptr, float* v); 141 | PY_VkInline_API void* n_svmat3x4_create(const float* v); 142 | PY_VkInline_API void n_svmat3x4_value(void* cptr, float* v); 143 | PY_VkInline_API void* n_svmat4x2_create(const float* v); 144 | PY_VkInline_API void n_svmat4x2_value(void* cptr, float* v); 145 | PY_VkInline_API void* n_svmat4x3_create(const float* v); 146 | PY_VkInline_API void n_svmat4x3_value(void* cptr, float* v); 147 | PY_VkInline_API void* n_svmat4x4_create(const float* v); 148 | PY_VkInline_API void n_svmat4x4_value(void* cptr, float* v); 149 | 150 | PY_VkInline_API void* n_svdmat2x2_create(const double* v); 151 | PY_VkInline_API void n_svdmat2x2_value(void* cptr, double* v); 152 | PY_VkInline_API void* n_svdmat2x3_create(const double* v); 153 | PY_VkInline_API void n_svdmat2x3_value(void* cptr, double* v); 154 | PY_VkInline_API void* n_svdmat2x4_create(const double* v); 155 | PY_VkInline_API void n_svdmat2x4_value(void* cptr, double* v); 156 | PY_VkInline_API void* n_svdmat3x2_create(const double* v); 157 | PY_VkInline_API void n_svdmat3x2_value(void* cptr, double* v); 158 | PY_VkInline_API void* n_svdmat3x3_create(const double* v); 159 | PY_VkInline_API void n_svdmat3x3_value(void* cptr, double* v); 160 | PY_VkInline_API void* n_svdmat3x4_create(const double* v); 161 | PY_VkInline_API void n_svdmat3x4_value(void* cptr, double* v); 162 | PY_VkInline_API void* n_svdmat4x2_create(const double* v); 163 | PY_VkInline_API void n_svdmat4x2_value(void* cptr, double* v); 164 | PY_VkInline_API void* n_svdmat4x3_create(const double* v); 165 | PY_VkInline_API void n_svdmat4x3_value(void* cptr, double* v); 166 | PY_VkInline_API void* n_svdmat4x4_create(const double* v); 167 | PY_VkInline_API void n_svdmat4x4_value(void* cptr, double* v); 168 | 
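	/* Illustrative sketch, not part of the original header: a host-side round trip through the
	   SVBuffer entry points declared just below would look roughly like
	       float h[4] = { 1.f, 2.f, 3.f, 4.f };
	       void* buf = n_svbuffer_create("float", 4, h);   // hdata presumably supplies the initial contents
	       n_svbuffer_to_host(buf, h, 0, 4);               // read elements [0, 4) back into host memory
	       n_sv_destroy(buf);                              // presumably freed via the generic ShaderViewable destroy above
	   The two "presumably" remarks are assumptions; SVBuffer.cpp is not reproduced in this section. */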
169 | // SVBuffer 170 | PY_VkInline_API void* n_svbuffer_create(const char* elem_type, unsigned long long size, void* hdata); 171 | PY_VkInline_API const char* n_svbuffer_name_elem_type(void* cptr); 172 | PY_VkInline_API unsigned long long n_svbuffer_elem_size(void* cptr); 173 | PY_VkInline_API unsigned long long n_svbuffer_size(void* cptr); 174 | PY_VkInline_API void n_svbuffer_from_host(void* cptr, void* hdata); 175 | PY_VkInline_API void n_svbuffer_to_host(void* cptr, void* hdata, unsigned long long begin, unsigned long long end); 176 | 177 | // SVCombine 178 | PY_VkInline_API void* n_svcombine_create(void* ptr_svs, void* ptr_names, const char* operations); 179 | 180 | // SVObjBuffer 181 | PY_VkInline_API void* n_svobjbuffer_create(void* ptr_svs); 182 | PY_VkInline_API const char* n_svobjbuffer_name_elem_type(void* cptr); 183 | PY_VkInline_API unsigned long long n_svobjbuffer_elem_size(void* cptr); 184 | PY_VkInline_API unsigned long long n_svobjbuffer_size(void* cptr); 185 | PY_VkInline_API void n_svobjbuffer_update(void* cptr); 186 | 187 | // Texture2D 188 | PY_VkInline_API void* n_texture2d_create(int width, int height, unsigned vkformat, unsigned isDepth, unsigned isStencil, unsigned sampleCount); 189 | PY_VkInline_API void n_texture2d_release(void* tex2d); 190 | PY_VkInline_API int n_texture2d_width(void* tex2d); 191 | PY_VkInline_API int n_texture2d_height(void* tex2d); 192 | PY_VkInline_API unsigned n_texture2d_pixelsize(void* tex2d); 193 | PY_VkInline_API unsigned n_texture2d_channelcount(void* tex2d); 194 | PY_VkInline_API unsigned n_texture2d_samplecount(void* tex2d); 195 | PY_VkInline_API unsigned n_texture2d_vkformat(void* tex2d); 196 | PY_VkInline_API void n_texture2d_upload(void* tex2d, void* hdata); 197 | PY_VkInline_API void n_texture2d_download(void* tex2d, void* hdata); 198 | 199 | // Texture3D 200 | PY_VkInline_API void* n_texture3d_create(int dimX, int dimY, int dimZ, unsigned vkformat); 201 | PY_VkInline_API void n_texture3d_release(void* tex3d); 202 | PY_VkInline_API int n_texture3d_dimX(void* tex3d); 203 | PY_VkInline_API int n_texture3d_dimY(void* tex3d); 204 | PY_VkInline_API int n_texture3d_dimZ(void* tex3d); 205 | PY_VkInline_API unsigned n_texture3d_pixelsize(void* tex3d); 206 | PY_VkInline_API unsigned n_texture3d_channelcount(void* tex3d); 207 | PY_VkInline_API unsigned n_texture3d_vkformat(void* tex3d); 208 | PY_VkInline_API void n_texture3d_upload(void* tex3d, void* hdata); 209 | PY_VkInline_API void n_texture3d_download(void* tex3d, void* hdata); 210 | 211 | // Cubemap 212 | PY_VkInline_API void* n_cubemap_create(int width, int height, unsigned vkformat); 213 | PY_VkInline_API void n_cubemap_release(void* cubemap); 214 | PY_VkInline_API int n_cubemap_width(void* cubemap); 215 | PY_VkInline_API int n_cubemap_height(void* cubemap); 216 | PY_VkInline_API unsigned n_cubemap_pixelsize(void* cubemap); 217 | PY_VkInline_API unsigned n_cubemap_channelcount(void* cubemap); 218 | PY_VkInline_API unsigned n_cubemap_vkformat(void* cubemap); 219 | PY_VkInline_API void n_cubemap_upload(void* cubemap, void* hdata); 220 | PY_VkInline_API void n_cubemap_download(void* cubemap, void* hdata); 221 | 222 | } 223 | 224 | 225 | #ifdef _VkInlineEX 226 | #include "api_ex.h" 227 | #endif 228 | -------------------------------------------------------------------------------- /python/VkInline/ShaderViewable.py: -------------------------------------------------------------------------------- 1 | from .Native import ffi, native 2 | import struct 3 | import glm 4 | 5 | class 
ShaderViewable: 6 | def name_view_type(self): 7 | return ffi.string(native.n_sv_name_view_type(self.m_cptr)).decode('utf-8') 8 | def __del__(self): 9 | native.n_sv_destroy(self.m_cptr) 10 | def value(self): 11 | s_type = self.name_view_type() 12 | return '[Shader-viewable object, type: %s]'%s_type 13 | 14 | class SVInt32(ShaderViewable): 15 | def __init__(self, value): 16 | self.m_cptr = native.n_svint32_create(value) 17 | def value(self): 18 | return native.n_svint32_value(self.m_cptr) 19 | 20 | class SVUInt32(ShaderViewable): 21 | def __init__(self, value): 22 | self.m_cptr = native.n_svuint32_create(value) 23 | def value(self): 24 | return native.n_svuint32_value(self.m_cptr) 25 | 26 | class SVFloat(ShaderViewable): 27 | def __init__(self, value): 28 | self.m_cptr = native.n_svfloat_create(value) 29 | def value(self): 30 | return native.n_svfloat_value(self.m_cptr) 31 | 32 | class SVDouble(ShaderViewable): 33 | def __init__(self, value): 34 | self.m_cptr = native.n_svdouble_create(value) 35 | def value(self): 36 | return native.n_svdouble_value(self.m_cptr) 37 | 38 | class SVIVec2(ShaderViewable): 39 | def __init__(self, value): 40 | self.m_cptr = native.n_svivec2_create((value.x, value.y)) 41 | def value(self): 42 | v = b'\x00'*8 43 | native.n_svivec2_value(self.m_cptr, ffi.from_buffer('int[]', v)) 44 | return glm.ivec2(struct.unpack('2i', v)) 45 | 46 | class SVIVec3(ShaderViewable): 47 | def __init__(self, value): 48 | self.m_cptr = native.n_svivec3_create((value.x, value.y, value.z)) 49 | def value(self): 50 | v = b'\x00'*12 51 | native.n_svivec3_value(self.m_cptr, ffi.from_buffer('int[]', v)) 52 | return glm.ivec3(struct.unpack('3i', v)) 53 | 54 | class SVIVec4(ShaderViewable): 55 | def __init__(self, value): 56 | self.m_cptr = native.n_svivec4_create((value.x, value.y, value.z, value.w)) 57 | def value(self): 58 | v = b'\x00'*16 59 | native.n_svivec4_value(self.m_cptr, ffi.from_buffer('int[]', v)) 60 | return glm.ivec4(struct.unpack('4i', v)) 61 | 62 | class SVUVec2(ShaderViewable): 63 | def __init__(self, value): 64 | self.m_cptr = native.n_svuvec2_create((value.x, value.y)) 65 | def value(self): 66 | v = b'\x00'*8 67 | native.n_svuvec2_value(self.m_cptr, ffi.from_buffer('unsigned[]', v)) 68 | return glm.uvec2(struct.unpack('2I', v)) 69 | 70 | class SVUVec3(ShaderViewable): 71 | def __init__(self, value): 72 | self.m_cptr = native.n_svuvec3_create((value.x, value.y, value.z)) 73 | def value(self): 74 | v = b'\x00'*12 75 | native.n_svuvec3_value(self.m_cptr, ffi.from_buffer('unsigned[]', v)) 76 | return glm.uvec3(struct.unpack('3I', v)) 77 | 78 | class SVUVec4(ShaderViewable): 79 | def __init__(self, value): 80 | self.m_cptr = native.n_svuvec4_create((value.x, value.y, value.z, value.w)) 81 | def value(self): 82 | v = b'\x00'*16 83 | native.n_svuvec4_value(self.m_cptr, ffi.from_buffer('unsigned[]', v)) 84 | return glm.uvec4(struct.unpack('4I', v)) 85 | 86 | class SVVec2(ShaderViewable): 87 | def __init__(self, value): 88 | self.m_cptr = native.n_svvec2_create((value.x, value.y)) 89 | def value(self): 90 | v = b'\x00'*8 91 | native.n_svvec2_value(self.m_cptr, ffi.from_buffer('float[]', v)) 92 | return glm.vec2(struct.unpack('2f', v)) 93 | 94 | class SVVec3(ShaderViewable): 95 | def __init__(self, value): 96 | self.m_cptr = native.n_svvec3_create((value.x, value.y, value.z)) 97 | def value(self): 98 | v = b'\x00'*12 99 | native.n_svvec3_value(self.m_cptr, ffi.from_buffer('float[]', v)) 100 | return glm.vec3(struct.unpack('3f', v)) 101 | 102 | class SVVec4(ShaderViewable): 103 | def 
__init__(self, value): 104 | self.m_cptr = native.n_svvec4_create((value.x, value.y, value.z, value.w)) 105 | def value(self): 106 | v = b'\x00'*16 107 | native.n_svvec4_value(self.m_cptr, ffi.from_buffer('float[]', v)) 108 | return glm.vec4(struct.unpack('4f', v)) 109 | 110 | class SVDVec2(ShaderViewable): 111 | def __init__(self, value): 112 | self.m_cptr = native.n_svdvec2_create((value.x, value.y)) 113 | def value(self): 114 | v = b'\x00'*16 115 | native.n_svdvec2_value(self.m_cptr, ffi.from_buffer('double[]', v)) 116 | return glm.dvec2(struct.unpack('2d', v)) 117 | 118 | class SVDVec3(ShaderViewable): 119 | def __init__(self, value): 120 | self.m_cptr = native.n_svdvec3_create((value.x, value.y, value.z)) 121 | def value(self): 122 | v = b'\x00'*24 123 | native.n_svdvec3_value(self.m_cptr, ffi.from_buffer('double[]', v)) 124 | return glm.dvec3(struct.unpack('3d', v)) 125 | 126 | class SVDVec4(ShaderViewable): 127 | def __init__(self, value): 128 | self.m_cptr = native.n_svdvec4_create((value.x, value.y, value.z, value.w)) 129 | def value(self): 130 | v = b'\x00'*32 131 | native.n_svdvec4_value(self.m_cptr, ffi.from_buffer('double[]', v)) 132 | return glm.dvec4(struct.unpack('4d', v)) 133 | 134 | class SVMat2x2(ShaderViewable): 135 | def __init__(self, value): 136 | self.m_cptr = native.n_svmat2x2_create((value[0].x, value[0].y, value[1].x, value[1].y)) 137 | def value(self): 138 | v = b'\x00'*16 139 | native.n_svmat2x2_value(self.m_cptr, ffi.from_buffer('float[]', v)) 140 | elems = struct.unpack('4f', v) 141 | return glm.mat2x2(elems[0:2], elems[2:4]) 142 | 143 | class SVMat2x3(ShaderViewable): 144 | def __init__(self, value): 145 | self.m_cptr = native.n_svmat2x3_create((value[0].x, value[0].y, value[0].z, value[1].x, value[1].y, value[1].z)) 146 | def value(self): 147 | v = b'\x00'*24 148 | native.n_svmat2x3_value(self.m_cptr, ffi.from_buffer('float[]', v)) 149 | elems = struct.unpack('6f', v) 150 | return glm.mat2x3(elems[0:3], elems[3:6]) 151 | 152 | class SVMat2x4(ShaderViewable): 153 | def __init__(self, value): 154 | self.m_cptr = native.n_svmat2x4_create((value[0].x, value[0].y, value[0].z, value[0].w, value[1].x, value[1].y, value[1].z, value[1].w)) 155 | def value(self): 156 | v = b'\x00'*32 157 | native.n_svmat2x4_value(self.m_cptr, ffi.from_buffer('float[]', v)) 158 | elems = struct.unpack('8f', v) 159 | return glm.mat2x4(elems[0:4], elems[4:8]) 160 | 161 | class SVMat3x2(ShaderViewable): 162 | def __init__(self, value): 163 | self.m_cptr = native.n_svmat3x2_create((value[0].x, value[0].y, value[1].x, value[1].y, value[2].x, value[2].y)) 164 | def value(self): 165 | v = b'\x00'*24 166 | native.n_svmat3x2_value(self.m_cptr, ffi.from_buffer('float[]', v)) 167 | elems = struct.unpack('6f', v) 168 | return glm.mat3x2(elems[0:2], elems[2:4], elems[4:6]) 169 | 170 | class SVMat3x3(ShaderViewable): 171 | def __init__(self, value): 172 | self.m_cptr = native.n_svmat3x3_create((value[0].x, value[0].y, value[0].z, value[1].x, value[1].y, value[1].z, value[2].x, value[2].y, value[2].z)) 173 | def value(self): 174 | v = b'\x00'*36 175 | native.n_svmat3x3_value(self.m_cptr, ffi.from_buffer('float[]', v)) 176 | elems = struct.unpack('9f', v) 177 | return glm.mat3x3(elems[0:3], elems[3:6], elems[6:9]) 178 | 179 | class SVMat3x4(ShaderViewable): 180 | def __init__(self, value): 181 | self.m_cptr = native.n_svmat3x4_create((value[0].x, value[0].y, value[0].z, value[0].w, value[1].x, value[1].y, value[1].z, value[1].w, value[2].x, value[2].y, value[2].z, value[2].w)) 182 | def value(self): 
183 | v = b'\x00'*48 184 | native.n_svmat3x4_value(self.m_cptr, ffi.from_buffer('float[]', v)) 185 | elems = struct.unpack('12f', v) 186 | return glm.mat3x4(elems[0:4], elems[4:8], elems[8:12]) 187 | 188 | class SVMat4x2(ShaderViewable): 189 | def __init__(self, value): 190 | self.m_cptr = native.n_svmat4x2_create((value[0].x, value[0].y, value[1].x, value[1].y, value[2].x, value[2].y, value[3].x, value[3].y)) 191 | def value(self): 192 | v = b'\x00'*32 193 | native.n_svmat4x2_value(self.m_cptr, ffi.from_buffer('float[]', v)) 194 | elems = struct.unpack('8f', v) 195 | return glm.mat4x2(elems[0:2], elems[2:4], elems[4:6], elems[6:8]) 196 | 197 | class SVMat4x3(ShaderViewable): 198 | def __init__(self, value): 199 | self.m_cptr = native.n_svmat4x3_create((value[0].x, value[0].y, value[0].z, value[1].x, value[1].y, value[1].z, value[2].x, value[2].y, value[2].z, value[3].x, value[3].y, value[3].z)) 200 | def value(self): 201 | v = b'\x00'*48 202 | native.n_svmat4x3_value(self.m_cptr, ffi.from_buffer('float[]', v)) 203 | elems = struct.unpack('12f', v) 204 | return glm.mat4x3(elems[0:3], elems[3:6], elems[6:9], elems[9:12]) 205 | 206 | class SVMat4x4(ShaderViewable): 207 | def __init__(self, value): 208 | self.m_cptr = native.n_svmat4x4_create((value[0].x, value[0].y, value[0].z, value[0].w, value[1].x, value[1].y, value[1].z, value[1].w, value[2].x, value[2].y, value[2].z, value[2].w, value[3].x, value[3].y, value[3].z, value[3].w)) 209 | def value(self): 210 | v = b'\x00'*64 211 | native.n_svmat4x4_value(self.m_cptr, ffi.from_buffer('float[]', v)) 212 | elems = struct.unpack('16f', v) 213 | return glm.mat4x4(elems[0:4], elems[4:8], elems[8:12], elems[12:16]) 214 | 215 | class SVDMat2x2(ShaderViewable): 216 | def __init__(self, value): 217 | self.m_cptr = native.n_svdmat2x2_create((value[0].x, value[0].y, value[1].x, value[1].y)) 218 | def value(self): 219 | v = b'\x00'*32 220 | native.n_svdmat2x2_value(self.m_cptr, ffi.from_buffer('double[]', v)) 221 | elems = struct.unpack('4d', v) 222 | return glm.dmat2x2(elems[0:2], elems[2:4]) 223 | 224 | class SVDMat2x3(ShaderViewable): 225 | def __init__(self, value): 226 | self.m_cptr = native.n_svdmat2x3_create((value[0].x, value[0].y, value[0].z, value[1].x, value[1].y, value[1].z)) 227 | def value(self): 228 | v = b'\x00'*48 229 | native.n_svdmat2x3_value(self.m_cptr, ffi.from_buffer('double[]', v)) 230 | elems = struct.unpack('6d', v) 231 | return glm.dmat2x3(elems[0:3], elems[3:6]) 232 | 233 | class SVDMat2x4(ShaderViewable): 234 | def __init__(self, value): 235 | self.m_cptr = native.n_svdmat2x4_create((value[0].x, value[0].y, value[0].z, value[0].w, value[1].x, value[1].y, value[1].z, value[1].w)) 236 | def value(self): 237 | v = b'\x00'*64 238 | native.n_svdmat2x4_value(self.m_cptr, ffi.from_buffer('double[]', v)) 239 | elems = struct.unpack('8d', v) 240 | return glm.dmat2x4(elems[0:4], elems[4:8]) 241 | 242 | class SVDMat3x2(ShaderViewable): 243 | def __init__(self, value): 244 | self.m_cptr = native.n_svdmat3x2_create((value[0].x, value[0].y, value[1].x, value[1].y, value[2].x, value[2].y)) 245 | def value(self): 246 | v = b'\x00'*48 247 | native.n_svdmat3x2_value(self.m_cptr, ffi.from_buffer('double[]', v)) 248 | elems = struct.unpack('6d', v) 249 | return glm.dmat3x2(elems[0:2], elems[2:4], elems[4:6]) 250 | 251 | class SVDMat3x3(ShaderViewable): 252 | def __init__(self, value): 253 | self.m_cptr = native.n_svdmat3x3_create((value[0].x, value[0].y, value[0].z, value[1].x, value[1].y, value[1].z, value[2].x, value[2].y, value[2].z)) 254 | 
def value(self): 255 | v = b'\x00'*72 256 | native.n_svdmat3x3_value(self.m_cptr, ffi.from_buffer('double[]', v)) 257 | elems = struct.unpack('9d', v) 258 | return glm.dmat3x3(elems[0:3], elems[3:6], elems[6:9]) 259 | 260 | class SVDMat3x4(ShaderViewable): 261 | def __init__(self, value): 262 | self.m_cptr = native.n_svdmat3x4_create((value[0].x, value[0].y, value[0].z, value[0].w, value[1].x, value[1].y, value[1].z, value[1].w, value[2].x, value[2].y, value[2].z, value[2].w)) 263 | def value(self): 264 | v = b'\x00'*96 265 | native.n_svdmat3x4_value(self.m_cptr, ffi.from_buffer('double[]', v)) 266 | elems = struct.unpack('12d', v) 267 | return glm.dmat3x4(elems[0:4], elems[4:8], elems[8:12]) 268 | 269 | class SVDMat4x2(ShaderViewable): 270 | def __init__(self, value): 271 | self.m_cptr = native.n_svdmat4x2_create((value[0].x, value[0].y, value[1].x, value[1].y, value[2].x, value[2].y, value[3].x, value[3].y)) 272 | def value(self): 273 | v = b'\x00'*64 274 | native.n_svdmat4x2_value(self.m_cptr, ffi.from_buffer('double[]', v)) 275 | elems = struct.unpack('8d', v) 276 | return glm.dmat4x2(elems[0:2], elems[2:4], elems[4:6], elems[6:8]) 277 | 278 | class SVDMat4x3(ShaderViewable): 279 | def __init__(self, value): 280 | self.m_cptr = native.n_svdmat4x3_create((value[0].x, value[0].y, value[0].z, value[1].x, value[1].y, value[1].z, value[2].x, value[2].y, value[2].z, value[3].x, value[3].y, value[3].z)) 281 | def value(self): 282 | v = b'\x00'*96 283 | native.n_svdmat4x3_value(self.m_cptr, ffi.from_buffer('double[]', v)) 284 | elems = struct.unpack('12d', v) 285 | return glm.dmat4x3(elems[0:3], elems[3:6], elems[6:9], elems[9:12]) 286 | 287 | class SVDMat4x4(ShaderViewable): 288 | def __init__(self, value): 289 | self.m_cptr = native.n_svdmat4x4_create((value[0].x, value[0].y, value[0].z, value[0].w, value[1].x, value[1].y, value[1].z, value[1].w, value[2].x, value[2].y, value[2].z, value[2].w, value[3].x, value[3].y, value[3].z, value[3].w)) 290 | def value(self): 291 | v = b'\x00'*128 292 | native.n_svdmat4x4_value(self.m_cptr, ffi.from_buffer('double[]', v)) 293 | elems = struct.unpack('16d', v) 294 | return glm.dmat4x4(elems[0:4], elems[4:8], elems[8:12], elems[12:16]) 295 | --------------------------------------------------------------------------------
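The Python bindings reproduced above can be exercised end to end once the native PyVkInline libraries are built and the VkInline package is importable. The sketch below is illustrative only: Set_Verbose, SVFloat and For come from the files shown in this section, while device_vector_from_numpy, to_host and the GLSL accessors get_size/get_value/set_value are assumed to be provided by SVVector.py, whose source is not reproduced here.

    import numpy as np
    import VkInline as vki   # assumes the built PyVkInline/PyVkInlineEX shared libraries sit next to the package

    vki.Set_Verbose(False)

    # Shader-viewable scalars round-trip through the native layer (ShaderViewable.py above).
    k = vki.SVFloat(10.0)
    print(k.name_view_type(), k.value())

    # device_vector_from_numpy / to_host are assumed helpers from SVVector.py (not shown here).
    harr = np.array([1.0, 2.0, 3.0, 4.0, 5.0], dtype='float32')
    darr = vki.device_vector_from_numpy(harr)

    # For (Context.py above) appends '_begin'/'_end' parameters and wraps the body in a main()
    # that early-outs past '_end', so only the inner function needs to be written.
    forLoop = vki.For(['arr', 'k'], 'inner', '''
    void inner(uint idx)
    {
        set_value(arr, idx, get_value(arr, idx) * k);
    }
    ''')
    forLoop.launch_n(5, [darr, k])
    print(darr.to_host())   # with the assumptions above: [10. 20. 30. 40. 50.]

For rasterization, the option keys handled by DrawCall.__init__ in Context.py map one-to-one onto the n_drawcall_set_* entry points, and the integer values are presumably the raw Vulkan enum values, since the C layer forwards them unchanged. A hypothetical configuration, continuing the session above, with the enum values flagged as assumptions and the GLSL bodies left as placeholders:

    # vert_body / frag_body are placeholders for real GLSL shader bodies; the numeric values
    # (VkPrimitiveTopology, VkCompareOp, VkBlendFactor) are assumptions, not confirmed by this dump.
    vert_body = frag_body = ''
    dc = vki.DrawCall(vert_body, frag_body, options={
        'primitive_topology': 3,        # VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST
        'depth_enable': 1,
        'depth_write': 1,
        'depth_compare_op': 1,          # VK_COMPARE_OP_LESS
        'blend_enable': 1,
        'src_color_blend_factor': 6,    # VK_BLEND_FACTOR_SRC_ALPHA
        'dst_color_blend_factor': 7,    # VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA
    })

    rast = vki.Rasterizer(['pos', 'col'])   # hypothetical parameter names
    rast.add_draw_call(dc)
    # Rasterizer.launch additionally needs color buffers, clear values and one launch
    # parameter per draw call, as its signature above shows.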