├── .gitignore ├── media ├── carousel.png └── prisoner.png ├── include ├── Blam │ ├── Math.hpp │ ├── Types │ │ ├── TagDataReferece.hpp │ │ ├── TagReference.hpp │ │ ├── Vertex.hpp │ │ ├── TagBlock.hpp │ │ ├── MapHeader.hpp │ │ └── TagIndex.hpp │ ├── Math │ │ ├── Vector.hpp │ │ └── Bounds3D.hpp │ ├── Util │ │ └── VirtualHeap.hpp │ ├── Types.hpp │ ├── Util.hpp │ ├── TagVisitor.hpp │ ├── Blam.hpp │ └── Enums.hpp ├── VkBlam │ ├── Format.hpp │ ├── SceneView.hpp │ ├── Shaders │ │ └── ShaderEnvironment.hpp │ ├── World.hpp │ ├── Shader.hpp │ ├── Renderer.hpp │ ├── Scene.hpp │ └── VkBlam.hpp ├── Vulkan │ ├── Pipeline.hpp │ ├── VulkanAPI.hpp │ ├── SamplerCache.hpp │ ├── ShaderModuleCache.hpp │ ├── Memory.hpp │ ├── Debug.hpp │ ├── DescriptorHeap.hpp │ ├── DescriptorUpdateBatch.hpp │ └── StreamBuffer.hpp └── Common │ ├── Alignment.hpp │ ├── Literals.hpp │ ├── Format.hpp │ └── Endian.hpp ├── source ├── stb_image_write.cpp ├── Vulkan │ ├── Pipeline.cpp │ ├── VulkanAPI.cpp │ ├── SamplerCache.cpp │ ├── ShaderModuleCache.cpp │ ├── Debug.cpp │ ├── DescriptorUpdateBatch.cpp │ ├── DescriptorHeap.cpp │ ├── Memory.cpp │ └── StreamBuffer.cpp ├── VkBlam │ ├── Shader.cpp │ ├── SceneView.cpp │ ├── World.cpp │ ├── Format.cpp │ ├── FormatTraits.hpp │ ├── Shaders │ │ └── ShaderEnvironment.cpp │ ├── Renderer.cpp │ └── VkBlam.cpp ├── Common │ └── Format.cpp ├── Blam │ ├── Blam.cpp │ ├── TagVisitor.cpp │ └── Util.cpp ├── dump-bsp.cpp ├── decrypt-shader.cpp └── main.cpp ├── .gitmodules ├── shaders ├── Unlit.frag ├── vkBlam.glsl ├── Default.vert └── Default.frag ├── LICENSE ├── README.md ├── .clang-format ├── .github └── workflows │ └── ci.yml └── CMakeLists.txt /.gitignore: -------------------------------------------------------------------------------- 1 | build/ 2 | .vscode/ 3 | .cache/ 4 | vert.spv 5 | frag.spv 6 | -------------------------------------------------------------------------------- /media/carousel.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Wunkolo/vkblam/HEAD/media/carousel.png -------------------------------------------------------------------------------- /media/prisoner.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Wunkolo/vkblam/HEAD/media/prisoner.png -------------------------------------------------------------------------------- /include/Blam/Math.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include "Math/Bounds3D.hpp" 4 | #include "Math/Vector.hpp" -------------------------------------------------------------------------------- /source/stb_image_write.cpp: -------------------------------------------------------------------------------- 1 | #define STB_IMAGE_WRITE_IMPLEMENTATION 2 | #include "stb_image_write.h" 3 | -------------------------------------------------------------------------------- /source/Vulkan/Pipeline.cpp: -------------------------------------------------------------------------------- 1 | 2 | #include 3 | 4 | namespace Vulkan 5 | { 6 | 7 | } // namespace Vulkan -------------------------------------------------------------------------------- /.gitmodules: -------------------------------------------------------------------------------- 1 | [submodule "external/mio"] 2 | path = external/mio 3 | url = git@github.com:mandreyel/mio.git 4 | [submodule "external/cmrc"] 5 | path = external/cmrc 6 | url = git@github.com:vector-of-bool/cmrc.git 7 | [submodule "external/glm"] 8 | 
path = external/glm 9 | url = git@github.com:g-truc/glm.git 10 | -------------------------------------------------------------------------------- /include/VkBlam/Format.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | 5 | namespace VkBlam 6 | { 7 | vk::ImageType BlamToVk(Blam::BitmapEntryType Value); 8 | vk::Format BlamToVk(Blam::BitmapEntryFormat Value); 9 | 10 | vk::ComponentMapping GetFormatSwizzle(Blam::BitmapEntryFormat Value); 11 | } // namespace VkBlam -------------------------------------------------------------------------------- /shaders/Unlit.frag: -------------------------------------------------------------------------------- 1 | #version 460 2 | #extension GL_EXT_shader_explicit_arithmetic_types : require 3 | 4 | layout( push_constant ) uniform Constants { 5 | layout(offset = 64) f32vec4 Color; 6 | } PushConstant; 7 | 8 | layout( location = 0 ) out f32vec4 Attachment0; 9 | 10 | void main() 11 | { 12 | Attachment0 = PushConstant.Color; 13 | } -------------------------------------------------------------------------------- /include/VkBlam/SceneView.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | 5 | namespace VkBlam 6 | { 7 | class SceneView 8 | { 9 | private: 10 | public: 11 | SceneView(glm::f32mat4 View, glm::f32mat4 Projection, glm::uvec2 Viewport); 12 | 13 | VkBlam::CameraGlobals CameraGlobalsData; 14 | glm::uvec2 Viewport; 15 | }; 16 | } // namespace VkBlam -------------------------------------------------------------------------------- /shaders/vkBlam.glsl: -------------------------------------------------------------------------------- 1 | #extension GL_EXT_shader_explicit_arithmetic_types : enable 2 | 3 | struct CameraGlobals 4 | { 5 | f32mat4x4 View; 6 | f32mat4x4 Projection; 7 | f32mat4x4 ViewProjection; 8 | }; 9 | 10 | struct SimulationGlobals 11 | { 12 | float32_t Time; 13 | }; 14 | struct PassGlobals 15 | { 16 | f32vec4 ScreenSize; // {width, height, 1/width, 1/height} 17 | }; -------------------------------------------------------------------------------- /source/VkBlam/Shader.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | namespace VkBlam 4 | { 5 | Shader::Shader( 6 | const Vulkan::Context& VulkanContext, const BitmapHeapT& BitmapHeap, 7 | Vulkan::DescriptorUpdateBatch& DescriptorUpdateBatch 8 | ) 9 | : VulkanContext(VulkanContext), BitmapHeap(BitmapHeap), 10 | DescriptorUpdateBatch(DescriptorUpdateBatch) 11 | { 12 | } 13 | } // namespace VkBlam -------------------------------------------------------------------------------- /include/Blam/Types/TagDataReferece.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | 5 | namespace Blam 6 | { 7 | #pragma pack(push, 1) 8 | struct TagDataReference 9 | { 10 | std::uint32_t Size; 11 | std::uint32_t IsExternal; 12 | std::uint32_t Offset; 13 | std::uint64_t VirtualOffset; 14 | }; 15 | #pragma pack(pop) 16 | 17 | static_assert(sizeof(TagDataReference) == 20); 18 | } // namespace Blam -------------------------------------------------------------------------------- /source/VkBlam/SceneView.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | namespace VkBlam 4 | { 5 | SceneView::SceneView( 6 | glm::f32mat4 View, glm::f32mat4 Projection, glm::uvec2 Viewport 7 | ) 8 | : Viewport(Viewport) 9 | { 10 | 
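	// [Editor note] The assignments below fill the CameraGlobals block that the
	// shaders consume (see vkBlam.glsl / VkBlam.hpp); ViewProjection is computed
	// as Projection * View, matching GLM's column-vector convention.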
CameraGlobalsData.View = View; 11 | CameraGlobalsData.Projection = Projection; 12 | 13 | CameraGlobalsData.ViewProjection = Projection * View; 14 | } 15 | } // namespace VkBlam -------------------------------------------------------------------------------- /source/Vulkan/VulkanAPI.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | VULKAN_HPP_DEFAULT_DISPATCH_LOADER_DYNAMIC_STORAGE; 4 | 5 | static const bool VulkanLoader = []() -> bool { 6 | static vk::DynamicLoader dl; 7 | VULKAN_HPP_DEFAULT_DISPATCHER.init( 8 | dl.getProcAddress("vkGetInstanceProcAddr") 9 | ); 10 | return true; 11 | }(); 12 | 13 | namespace Vulkan 14 | { 15 | 16 | } // namespace Vulkan -------------------------------------------------------------------------------- /include/Blam/Math/Vector.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | 5 | namespace Blam 6 | { 7 | #pragma pack(push, 1) 8 | using Vector2f = std::array; 9 | using Vector3f = std::array; 10 | using Vector4f = std::array; 11 | #pragma pack(pop) 12 | 13 | static_assert(sizeof(Vector2f) == sizeof(float) * 2); 14 | static_assert(sizeof(Vector3f) == sizeof(float) * 3); 15 | static_assert(sizeof(Vector4f) == sizeof(float) * 4); 16 | } // namespace Blam -------------------------------------------------------------------------------- /include/Vulkan/Pipeline.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | 5 | #include 6 | 7 | namespace Vulkan 8 | { 9 | 10 | template 11 | inline vk::VertexInputBindingDescription CreateVertexInputBinding( 12 | std::uint8_t BindingIndex, 13 | vk::VertexInputRate InputRate = vk::VertexInputRate::eVertex 14 | ) 15 | { 16 | return vk::VertexInputBindingDescription( 17 | BindingIndex, sizeof(T), InputRate 18 | ); 19 | } 20 | } // namespace Vulkan -------------------------------------------------------------------------------- /include/Common/Alignment.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | 5 | namespace Common 6 | { 7 | 8 | template 9 | constexpr T AlignUp(T value, std::size_t size) 10 | { 11 | const T mod = static_cast(value % size); 12 | value -= mod; 13 | return static_cast(mod == T{0} ? 
value : value + size); 14 | } 15 | 16 | template 17 | constexpr T AlignDown(T value, std::size_t size) 18 | { 19 | return static_cast(value - value % size); 20 | } 21 | 22 | } // namespace Common -------------------------------------------------------------------------------- /include/Blam/Types/TagReference.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | 5 | #include 6 | 7 | namespace Blam 8 | { 9 | #pragma pack(push, 1) 10 | struct TagReference 11 | { 12 | TagClass Class; 13 | std::uint32_t PathVirtualOffset; 14 | std::uint32_t PathLength; 15 | std::uint32_t TagID; 16 | 17 | bool Valid() const 18 | { 19 | return TagID != 0xFFFFFFFFu; 20 | } 21 | }; 22 | #pragma pack(pop) 23 | 24 | static_assert(sizeof(TagReference) == 0x10); 25 | } // namespace Blam -------------------------------------------------------------------------------- /include/Blam/Types/Vertex.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | 5 | namespace Blam 6 | { 7 | #pragma pack(push, 1) 8 | struct Vertex 9 | { 10 | Vector3f Position; 11 | Vector3f Normal; 12 | Vector3f Binormal; 13 | Vector3f Tangent; 14 | Vector2f UV; 15 | }; 16 | 17 | struct LightmapVertex 18 | { 19 | Vector3f Normal; 20 | Vector2f UV; 21 | }; 22 | #pragma pack(pop) 23 | 24 | static_assert(sizeof(Vertex) == 56); 25 | static_assert(sizeof(LightmapVertex) == 20); 26 | } // namespace Blam -------------------------------------------------------------------------------- /include/Blam/Math/Bounds3D.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include "Vector.hpp" 4 | 5 | namespace Blam 6 | { 7 | #pragma pack(push, 1) 8 | struct Bounds3D 9 | { 10 | Vector2f BoundsX; 11 | Vector2f BoundsY; 12 | Vector2f BoundsZ; 13 | 14 | inline bool Intersects(const Bounds3D& Other) const 15 | { 16 | return (BoundsX[0] <= Other.BoundsX[1] && BoundsX[1] >= Other.BoundsX[0] 17 | ) 18 | && (BoundsY[0] <= Other.BoundsY[1] && BoundsY[1] >= Other.BoundsY[0] 19 | ) 20 | && (BoundsZ[0] <= Other.BoundsZ[1] && BoundsZ[1] >= Other.BoundsZ[0] 21 | ); 22 | } 23 | }; 24 | #pragma pack(pop) 25 | 26 | static_assert(sizeof(Bounds3D) == sizeof(float) * 6); 27 | } // namespace Blam -------------------------------------------------------------------------------- /include/Common/Literals.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | 5 | namespace Common::Literals 6 | { 7 | 8 | constexpr std::uint64_t operator""_KiB(unsigned long long int x) 9 | { 10 | return 1024ULL * x; 11 | } 12 | 13 | constexpr std::uint64_t operator""_MiB(unsigned long long int x) 14 | { 15 | return 1024_KiB * x; 16 | } 17 | 18 | constexpr std::uint64_t operator""_GiB(unsigned long long int x) 19 | { 20 | return 1024_MiB * x; 21 | } 22 | 23 | constexpr std::uint64_t operator""_TiB(unsigned long long int x) 24 | { 25 | return 1024_GiB * x; 26 | } 27 | 28 | constexpr std::uint64_t operator""_PiB(unsigned long long int x) 29 | { 30 | return 1024_TiB * x; 31 | } 32 | 33 | } // namespace Common::Literals -------------------------------------------------------------------------------- /include/Vulkan/VulkanAPI.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #define VK_NO_PROTOTYPES 4 | #include 5 | 6 | #define VULKAN_HPP_DISPATCH_LOADER_DYNAMIC 1 7 | #define 
VULKAN_HPP_NO_EXCEPTIONS 8 | #include 9 | #include 10 | #include 11 | 12 | namespace Vulkan 13 | { 14 | // Lightweight object to encapsulate everything minimally needed to interface 15 | // with a fully initialized vulkan context 16 | struct Context 17 | { 18 | vk::Device LogicalDevice; 19 | vk::PhysicalDevice PhysicalDevice; 20 | 21 | vk::Queue RenderQueue; 22 | std::uint8_t RenderQueueFamilyIndex; 23 | 24 | vk::Queue TransferQueue; 25 | std::uint8_t TransferQueueFamilyIndex; 26 | }; 27 | } // namespace Vulkan 28 | -------------------------------------------------------------------------------- /include/Vulkan/SamplerCache.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | 5 | #include 6 | #include 7 | 8 | namespace Vulkan 9 | { 10 | 11 | // Implements a simple pool of reusable sampler objects 12 | class SamplerCache 13 | { 14 | private: 15 | const Vulkan::Context& VulkanContext; 16 | 17 | std::unordered_map SamplerMap; 18 | 19 | explicit SamplerCache(const Vulkan::Context& VulkanContext); 20 | 21 | public: 22 | ~SamplerCache() = default; 23 | 24 | SamplerCache(SamplerCache&&) = default; 25 | 26 | const vk::Sampler& GetSampler(const vk::SamplerCreateInfo& SamplerInfo); 27 | 28 | static std::optional 29 | Create(const Vulkan::Context& VulkanContext); 30 | }; 31 | } // namespace Vulkan -------------------------------------------------------------------------------- /include/Blam/Types/TagBlock.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | #include 5 | #include 6 | #include 7 | 8 | namespace Blam 9 | { 10 | #pragma pack(push, 1) 11 | template 12 | struct TagBlock 13 | { 14 | std::uint32_t Count; 15 | std::uint32_t VirtualOffset; 16 | std::uint32_t Unknown8; 17 | 18 | template< 19 | typename U = T, 20 | typename = typename std::enable_if_t == false>> 21 | std::span 22 | GetSpan(const std::byte Data[], std::uint32_t VirtualBase) const 23 | { 24 | return std::span( 25 | reinterpret_cast(Data + (VirtualOffset - VirtualBase)), 26 | Count 27 | ); 28 | } 29 | }; 30 | #pragma pack(pop) 31 | 32 | static_assert(sizeof(TagBlock) == 12); 33 | } // namespace Blam -------------------------------------------------------------------------------- /include/Blam/Types/MapHeader.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | #include 5 | 6 | #include 7 | 8 | namespace Blam 9 | { 10 | #pragma pack(push, 1) 11 | struct MapHeader 12 | { 13 | std::uint32_t MagicHead; // 'head' 14 | CacheVersion Version; 15 | std::uint32_t FileSize; 16 | std::uint32_t PaddingLength; // Xbox Only 17 | std::uint32_t TagIndexOffset; 18 | std::uint32_t TagIndexSize; 19 | std::byte Pad18[8]; 20 | char ScenarioName[32]; 21 | char BuildVersion[32]; 22 | ScenarioType Type; 23 | std::byte Pad64[2]; 24 | std::uint32_t Checksum; 25 | std::uint32_t H1AFlags; // Todo 26 | 27 | std::byte Pad6C[1936]; 28 | std::uint32_t MagicFoot; // 'foot' 29 | }; 30 | #pragma pack(pop) 31 | 32 | static_assert(sizeof(MapHeader) == 2048); 33 | } // namespace Blam -------------------------------------------------------------------------------- /include/Blam/Util/VirtualHeap.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | 5 | #include 6 | #include 7 | 8 | namespace Blam 9 | { 10 | // Represents a runtime chunk of memory mapped 11 | // to the specified base address. 
Intended to help "devirtualize" runtime 12 | // offsets and pointers into regular file-offsets 13 | struct VirtualHeap 14 | { 15 | const std::uint32_t BaseAddress; 16 | const std::span Data; 17 | 18 | template 19 | std::span GetBlock(const TagBlock& Block) const 20 | { 21 | return Block.GetSpan(Data.data(), BaseAddress); 22 | } 23 | 24 | template 25 | const T& Read(std::uint32_t Offset = 0u) const 26 | { 27 | return *reinterpret_cast( 28 | Data.subspan(Offset - BaseAddress).data() 29 | ); 30 | } 31 | }; 32 | } // namespace Blam -------------------------------------------------------------------------------- /include/Common/Format.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | 10 | namespace Common 11 | { 12 | void HexDump( 13 | const std::span& Data, std::uint8_t Columns = 16, 14 | std::FILE* Stream = stdout 15 | ); 16 | 17 | std::string FormatByteCount(std::size_t ByteCount); 18 | 19 | template 20 | std::string Format(const std::string_view Format, ArgsT... Args) 21 | { 22 | int FormatSize = std::snprintf(nullptr, 0, Format.data(), Args...) + 1u; 23 | if( FormatSize <= 0 ) 24 | { 25 | return ""; 26 | } 27 | const std::size_t StringSize = static_cast(FormatSize); 28 | 29 | std::string Result(StringSize - 1, '\0'); 30 | std::snprintf(Result.data(), StringSize, Format.data(), Args...); 31 | return Result; 32 | } 33 | 34 | } // namespace Common -------------------------------------------------------------------------------- /include/Vulkan/ShaderModuleCache.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | 5 | #include 6 | #include 7 | #include 8 | 9 | namespace Vulkan 10 | { 11 | 12 | // Implements a simple pool of reusable shader module objects 13 | class ShaderModuleCache 14 | { 15 | private: 16 | const Vulkan::Context& VulkanContext; 17 | 18 | std::unordered_map ShaderModuleMap; 19 | 20 | explicit ShaderModuleCache(const Vulkan::Context& VulkanContext); 21 | 22 | public: 23 | ~ShaderModuleCache() = default; 24 | 25 | ShaderModuleCache(ShaderModuleCache&&) = default; 26 | 27 | std::optional GetShaderModule( 28 | std::size_t Hash, std::span ShaderCode 29 | ); 30 | 31 | static std::optional 32 | Create(const Vulkan::Context& VulkanContext); 33 | }; 34 | } // namespace Vulkan -------------------------------------------------------------------------------- /include/VkBlam/Shaders/ShaderEnvironment.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | #include 5 | 6 | #include 7 | #include 8 | 9 | #include 10 | 11 | namespace VkBlam 12 | { 13 | class ShaderEnvironment final : public Shader 14 | { 15 | private: 16 | std::unique_ptr DescriptorHeap; 17 | 18 | std::unordered_map 19 | ShaderEnvironmentBindings; 20 | 21 | public: 22 | ShaderEnvironment( 23 | const Vulkan::Context& VulkanContext, 24 | const VkBlam::BitmapHeapT& BitmapHeap, 25 | Vulkan::DescriptorUpdateBatch& DescriptorUpdateBatch 26 | ); 27 | virtual ~ShaderEnvironment(); 28 | 29 | bool RegisterShader( 30 | const Blam::TagIndexEntry& TagEntry, 31 | const Blam::Tag& Shader 32 | ) override; 33 | }; 34 | } // namespace VkBlam -------------------------------------------------------------------------------- /include/VkBlam/World.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | 5 | 
#include 6 | #include 7 | #include 8 | 9 | namespace VkBlam 10 | { 11 | 12 | // A simulated representation of an instantiated Halo map 13 | class World 14 | { 15 | private: 16 | const Blam::MapFile& MapFile; 17 | 18 | glm::f32vec3 WorldBoundMax; 19 | glm::f32vec3 WorldBoundMin; 20 | 21 | World(const Blam::MapFile& MapFile); 22 | 23 | public: 24 | ~World(); 25 | 26 | World(World&&) = default; 27 | 28 | const Blam::MapFile& GetMapFile() const 29 | { 30 | return MapFile; 31 | } 32 | 33 | const Blam::MapHeader& GetMapHeader() const 34 | { 35 | return MapFile.MapHeader; 36 | } 37 | 38 | const glm::f32mat2x3 GetWorldBounds() const 39 | { 40 | return glm::f32mat2x3(WorldBoundMin, WorldBoundMax); 41 | } 42 | 43 | static std::optional Create(const Blam::MapFile& MapFile); 44 | }; 45 | } // namespace VkBlam -------------------------------------------------------------------------------- /include/Blam/Types.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | #include 5 | #include 6 | 7 | #include 8 | 9 | #include 10 | #include 11 | #include 12 | #include 13 | #include 14 | #include 15 | 16 | namespace Blam 17 | { 18 | 19 | #pragma pack(push, 1) 20 | union DatumIndex 21 | { 22 | std::int32_t Handle; 23 | struct 24 | { 25 | std::int16_t Index; 26 | std::int16_t Salt; 27 | }; 28 | 29 | static constexpr DatumIndex Invalid() 30 | { 31 | return DatumIndex{-1}; 32 | } 33 | }; 34 | 35 | struct ResourceMapHeader 36 | { 37 | ResourceMapType Type; 38 | std::uint32_t TagPathsOffset; 39 | std::uint32_t ResourceOffset; 40 | std::uint32_t ResourceCount; 41 | }; 42 | #pragma pack(pop) 43 | 44 | static_assert(sizeof(DatumIndex) == sizeof(std::int32_t)); 45 | } // namespace Blam -------------------------------------------------------------------------------- /include/Blam/Types/TagIndex.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | 5 | #include 6 | 7 | namespace Blam 8 | { 9 | #pragma pack(push, 1) 10 | struct TagIndexHeader 11 | { 12 | std::uint32_t TagIndexVirtualOffset; 13 | std::uint32_t BaseTag; 14 | std::uint32_t ScenarioTagID; 15 | std::uint32_t TagCount; 16 | std::uint32_t VertexCount; 17 | std::uint32_t VertexOffset; 18 | std::uint32_t IndexCount; 19 | std::uint32_t IndexOffset; 20 | std::uint32_t ModelDataSize; 21 | std::uint32_t MagicTags; // 'tags' 22 | }; 23 | 24 | struct TagIndexEntry 25 | { 26 | TagClass ClassPrimary; 27 | TagClass ClassSecondary; 28 | TagClass ClassTertiary; 29 | std::uint32_t TagID; 30 | std::uint32_t TagPathVirtualOffset; 31 | std::uint32_t TagDataVirtualOffset; 32 | std::uint32_t IsExternal; 33 | std::uint32_t Unused; 34 | }; 35 | #pragma pack(pop) 36 | 37 | static_assert(sizeof(TagIndexHeader) == 40); 38 | static_assert(sizeof(TagIndexEntry) == 32); 39 | } // namespace Blam -------------------------------------------------------------------------------- /source/Vulkan/SamplerCache.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | #include 4 | 5 | namespace Vulkan 6 | { 7 | 8 | SamplerCache::SamplerCache(const Vulkan::Context& VulkanContext) 9 | : VulkanContext(VulkanContext) 10 | { 11 | } 12 | 13 | const vk::Sampler& 14 | SamplerCache::GetSampler(const vk::SamplerCreateInfo& SamplerInfo) 15 | { 16 | const std::size_t SamplerHash 17 | = std::hash()(SamplerInfo); 18 | 19 | // Cache hit 20 | if( SamplerMap.contains(SamplerHash) ) 21 | { 22 | return SamplerMap.at(SamplerHash).get(); 
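		// [Editor note] On a cache miss, execution falls through below: the
		// sampler is created once and memoized in SamplerMap under its hash.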
23 | } 24 | 25 | auto CreateResult 26 | = VulkanContext.LogicalDevice.createSamplerUnique(SamplerInfo); 27 | 28 | // TODO: Handle creation issues. std::optional? 29 | // CreateResult.result == vk::Result::eSuccess; 30 | 31 | return (SamplerMap[SamplerHash] = std::move(CreateResult.value)).get(); 32 | } 33 | 34 | std::optional 35 | SamplerCache::Create(const Vulkan::Context& VulkanContext) 36 | { 37 | SamplerCache NewSamplerCache(VulkanContext); 38 | 39 | return {std::move(NewSamplerCache)}; 40 | } 41 | } // namespace Vulkan -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2022 Wunkolo 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
-------------------------------------------------------------------------------- /source/Common/Format.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | #include 4 | #include 5 | #include 6 | 7 | namespace Common 8 | { 9 | void HexDump( 10 | const std::span& Data, std::uint8_t Columns, 11 | std::FILE* Stream 12 | ) 13 | { 14 | 15 | for( std::size_t CurOffset = 0; CurOffset < Data.size(); 16 | CurOffset += Columns ) 17 | { 18 | std::printf("0x%08" PRIX64 ":", CurOffset); 19 | for( const auto& Byte : Data.subspan( 20 | CurOffset, 21 | std::min(Data.size() - CurOffset, Columns) 22 | ) ) 23 | { 24 | std::fprintf(Stream, " %02" SCNx8, std::uint8_t(Byte)); 25 | } 26 | std::fprintf(Stream, "\n"); 27 | } 28 | } 29 | 30 | std::string FormatByteCount(std::size_t ByteCount) 31 | { 32 | static const char* SizeUnits[] 33 | = {"Bytes", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"}; 34 | std::size_t Index; 35 | std::double_t ByteSize = ByteCount; 36 | for( Index = 0; Index < std::extent_v; Index++ ) 37 | { 38 | if( ByteSize < 1024 ) 39 | break; 40 | ByteSize /= 1024; 41 | } 42 | return std::to_string(ByteSize) + " " + SizeUnits[Index]; 43 | } 44 | } // namespace Common -------------------------------------------------------------------------------- /include/Blam/Util.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | #include 5 | 6 | #include "Enums.hpp" 7 | #include "Tags.hpp" 8 | #include "Types.hpp" 9 | 10 | #include "Util/VirtualHeap.hpp" 11 | 12 | namespace Blam 13 | { 14 | 15 | std::string FormatTagClass(TagClass Class); 16 | 17 | std::size_t GetVertexStride(VertexFormat Format); 18 | 19 | // HEK allows a max of 0x2'0000 total surfaces and uses an array of 32-bit 20 | // integers to map each surface's visibility to a single bit 21 | // WordIndex = SurfaceIndex / 32 22 | // BitIndex = SurfaceIndex % 32 23 | using SurfaceOcclusionBitArray = std::span; 24 | 25 | void GenerateVisibleSurfaceIndices( 26 | const VirtualHeap& Heap, 27 | std::span::Cluster::SubCluster> 28 | SubClusters, 29 | const Bounds3D& OverlapTest, SurfaceOcclusionBitArray SurfaceOcclusionArray 30 | ); 31 | 32 | // Enums 33 | const char* ToString(const CacheVersion& Value); 34 | const char* ToString(const ScenarioType& Value); 35 | 36 | std::string ToString(const MapHeader& Value); 37 | std::string ToString(const TagIndexHeader& Value); 38 | std::string ToString(const TagIndexEntry& Value); 39 | } // namespace Blam -------------------------------------------------------------------------------- /source/Blam/Blam.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | namespace Blam 4 | { 5 | 6 | MapFile::MapFile( 7 | std::span MapFileData, 8 | std::span BitmapFileData 9 | ) 10 | : MapFileData(MapFileData), BitmapFileData(BitmapFileData), 11 | MapHeader(*reinterpret_cast(MapFileData.data())), 12 | TagIndexHeader(*reinterpret_cast( 13 | MapFileData.data() + MapHeader.TagIndexOffset 14 | )), 15 | TagHeap{ 16 | (TagIndexHeader.TagIndexVirtualOffset 17 | - std::uint32_t(sizeof(Blam::TagIndexHeader))) 18 | - MapHeader.TagIndexOffset, 19 | MapFileData} 20 | { 21 | } 22 | 23 | std::span MapFile::GetTagIndexArray() const 24 | { 25 | return std::span( 26 | reinterpret_cast( 27 | MapFileData.data() + MapHeader.TagIndexOffset 28 | + sizeof(Blam::TagIndexHeader) 29 | ), 30 | TagIndexHeader.TagCount 31 | ); 32 | } 33 | 34 | const TagIndexEntry* 
MapFile::GetTagIndexEntry(std::uint16_t TagIndex) const 35 | { 36 | if( TagIndex >= TagIndexHeader.TagCount ) 37 | { 38 | return nullptr; 39 | } 40 | return &GetTagIndexArray()[TagIndex]; 41 | } 42 | 43 | } // namespace Blam 44 | -------------------------------------------------------------------------------- /include/VkBlam/Shader.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | 5 | #include 6 | 7 | #include 8 | 9 | namespace VkBlam 10 | { 11 | 12 | // Each shader creates a derived graphics pipeline as well as a 13 | // descriptor set+buffer that flattens out all of the material 14 | // parameters that need to be passed onto the fragment shader 15 | // itself into a uniform buffer 16 | // There might possibly be some runtime overhead needed for the 17 | // animated U/V functions and other animation phases but they might 18 | // just be able to be derived at runtime from the current time 19 | // without having to maintain a per-shader animation-state 20 | class Shader 21 | { 22 | protected: 23 | const Vulkan::Context& VulkanContext; 24 | const BitmapHeapT& BitmapHeap; 25 | 26 | Vulkan::DescriptorUpdateBatch& DescriptorUpdateBatch; 27 | 28 | Shader( 29 | const Vulkan::Context& VulkanContext, const BitmapHeapT& BitmapHeap, 30 | Vulkan::DescriptorUpdateBatch& DescriptorUpdateBatch 31 | ); 32 | 33 | public: 34 | virtual ~Shader() = default; 35 | 36 | virtual bool RegisterShader( 37 | const Blam::TagIndexEntry& TagEntry, 38 | const Blam::Tag& Shader 39 | ) = 0; 40 | }; 41 | } // namespace VkBlam -------------------------------------------------------------------------------- /shaders/Default.vert: -------------------------------------------------------------------------------- 1 | #version 460 2 | 3 | #extension GL_GOOGLE_include_directive : require 4 | 5 | #include "vkBlam.glsl" 6 | 7 | layout( push_constant ) uniform PushConstants { 8 | CameraGlobals Camera; 9 | }; 10 | 11 | // Input vertex data: Standard vertex 12 | layout( location = 0 ) in f32vec3 InPosition; 13 | layout( location = 1 ) in f32vec3 InNormal; 14 | layout( location = 2 ) in f32vec3 InBinormal; 15 | layout( location = 3 ) in f32vec3 InTangent; 16 | layout( location = 4 ) in f32vec2 InUV; 17 | 18 | // Input vertex data: Lightmap-vertex 19 | layout( location = 5 ) in f32vec3 InLightmapNormal; 20 | layout( location = 6 ) in f32vec2 InLightmapUV; 21 | 22 | // Output vertex data 23 | layout( location = 0 ) out f32vec3 OutPosition; 24 | layout( location = 1 ) out f32vec3 OutNormal; 25 | layout( location = 2 ) out f32vec3 OutBinormal; 26 | layout( location = 3 ) out f32vec3 OutTangent; 27 | layout( location = 4 ) out f32vec2 OutUV; 28 | 29 | layout( location = 5 ) out f32vec3 OutLightmapNormal; 30 | layout( location = 6 ) out f32vec2 OutLightmapUV; 31 | 32 | void main() 33 | { 34 | OutPosition = InPosition; 35 | OutNormal = InNormal; 36 | OutBinormal = InBinormal; 37 | OutTangent = InTangent; 38 | OutUV = InUV; 39 | 40 | OutLightmapNormal = InLightmapNormal; 41 | OutLightmapUV = InLightmapUV; 42 | 43 | gl_Position = Camera.ViewProjection * vec4( InPosition.xyz, 1.0 ); 44 | } -------------------------------------------------------------------------------- /include/Blam/TagVisitor.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | #include 5 | 6 | #include 7 | 8 | namespace Blam 9 | { 10 | 11 | struct TagVisitorProc 12 | { 13 | // Do not begin this TagVisitor until previous visitors have 
ran of this 14 | // type 15 | std::unordered_set DependClasses; 16 | 17 | // Visits tags of this designated primary class 18 | Blam::TagClass VisitClass = Blam::TagClass::None; 19 | 20 | // Ran before all tags within a map are about to be visited 21 | std::function BeginVisits; 22 | 23 | // Allow Tag visits to happen in parallel 24 | bool Parallel = false; 25 | 26 | // Visits a particular tag from a particular map file 27 | std::function< 28 | void(std::span, const Blam::MapFile&)> 29 | VisitTags; 30 | 31 | // Ran after all tags within a map have been visited 32 | std::function EndVisits; 33 | }; 34 | 35 | class TagVisiter 36 | { 37 | virtual ~TagVisiter() = 0; 38 | 39 | virtual std::vector GetTagVisitorProcs() = 0; 40 | }; 41 | 42 | // Dispatch a sequence of TagVisitorProc structures against all the tags within 43 | // a particular map file. Will automatically handle dispatching dependent Tag 44 | // Visitors in order 45 | void DispatchTagVisitors( 46 | std::span Visitors, const Blam::MapFile& Map 47 | ); 48 | 49 | } // namespace Blam -------------------------------------------------------------------------------- /source/Vulkan/ShaderModuleCache.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | #include 4 | 5 | namespace Vulkan 6 | { 7 | 8 | ShaderModuleCache::ShaderModuleCache(const Vulkan::Context& VulkanContext) 9 | : VulkanContext(VulkanContext) 10 | { 11 | } 12 | 13 | std::optional ShaderModuleCache::GetShaderModule( 14 | std::size_t Hash, std::span ShaderCode 15 | ) 16 | { 17 | // Cache hit 18 | if( ShaderModuleMap.contains(Hash) ) 19 | { 20 | return {ShaderModuleMap.at(Hash).get()}; 21 | } 22 | 23 | vk::ShaderModuleCreateInfo ShaderModuleInfo = {}; 24 | ShaderModuleInfo.pCode 25 | = reinterpret_cast(ShaderCode.data()); 26 | ShaderModuleInfo.codeSize = ShaderCode.size_bytes(); 27 | 28 | if( auto CreateResult 29 | = VulkanContext.LogicalDevice.createShaderModuleUnique(ShaderModuleInfo 30 | ); 31 | CreateResult.result == vk::Result::eSuccess ) 32 | { 33 | const auto Iterator 34 | = ShaderModuleMap.insert({Hash, std::move(CreateResult.value)}); 35 | 36 | return {Iterator.first->second.get()}; 37 | } 38 | else 39 | { 40 | // Todo: std::expected 41 | return {std::nullopt}; 42 | } 43 | } 44 | 45 | std::optional 46 | ShaderModuleCache::Create(const Vulkan::Context& VulkanContext) 47 | { 48 | ShaderModuleCache NewShaderModuleCache(VulkanContext); 49 | 50 | return {std::move(NewShaderModuleCache)}; 51 | } 52 | } // namespace Vulkan -------------------------------------------------------------------------------- /source/VkBlam/World.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | #include 4 | 5 | namespace VkBlam 6 | { 7 | World::World(const Blam::MapFile& MapFile) 8 | : MapFile(MapFile), WorldBoundMax(std::numeric_limits::min()), 9 | WorldBoundMin(std::numeric_limits::max()) 10 | { 11 | } 12 | 13 | World::~World() 14 | { 15 | } 16 | 17 | std::optional World::Create(const Blam::MapFile& MapFile) 18 | { 19 | World NewWorld(MapFile); 20 | 21 | // Get the total world bounds min/max 22 | for( const Blam::Tag::StructureBSP& CurBSPEntry : 23 | NewWorld.MapFile.GetScenarioBSPs() ) 24 | { 25 | const Blam::Tag& ScenarioBSP 26 | = CurBSPEntry.GetSBSP( 27 | CurBSPEntry.GetSBSPHeap(NewWorld.MapFile.GetMapData()) 28 | ); 29 | 30 | NewWorld.WorldBoundMin.x = glm::min( 31 | NewWorld.WorldBoundMin.x, ScenarioBSP.WorldBounds.BoundsX[0] 32 | ); 33 | NewWorld.WorldBoundMin.y = glm::min( 34 | 
NewWorld.WorldBoundMin.y, ScenarioBSP.WorldBounds.BoundsY[0] 35 | ); 36 | NewWorld.WorldBoundMin.z = glm::min( 37 | NewWorld.WorldBoundMin.z, ScenarioBSP.WorldBounds.BoundsZ[0] 38 | ); 39 | 40 | NewWorld.WorldBoundMax.x = glm::max( 41 | NewWorld.WorldBoundMax.x, ScenarioBSP.WorldBounds.BoundsX[1] 42 | ); 43 | NewWorld.WorldBoundMax.y = glm::max( 44 | NewWorld.WorldBoundMax.y, ScenarioBSP.WorldBounds.BoundsY[1] 45 | ); 46 | NewWorld.WorldBoundMax.z = glm::max( 47 | NewWorld.WorldBoundMax.z, ScenarioBSP.WorldBounds.BoundsZ[1] 48 | ); 49 | } 50 | 51 | return {std::move(NewWorld)}; 52 | } 53 | } // namespace VkBlam -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # vkblam [![GitHub license](https://img.shields.io/badge/license-MIT-blue.svg)](https://raw.githubusercontent.com/Wunkolo/vkblam/main/LICENSE) ![Github Actions Build Status](https://github.com/Wunkolo/vkblam/actions/workflows/ci.yml/badge.svg?branch=main) 2 | 3 | **vkblam** is a re-implementation of the Halo 1 blam engine's graphics library in vulkan. 4 | 5 | Currently this is highly experimental and only generates static images and not a real-time interactable window. 6 | 7 | Derelict|Prisoner 8 | -|- 9 | ![carousel](media/carousel.png) | ![prisoner](media/prisoner.png) 10 | 11 | ## Building 12 | 13 | vkblam requires the official Vulkan SDK library and tools to be installed: 14 | 15 | ||| 16 | |-|-| 17 | Windows|[LunarG VulkanSDK](https://vulkan.lunarg.com/sdk/home#windows) 18 | Mac|[LunarG VulkanSDK](https://vulkan.lunarg.com/sdk/home#mac) 19 | Arch|[vulkan-devel](https://archlinux.org/groups/x86_64/vulkan-devel/) 20 | Ubuntu|`libvulkan-dev spirv-tools vulkan-tools` 21 | 22 | Once all dependencies are satisfied 23 | [a typical cmake out-of-source build may be done](https://preshing.com/20170511/how-to-build-a-cmake-based-project/#running-cmake-from-the-command-line). 24 | 25 | ``` 26 | git clone --recursive https://github.com/Wunkolo/vkblam 27 | mkdir build 28 | cd build 29 | cmake .. 30 | cmake --build . 31 | ``` 32 | 33 | ## Acknowledgements 34 | 35 | * [Reclaimers](https://c20.reclaimers.net/) 36 | * [Assembly](https://github.com/XboxChaos/Assembly) 37 | * [Sparkedit](https://github.com/HaloMods/SparkEdit) 38 | * [Swordedit](https://github.com/ChadSki/Swordedit) 39 | * [Prometheus](https://github.com/HaloMods/Prometheus) 40 | -------------------------------------------------------------------------------- /include/Vulkan/Memory.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | 5 | #include 6 | #include 7 | namespace Vulkan 8 | { 9 | 10 | // Will try to find a memory type that is suitable for the given requirements. 11 | // Returns -1 if no suitable memory type was found. 12 | std::int32_t FindMemoryTypeIndex( 13 | vk::PhysicalDevice PhysicalDevice, std::uint32_t MemoryTypeMask, 14 | vk::MemoryPropertyFlags MemoryProperties, 15 | vk::MemoryPropertyFlags MemoryExcludeProperties 16 | = vk::MemoryPropertyFlagBits::eProtected 17 | ); 18 | 19 | // Given an array of valid Vulkan image-handles or buffer-handles, these 20 | // functions will allocate a single block of device-memory for all of them 21 | // and bind them consecutively. 22 | // 23 | // There may be a case that all the buffers or images cannot be allocated 24 | // to the same device memory due to their required memory-type. 
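// [Editor note] Illustrative usage sketch, not part of the original header.
// Assumes an initialized Vulkan::Context `Ctx` and pre-created images (the
// handle names are hypothetical; the span element type is taken to be
// vk::Image, as the surrounding declarations suggest):
//
//   std::array<vk::Image, 2> Images = {AlbedoImage.get(), LightmapImage.get()};
//   auto [Result, ImageHeapMemory] = CommitImageHeap(
//       Ctx.LogicalDevice, Ctx.PhysicalDevice, Images);
//   if( Result == vk::Result::eSuccess )
//   {
//       // Both images are now bound to consecutive offsets of one allocation;
//       // keep `ImageHeapMemory` alive for as long as the images are in use.
//   }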
25 | std::tuple CommitImageHeap( 26 | vk::Device Device, vk::PhysicalDevice PhysicalDevice, 27 | const std::span Images, 28 | vk::MemoryPropertyFlags MemoryProperties 29 | = vk::MemoryPropertyFlagBits::eDeviceLocal, 30 | vk::MemoryPropertyFlags MemoryExcludeProperties 31 | = vk::MemoryPropertyFlagBits::eProtected 32 | ); 33 | 34 | std::tuple CommitBufferHeap( 35 | vk::Device Device, vk::PhysicalDevice PhysicalDevice, 36 | const std::span Buffers, 37 | vk::MemoryPropertyFlags MemoryProperties 38 | = vk::MemoryPropertyFlagBits::eDeviceLocal, 39 | vk::MemoryPropertyFlags MemoryExcludeProperties 40 | = vk::MemoryPropertyFlagBits::eProtected 41 | ); 42 | } // namespace Vulkan -------------------------------------------------------------------------------- /include/Vulkan/Debug.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | 5 | #include 6 | #include 7 | 8 | namespace Vulkan 9 | { 10 | void SetObjectName( 11 | vk::Device Device, vk::ObjectType ObjectType, const void* ObjectHandle, 12 | const char* Format, ... 13 | ); 14 | 15 | template< 16 | typename T, 17 | typename = std::enable_if_t::value == true>, 18 | typename... ArgsT> 19 | inline void SetObjectName( 20 | vk::Device Device, const T ObjectHandle, const char* Format, ArgsT&&... Args 21 | ) 22 | { 23 | SetObjectName( 24 | Device, T::objectType, ObjectHandle, Format, 25 | std::forward(Args)... 26 | ); 27 | } 28 | 29 | void BeginDebugLabel( 30 | vk::CommandBuffer CommandBuffer, const std::array& Color, 31 | const char* Format, ... 32 | ); 33 | 34 | void InsertDebugLabel( 35 | vk::CommandBuffer CommandBuffer, const std::array& Color, 36 | const char* Format, ... 37 | ); 38 | 39 | void EndDebugLabel(vk::CommandBuffer CommandBuffer); 40 | 41 | class DebugLabelScope 42 | { 43 | private: 44 | const vk::CommandBuffer CommandBuffer; 45 | 46 | public: 47 | template 48 | DebugLabelScope( 49 | vk::CommandBuffer TargetCommandBuffer, 50 | const std::array& Color, const char* Format, ArgsT&&... Args 51 | ) 52 | : CommandBuffer(TargetCommandBuffer) 53 | { 54 | BeginDebugLabel( 55 | CommandBuffer, Color, Format, std::forward(Args)... 56 | ); 57 | } 58 | 59 | template 60 | void operator()( 61 | const std::array& Color, const char* Format, ArgsT&&... Args 62 | ) const 63 | { 64 | InsertDebugLabel( 65 | CommandBuffer, Color, Format, std::forward(Args)... 66 | ); 67 | } 68 | 69 | ~DebugLabelScope() 70 | { 71 | EndDebugLabel(CommandBuffer); 72 | } 73 | }; 74 | 75 | } // namespace Vulkan -------------------------------------------------------------------------------- /include/Vulkan/DescriptorHeap.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | 5 | #include 6 | #include 7 | 8 | namespace Vulkan 9 | { 10 | 11 | // Implements a basic heap of descriptor sets given a layout of particular 12 | // bindings. Create a descriptor set by providing a list of bindings and it will 13 | // automatically create both the pool, layout, and maintail a heap of descriptor 14 | // sets. Descriptor sets will be reused and recycled. Assume that newly 15 | // allocated descriptor sets are in an undefined state. 
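// [Editor note] Illustrative usage sketch, not part of the original header.
// The binding layout shown is hypothetical and the binding type is assumed to
// be vk::DescriptorSetLayoutBinding:
//
//   const vk::DescriptorSetLayoutBinding Bindings[] = {
//       {0, vk::DescriptorType::eCombinedImageSampler, 1,
//        vk::ShaderStageFlagBits::eFragment},
//   };
//   if( auto Heap = DescriptorHeap::Create(VulkanContext, Bindings) )
//   {
//       if( auto Set = Heap->AllocateDescriptorSet() )
//       {
//           // ...write `*Set` (e.g. via DescriptorUpdateBatch), bind it, then:
//           Heap->FreeDescriptorSet(*Set);
//       }
//   }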
16 | class DescriptorHeap 17 | { 18 | private: 19 | const Vulkan::Context& VulkanContext; 20 | 21 | vk::UniqueDescriptorPool DescriptorPool; 22 | vk::UniqueDescriptorSetLayout DescriptorSetLayout; 23 | std::vector DescriptorSets; 24 | 25 | std::vector Bindings; 26 | 27 | std::vector AllocationMap; 28 | 29 | explicit DescriptorHeap(const Vulkan::Context& VulkanContext); 30 | 31 | public: 32 | ~DescriptorHeap() = default; 33 | 34 | DescriptorHeap(DescriptorHeap&&) = default; 35 | 36 | const vk::DescriptorPool& GetDescriptorPool() const 37 | { 38 | return DescriptorPool.get(); 39 | }; 40 | 41 | const vk::DescriptorSetLayout& GetDescriptorSetLayout() const 42 | { 43 | return DescriptorSetLayout.get(); 44 | }; 45 | 46 | const std::span GetDescriptorSets() const 47 | { 48 | return DescriptorSets; 49 | }; 50 | 51 | std::span GetBindings() const 52 | { 53 | return Bindings; 54 | }; 55 | 56 | std::optional AllocateDescriptorSet(); 57 | bool FreeDescriptorSet(vk::DescriptorSet Set); 58 | 59 | static std::optional Create( 60 | const Vulkan::Context& VulkanContext, 61 | std::span Bindings, 62 | std::uint16_t DescriptorHeapCount = 1024 63 | ); 64 | }; 65 | 66 | } // namespace Vulkan -------------------------------------------------------------------------------- /include/VkBlam/Renderer.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | 5 | #include 6 | #include 7 | 8 | #include 9 | #include 10 | #include 11 | #include 12 | #include 13 | 14 | #include 15 | #include 16 | 17 | namespace VkBlam 18 | { 19 | 20 | using namespace Common::Literals; 21 | 22 | struct RendererConfig 23 | { 24 | vk::DeviceSize StreamBufferSize = 128_MiB; 25 | 26 | std::size_t DescriptorWriteMax = 256; 27 | std::size_t DescriptorCopyMax = 256; 28 | }; 29 | 30 | // Encapsulates the top-level global state of the renderer. 
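// [Editor note] Illustrative creation sketch, not part of the original header
// (the chosen stream-buffer size is arbitrary):
//
//   RendererConfig Config = {};
//   Config.StreamBufferSize = 64_MiB;
//   if( auto NewRenderer = Renderer::Create(VulkanContext, Config) )
//   {
//       Vulkan::StreamBuffer& Stream = NewRenderer->GetStreamBuffer();
//       // ...create a VkBlam::Scene against this Renderer and a World...
//   }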
31 | class Renderer 32 | { 33 | private: 34 | const Vulkan::Context& VulkanContext; 35 | 36 | vk::UniqueSampler DefaultSampler = {}; 37 | 38 | // Temporary 39 | std::unordered_map 40 | DefaultRenderPasses = {}; 41 | 42 | std::unique_ptr StreamBuffer; 43 | std::unique_ptr SamplerCache; 44 | std::unique_ptr ShaderModuleCache; 45 | std::unique_ptr DescriptorUpdateBatch; 46 | 47 | Renderer(const Vulkan::Context& VulkanContext); 48 | 49 | public: 50 | ~Renderer(); 51 | 52 | Renderer(Renderer&&) = default; 53 | 54 | const Vulkan::Context& GetVulkanContext() const 55 | { 56 | return VulkanContext; 57 | } 58 | 59 | Vulkan::StreamBuffer& GetStreamBuffer() const 60 | { 61 | return *StreamBuffer.get(); 62 | } 63 | 64 | Vulkan::SamplerCache& GetSamplerCache() const 65 | { 66 | return *SamplerCache.get(); 67 | } 68 | 69 | Vulkan::ShaderModuleCache& GetShaderModuleCache() const 70 | { 71 | return *ShaderModuleCache.get(); 72 | } 73 | 74 | Vulkan::DescriptorUpdateBatch& GetDescriptorUpdateBatch() const 75 | { 76 | return *DescriptorUpdateBatch.get(); 77 | } 78 | 79 | const vk::RenderPass& 80 | GetDefaultRenderPass(vk::SampleCountFlagBits SampleCount); 81 | 82 | static std::optional Create( 83 | const Vulkan::Context& VulkanContext, const RendererConfig& Config = {} 84 | ); 85 | }; 86 | } // namespace VkBlam -------------------------------------------------------------------------------- /include/VkBlam/Scene.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | 5 | #include 6 | #include 7 | #include 8 | 9 | #include 10 | 11 | namespace VkBlam 12 | { 13 | 14 | // All rendering state associated with a world. 15 | class Scene 16 | { 17 | private: 18 | const World& TargetWorld; 19 | Renderer& TargetRenderer; 20 | 21 | Scene(Renderer& TargetRenderer, const World& TargetWorld); 22 | 23 | // Temporary 24 | std::unordered_map 25 | ShaderEnvironmentDescriptors; 26 | std::unique_ptr ShaderEnvironmentDescriptorPool; 27 | 28 | std::unique_ptr DebugDrawDescriptorPool; 29 | 30 | vk::UniquePipeline DebugDrawPipeline = {}; 31 | vk::UniquePipelineLayout DebugDrawPipelineLayout = {}; 32 | 33 | std::unique_ptr UnlitDescriptorPool; 34 | 35 | vk::UniquePipeline UnlitDrawPipeline = {}; 36 | vk::UniquePipelineLayout UnlitDrawPipelineLayout = {}; 37 | 38 | vk::ShaderModule DefaultVertexShaderModule; 39 | vk::ShaderModule DefaultFragmentShaderModule; 40 | vk::ShaderModule UnlitFragmentShaderModule; 41 | 42 | // Contains _both_ the vertex buffers and the index buffer 43 | vk::UniqueDeviceMemory BSPGeometryMemory = {}; 44 | 45 | vk::UniqueBuffer BSPVertexBuffer = {}; 46 | vk::UniqueBuffer BSPLightmapVertexBuffer = {}; 47 | vk::UniqueBuffer BSPIndexBuffer = {}; 48 | 49 | struct LightmapMesh 50 | { 51 | std::uint32_t VertexIndexOffset = 0; 52 | std::uint32_t IndexCount = 0; 53 | std::uint32_t IndexOffset = 0; 54 | 55 | std::span VertexData; 56 | std::span LightmapVertexData; 57 | 58 | std::uint32_t ShaderTag; 59 | 60 | // Some lightmap meshes don't have a lightmap! 
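		// [Editor note] std::nullopt in the two fields below marks a mesh that
		// is drawn without any lightmap.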
61 | std::optional LightmapTag; 62 | std::optional LightmapIndex; 63 | }; 64 | std::vector LightmapMeshs; 65 | 66 | vk::UniqueDeviceMemory BitmapHeapMemory = {}; 67 | BitmapHeapT BitmapHeap = {}; 68 | 69 | std::unique_ptr SceneDescriptorPool; 70 | 71 | vk::DescriptorSet CurSceneDescriptor = {}; 72 | 73 | public: 74 | ~Scene(); 75 | 76 | Scene(Scene&&) = default; 77 | 78 | void Render(const SceneView& View, vk::CommandBuffer CommandBuffer); 79 | 80 | static std::optional 81 | Create(Renderer& TargetRenderer, const World& TargetWorld); 82 | }; 83 | } // namespace VkBlam -------------------------------------------------------------------------------- /include/VkBlam/VkBlam.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | 9 | #include 10 | #include 11 | #include 12 | 13 | #define GLM_FORCE_RADIANS 14 | // #define GLM_FORCE_DEPTH_ZERO_TO_ONE 15 | // #define GLM_FORCE_LEFT_HANDED 16 | #define GLM_ENABLE_EXPERIMENTAL 17 | #include 18 | #include 19 | #include 20 | #include 21 | 22 | namespace VkBlam 23 | { 24 | 25 | // Remove me 26 | constexpr vk::SampleCountFlagBits RenderSamples = vk::SampleCountFlagBits::e4; 27 | 28 | // Temporary structure so that the image heap can be passed around 29 | struct BitmapHeapT 30 | { 31 | struct Bitmap 32 | { 33 | vk::UniqueImage Image; 34 | vk::UniqueImageView View; 35 | }; 36 | std::unordered_map> Bitmaps; 37 | 38 | // From the globals tag 39 | std::uint32_t Default2D; 40 | std::uint32_t Default3D; 41 | std::uint32_t DefaultCube; 42 | 43 | // Remove me 44 | std::unordered_map< 45 | std::uint32_t, std::map> 46 | Sets; 47 | }; 48 | 49 | std::optional> OpenResource(const std::string& Path); 50 | 51 | std::vector 52 | GetVertexInputBindings(std::span Formats); 53 | std::vector 54 | GetVertexInputAttributes(std::span Formats); 55 | 56 | inline std::tuple< 57 | std::vector, 58 | std::vector> 59 | GetVertexInputDescriptions(std::span Formats) 60 | { 61 | return std::make_tuple( 62 | GetVertexInputBindings(Formats), GetVertexInputAttributes(Formats) 63 | ); 64 | } 65 | 66 | // Abstracts the way that halo utilizes its samplers 67 | vk::SamplerCreateInfo Sampler2D(bool Filtered = true, bool Clamp = false); 68 | vk::SamplerCreateInfo SamplerCube(); 69 | 70 | //// Must match vkBlam.glsl structures 71 | 72 | struct CameraGlobals 73 | { 74 | alignas(16) glm::f32mat4x4 View; 75 | alignas(16) glm::f32mat4x4 Projection; 76 | alignas(16) glm::f32mat4x4 ViewProjection; 77 | }; 78 | 79 | struct SimulationGlobals 80 | { 81 | alignas(16) glm::float32_t Time; 82 | }; 83 | struct PassGlobals 84 | { 85 | alignas(16) glm::f32vec4 ScreenSize; // {width, height, 1/width, 1/height} 86 | }; 87 | 88 | } // namespace VkBlam -------------------------------------------------------------------------------- /include/Common/Endian.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | namespace Common 4 | { 5 | // x86 6 | #if defined(__i386__) || defined(__x86_64__) 7 | #if defined(_MSC_VER) 8 | 9 | #include 10 | 11 | inline std::uint64_t Swap64(std::uint64_t x) 12 | { 13 | return _byteswap_uint64(x); 14 | } 15 | 16 | inline std::uint32_t Swap32(std::uint32_t x) 17 | { 18 | return _byteswap_ulong(x); 19 | } 20 | 21 | inline std::uint16_t Swap16(std::uint16_t x) 22 | { 23 | return _byteswap_ushort(x); 24 | } 25 | 26 | #elif defined(__GNUC__) || defined(__clang__) 27 | 28 | #include 29 | 30 | inline std::uint64_t 
Swap64(std::uint64_t x) 31 | { 32 | return __builtin_bswap64(x); 33 | } 34 | 35 | inline std::uint32_t Swap32(std::uint32_t x) 36 | { 37 | return __builtin_bswap32(x); 38 | } 39 | 40 | inline std::uint16_t Swap16(std::uint16_t x) 41 | { 42 | return __builtin_bswap16(x); 43 | } 44 | 45 | #endif 46 | 47 | // ARM 48 | #elif defined(__ARM_NEON) 49 | 50 | #include 51 | 52 | #if defined(_MSC_VER) 53 | 54 | inline std::uint64_t Swap64(std::uint64_t x) 55 | { 56 | return _byteswap_uint64(x); 57 | } 58 | 59 | inline std::uint32_t Swap32(std::uint32_t x) 60 | { 61 | return _byteswap_ulong(x); 62 | } 63 | 64 | inline std::uint16_t Swap16(std::uint16_t x) 65 | { 66 | return _byteswap_ushort(x); 67 | } 68 | 69 | #elif defined(__GNUC__) || defined(__clang__) 70 | 71 | inline std::uint64_t Swap64(std::uint64_t x) 72 | { 73 | return __builtin_bswap64(x); 74 | } 75 | 76 | inline std::uint32_t Swap32(std::uint32_t x) 77 | { 78 | return __builtin_bswap32(x); 79 | } 80 | 81 | inline std::uint16_t Swap16(std::uint16_t x) 82 | { 83 | return __builtin_bswap16(x); 84 | } 85 | 86 | #endif 87 | 88 | // Pure 89 | #else 90 | 91 | inline std::uint64_t Swap64(std::uint64_t x) 92 | { 93 | return ( 94 | ((x & 0x00000000000000FF) << 56) | ((x & 0x000000000000FF00) << 40) 95 | | ((x & 0x0000000000FF0000) << 24) | ((x & 0x00000000FF000000) << 8) 96 | | ((x & 0x000000FF00000000) >> 8) | ((x & 0x0000FF0000000000) >> 24) 97 | | ((x & 0x00FF000000000000) >> 40) | ((x & 0xFF00000000000000) >> 56) 98 | ); 99 | } 100 | 101 | inline std::uint32_t Swap32(std::uint32_t x) 102 | { 103 | return ( 104 | ((x & 0x000000FF) << 24) | ((x & 0x0000FF00) << 8) 105 | | ((x & 0x00FF0000) >> 8) | ((x & 0xFF000000) >> 24) 106 | ); 107 | } 108 | 109 | inline std::uint16_t Swap16(std::uint16_t x) 110 | { 111 | return (((x & 0x00FF) << 8) | ((x & 0xFF00) >> 8)); 112 | } 113 | 114 | #endif 115 | } // namespace Common -------------------------------------------------------------------------------- /include/Vulkan/DescriptorUpdateBatch.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | 5 | #include 6 | #include 7 | #include 8 | 9 | namespace Vulkan 10 | { 11 | // Implements a re-usable structure for batching up descriptor writes with a 12 | // finite amount of space for both convenience and to reduce the overall amount 13 | // of API calls to `vkUpdateDescriptorSets` 14 | class DescriptorUpdateBatch 15 | { 16 | private: 17 | const Vulkan::Context& VulkanContext; 18 | 19 | const std::size_t DescriptorWriteMax; 20 | const std::size_t DescriptorCopyMax; 21 | 22 | using DescriptorInfoUnion = std::variant< 23 | vk::DescriptorImageInfo, vk::DescriptorBufferInfo, vk::BufferView>; 24 | 25 | // Todo: Maybe some kind of hash so that these structures can be re-used 26 | // among descriptor writes. 
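	// [Editor note] Fixed-capacity scratch arrays for the pending descriptor
	// writes/copies; DescriptorWriteEnd / DescriptorCopyEnd below count how
	// many entries are currently queued for the next Flush().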
27 | std::unique_ptr DescriptorInfos; 28 | std::unique_ptr DescriptorWrites; 29 | std::unique_ptr DescriptorCopies; 30 | 31 | std::size_t DescriptorWriteEnd = 0; 32 | std::size_t DescriptorCopyEnd = 0; 33 | 34 | DescriptorUpdateBatch( 35 | const Vulkan::Context& VulkanContext, std::size_t DescriptorWriteMax, 36 | std::size_t DescriptorCopyMax 37 | ) 38 | : VulkanContext(VulkanContext), DescriptorWriteMax(DescriptorWriteMax), 39 | DescriptorCopyMax(DescriptorCopyMax) 40 | { 41 | } 42 | 43 | public: 44 | ~DescriptorUpdateBatch() = default; 45 | 46 | DescriptorUpdateBatch(DescriptorUpdateBatch&&) = default; 47 | 48 | void Flush(); 49 | 50 | void AddImage( 51 | vk::DescriptorSet TargetDescriptor, std::uint8_t TargetBinding, 52 | vk::ImageView ImageView, 53 | vk::ImageLayout ImageLayout = vk::ImageLayout::eGeneral 54 | ); 55 | void AddSampler( 56 | vk::DescriptorSet TargetDescriptor, std::uint8_t TargetBinding, 57 | vk::Sampler Sampler 58 | ); 59 | 60 | void AddImageSampler( 61 | vk::DescriptorSet TargetDescriptor, std::uint8_t TargetBinding, 62 | vk::ImageView ImageView, vk::Sampler Sampler, 63 | vk::ImageLayout ImageLayout = vk::ImageLayout::eShaderReadOnlyOptimal 64 | ); 65 | void AddBuffer( 66 | vk::DescriptorSet TargetDescriptor, std::uint8_t TargetBinding, 67 | vk::Buffer Buffer, vk::DeviceSize Offset, 68 | vk::DeviceSize Size = VK_WHOLE_SIZE 69 | ); 70 | 71 | void CopyBinding( 72 | vk::DescriptorSet SourceDescriptor, vk::DescriptorSet TargetDescriptor, 73 | std::uint8_t SourceBinding, std::uint8_t TargetBinding, 74 | std::uint8_t SourceArrayElement = 0, 75 | std::uint8_t TargetArrayElement = 0, std::uint8_t DescriptorCount = 1 76 | ); 77 | 78 | static std::optional Create( 79 | const Vulkan::Context& VulkanContext, 80 | std::size_t DescriptorWriteMax = 256, 81 | std::size_t DescriptorCopyMax = 256 82 | ); 83 | }; 84 | } // namespace Vulkan -------------------------------------------------------------------------------- /.clang-format: -------------------------------------------------------------------------------- 1 | --- 2 | Language: Cpp 3 | AccessModifierOffset: -4 4 | AlignAfterOpenBracket: BlockIndent 5 | AlignConsecutiveAssignments: true 6 | AlignConsecutiveDeclarations: true 7 | AlignConsecutiveBitFields: true 8 | AlignEscapedNewlines: Right 9 | AlignOperands: AlignAfterOperator 10 | AlignTrailingComments: true 11 | AllowAllParametersOfDeclarationOnNextLine: true 12 | AllowShortBlocksOnASingleLine: false 13 | AllowShortCaseLabelsOnASingleLine: false 14 | AllowShortFunctionsOnASingleLine: None 15 | AllowShortIfStatementsOnASingleLine: false 16 | AllowShortLoopsOnASingleLine: false 17 | AlwaysBreakAfterReturnType: None 18 | AlwaysBreakBeforeMultilineStrings: true 19 | AlwaysBreakTemplateDeclarations: true 20 | BinPackArguments: true 21 | BinPackParameters: true 22 | BitFieldColonSpacing: Both 23 | BreakBeforeBraces: Custom 24 | BraceWrapping: 25 | AfterCaseLabel: true 26 | AfterClass: true 27 | AfterControlStatement: true 28 | AfterEnum: true 29 | AfterFunction: true 30 | AfterNamespace: true 31 | AfterObjCDeclaration: true 32 | AfterStruct: true 33 | AfterUnion: true 34 | BeforeCatch: true 35 | BeforeElse: true 36 | IndentBraces: false 37 | SplitEmptyFunction: true 38 | SplitEmptyRecord: true 39 | SplitEmptyNamespace: true 40 | BreakBeforeBinaryOperators: All 41 | BreakBeforeInheritanceComma: false 42 | BreakBeforeTernaryOperators: true 43 | BreakConstructorInitializers: BeforeColon 44 | BreakStringLiterals: true 45 | ColumnLimit: 80 46 | CompactNamespaces: false 47 | 
ConstructorInitializerAllOnOneLineOrOnePerLine: false 48 | ConstructorInitializerIndentWidth: 4 49 | ContinuationIndentWidth: 4 50 | Cpp11BracedListStyle: true 51 | DerivePointerAlignment: false 52 | DisableFormat: false 53 | ExperimentalAutoDetectBinPacking: false 54 | FixNamespaceComments: true 55 | IndentCaseLabels: false 56 | IndentWidth: 4 57 | IndentWrappedFunctionNames: true 58 | KeepEmptyLinesAtTheStartOfBlocks: true 59 | MacroBlockBegin: '' 60 | MacroBlockEnd: '' 61 | MaxEmptyLinesToKeep: 1 62 | NamespaceIndentation: None 63 | PenaltyBreakAssignment: 4 64 | PenaltyBreakBeforeFirstCallParameter: 19 65 | PenaltyBreakComment: 300 66 | PenaltyBreakFirstLessLess: 120 67 | PenaltyBreakString: 1000 68 | PenaltyExcessCharacter: 1000000 69 | PenaltyReturnTypeOnItsOwnLine: 60 70 | PointerAlignment: Left 71 | ReflowComments: true 72 | SortIncludes: true 73 | SortUsingDeclarations: true 74 | SpaceAfterCStyleCast: false 75 | SpaceAfterTemplateKeyword: false 76 | SpaceBeforeAssignmentOperators: true 77 | SpaceBeforeParens: Never 78 | SpaceInEmptyParentheses: false 79 | SpacesBeforeTrailingComments: 1 80 | SpacesInAngles: false 81 | SpacesInContainerLiterals: true 82 | SpacesInCStyleCastParentheses: false 83 | SpacesInParentheses: false 84 | SpacesInConditionalStatement: true 85 | SpacesInSquareBrackets: false 86 | Standard: c++20 87 | TabWidth: 4 88 | UseTab: ForContinuationAndIndentation 89 | ... 90 | -------------------------------------------------------------------------------- /include/Vulkan/StreamBuffer.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | 5 | #include 6 | #include 7 | #include 8 | #include 9 | 10 | namespace Vulkan 11 | { 12 | 13 | // A basic stream buffer implementation to handle uploading and download to 14 | // buffers and images This implementation does not handle image layout 15 | // transitions or barriers of any sort and must be handled externally 16 | class StreamBuffer 17 | { 18 | private: 19 | const Vulkan::Context& VulkanContext; 20 | const vk::DeviceSize BufferSize; 21 | 22 | // This is a timeline semaphore with a value that increases with each flush. 23 | vk::UniqueSemaphore FlushSemaphore; 24 | std::uint64_t FlushTick; 25 | 26 | vk::UniqueBuffer RingBuffer; 27 | vk::UniqueDeviceMemory RingBufferMemory; 28 | 29 | // The host-mapped vulkan memory for the ring buffer 30 | std::span RingMemoryMapped; 31 | // Current write-point for the ring buffer. 
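	// (Usage sketch with hypothetical names `Stream`, `Bytes`, and `Dst`,
	// based on the declarations below:
	//   const std::uint64_t Tick = Stream.QueueBufferUpload(Bytes, Dst);
	//   Stream.Flush();
	//   // ...then wait for GetSemaphore() to reach `Tick` before using `Dst`.
	// )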
32 | std::size_t RingOffset; 33 | 34 | vk::UniqueCommandPool CommandPool; 35 | std::vector CommandBuffers; 36 | std::vector CommandBufferTimeStamps; 37 | 38 | std::unordered_map> BufferCopies; 39 | std::unordered_map> ImageCopies; 40 | 41 | std::vector ImagePreBarrier; 42 | std::vector ImagePostBarrier; 43 | 44 | public: 45 | StreamBuffer( 46 | const Vulkan::Context& VulkanContext, vk::DeviceSize BufferSize 47 | ); 48 | 49 | ~StreamBuffer(); 50 | 51 | // Queue an Upload of the passed in span of bytes to the target buffer at 52 | // the designated offset 53 | // Upon success, returns the timeline semaphore value to listen 54 | // for to know when this transfer is complete 55 | std::uint64_t QueueBufferUpload( 56 | const std::span Data, vk::Buffer Buffer, 57 | vk::DeviceSize Offset = 0 58 | ); 59 | 60 | // Queue an Upload of the passed in span of bytes to the target image at the 61 | // specified offset, extent, and subresource 62 | // Upon success, returns the timeline semaphore value to listen 63 | // for to know when this transfer is complete 64 | std::uint64_t QueueImageUpload( 65 | const std::span Data, vk::Image Image, 66 | vk::Offset3D Offset, vk::Extent3D Extent, 67 | vk::ImageSubresourceLayers SubresourceLayers 68 | = vk::ImageSubresourceLayers(vk::ImageAspectFlagBits::eColor, 0, 0, 1), 69 | vk::ImageLayout DstLayout = vk::ImageLayout::eTransferDstOptimal 70 | ); 71 | 72 | // Flush all pending uploads and downloads to the specified queue 73 | // and returns the semaphore value to wait for to know when completion is 74 | // done. 75 | std::uint64_t Flush(); 76 | 77 | // The timeline-semaphore used to synchronize uploads 78 | const vk::Semaphore& GetSemaphore() const; 79 | }; 80 | 81 | } // namespace Vulkan -------------------------------------------------------------------------------- /include/Blam/Blam.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | 10 | #include "Enums.hpp" 11 | #include "Tags.hpp" 12 | #include "Types.hpp" 13 | 14 | #include "Util.hpp" 15 | 16 | namespace Blam 17 | { 18 | 19 | // Encapsulates a halo map-cache file 20 | class MapFile 21 | { 22 | private: 23 | const std::span MapFileData; 24 | const std::span BitmapFileData; 25 | 26 | public: 27 | MapFile( 28 | std::span MapFileData, 29 | std::span BitmapFileData 30 | ); 31 | 32 | const Blam::MapHeader& MapHeader; 33 | const Blam::TagIndexHeader& TagIndexHeader; 34 | 35 | const VirtualHeap TagHeap; 36 | 37 | const std::span& GetMapData() const 38 | { 39 | return MapFileData; 40 | } 41 | 42 | const std::span& GetBitmapData() const 43 | { 44 | return BitmapFileData; 45 | } 46 | 47 | std::span GetTagIndexArray() const; 48 | 49 | const TagIndexEntry* GetTagIndexEntry(std::uint16_t TagIndex) const; 50 | 51 | template 52 | void VisitTagClass( 53 | const std::function&)>& 54 | Func 55 | ) const 56 | { 57 | for( const auto& CurTagEntry : GetTagIndexArray() ) 58 | { 59 | if( CurTagEntry.ClassPrimary == TagClassT ) 60 | { 61 | const auto& CurTag = TagHeap.Read>( 62 | CurTagEntry.TagDataVirtualOffset 63 | ); 64 | Func(CurTagEntry, CurTag); 65 | } 66 | } 67 | } 68 | 69 | template 70 | const Tag* GetTag(std::uint32_t TagID) const 71 | { 72 | const TagIndexEntry* TagIndexEntryPtr 73 | = GetTagIndexEntry(std::uint16_t(TagID)); 74 | if( !TagIndexEntryPtr ) 75 | { 76 | return nullptr; 77 | } 78 | 79 | if( TagIndexEntryPtr->TagID != TagID ) 80 | { 81 | // Salts don't match 82 | return nullptr; 
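			// (Reading of the check above: GetTagIndexEntry only uses the low
			// 16 bits of the ID as an index into the tag-index array, while
			// the full 32-bit TagID also carries a salt; an ID whose index
			// matches but whose salt does not is rejected here instead of
			// silently returning the wrong tag.)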
83 | } 84 | 85 | return &TagHeap.Read>( 86 | TagIndexEntryPtr->TagDataVirtualOffset 87 | ); 88 | } 89 | 90 | std::string_view GetTagName(std::uint32_t TagID) const 91 | { 92 | const TagIndexEntry* TagIndexEntryPtr = GetTagIndexEntry(TagID); 93 | if( !TagIndexEntryPtr ) 94 | { 95 | return {}; 96 | } 97 | 98 | return &TagHeap.Read(TagIndexEntryPtr->TagPathVirtualOffset); 99 | } 100 | 101 | // Helpers 102 | const Tag* GetScenarioTag() const 103 | { 104 | return GetTag(TagIndexHeader.BaseTag); 105 | } 106 | 107 | std::span::StructureBSP> 108 | GetScenarioBSPs() const 109 | { 110 | if( const auto* ScenarioTag = GetScenarioTag(); ScenarioTag ) 111 | { 112 | return TagHeap.GetBlock(ScenarioTag->StructureBSPs); 113 | } 114 | return {}; 115 | } 116 | }; 117 | 118 | } // namespace Blam 119 | -------------------------------------------------------------------------------- /source/Blam/TagVisitor.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | 10 | namespace Blam 11 | { 12 | 13 | void DispatchTagVisitors( 14 | std::span Visitors, const Blam::MapFile& Map 15 | ) 16 | { 17 | // Set of all tag-classes that are ever going to be visited 18 | std::unordered_set VisitClasses; 19 | 20 | std::unordered_map> Tags; 21 | 22 | // Created a new list of each visitor such that they are sorted by their 23 | // dependencies 24 | std::vector VisitorDAG; 25 | VisitorDAG.reserve(Visitors.size()); 26 | 27 | for( const auto& CurVisitor : Visitors ) 28 | { 29 | VisitClasses.insert(CurVisitor.VisitClass); 30 | 31 | VisitorDAG.push_back(&CurVisitor); 32 | } 33 | 34 | std::stable_sort( 35 | VisitorDAG.begin(), VisitorDAG.end(), 36 | [](const TagVisitorProc* A, const TagVisitorProc* B) -> bool { 37 | // A should go before B if B depends on A, otherwise just keep the 38 | // order as it is 39 | return B->DependClasses.contains(A->VisitClass); 40 | } 41 | ); 42 | 43 | // Collect all the tags to be visited 44 | for( const auto& CurTagEntry : Map.GetTagIndexArray() ) 45 | { 46 | if( VisitClasses.contains(CurTagEntry.ClassPrimary) ) 47 | { 48 | Tags[CurTagEntry.ClassPrimary].push_back(CurTagEntry); 49 | } 50 | } 51 | 52 | const std::size_t ThreadCount = std::thread::hardware_concurrency(); 53 | 54 | std::vector ThreadPool(ThreadCount); 55 | 56 | for( const auto& CurVisitor : VisitorDAG ) 57 | { 58 | if( CurVisitor->BeginVisits ) 59 | { 60 | CurVisitor->BeginVisits(Map); 61 | } 62 | 63 | if( CurVisitor->VisitTags ) 64 | { 65 | auto TagList = std::span(Tags.at(CurVisitor->VisitClass)); 66 | 67 | const std::size_t TagsPerThread 68 | = std::max(TagList.size() / ThreadCount, 1); 69 | 70 | // If there are less tags than threads, then emit a smaller amount 71 | // of threads 72 | const std::size_t CurVisitorThreads 73 | = std::min(TagsPerThread, TagList.size()); 74 | 75 | if( CurVisitor->Parallel ) 76 | { 77 | for( auto& Thread : 78 | std::span(ThreadPool).first(CurVisitorThreads) ) 79 | { 80 | const std::size_t TagsThisThread 81 | = std::min(TagsPerThread, TagList.size()); 82 | 83 | if( TagsThisThread == 0 ) 84 | continue; 85 | 86 | std::mutex Barrier; 87 | 88 | auto ThreadProc 89 | = [&Barrier]( 90 | const Blam::TagVisitorProc* Visitor, 91 | const Blam::MapFile& Map, 92 | std::span Tags 93 | ) -> void { 94 | std::scoped_lock Lock{Barrier}; 95 | Visitor->VisitTags(Tags, Map); 96 | }; 97 | Thread = std::thread( 98 | ThreadProc, CurVisitor, std::ref(Map), 99 | TagList.first(TagsPerThread) 100 | ); 101 | 
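					// Each worker thread is handed a TagsPerThread-sized
					// window of the tag list; the span is advanced below so
					// the next thread picks up where this one left off.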
TagList = TagList.subspan(TagsPerThread); 102 | } 103 | 104 | for( auto& Thread : 105 | std::span(ThreadPool).first(CurVisitorThreads) ) 106 | { 107 | Thread.join(); 108 | } 109 | } 110 | else 111 | { 112 | CurVisitor->VisitTags(TagList, Map); 113 | } 114 | } 115 | 116 | if( CurVisitor->EndVisits ) 117 | { 118 | CurVisitor->EndVisits(Map); 119 | } 120 | } 121 | } 122 | 123 | } // namespace Blam 124 | -------------------------------------------------------------------------------- /source/Vulkan/Debug.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | 9 | namespace Vulkan 10 | { 11 | void SetObjectName( 12 | vk::Device Device, vk::ObjectType ObjectType, const void* ObjectHandle, 13 | const char* Format, ... 14 | ) 15 | { 16 | va_list Args; 17 | va_start(Args, Format); 18 | const auto NameLength = std::vsnprintf(nullptr, 0, Format, Args); 19 | va_end(Args); 20 | if( NameLength < 0 ) 21 | { 22 | // Invalid vsnprintf 23 | return; 24 | } 25 | 26 | std::unique_ptr ObjectName 27 | = std::make_unique(std::size_t(NameLength) + 1u); 28 | 29 | // Write formatted object name 30 | va_start(Args, Format); 31 | std::vsnprintf( 32 | ObjectName.get(), std::size_t(NameLength) + 1u, Format, Args 33 | ); 34 | va_end(Args); 35 | 36 | vk::DebugUtilsObjectNameInfoEXT NameInfo = {}; 37 | NameInfo.objectType = ObjectType; 38 | NameInfo.objectHandle = reinterpret_cast(ObjectHandle); 39 | NameInfo.pObjectName = ObjectName.get(); 40 | 41 | if( Device.setDebugUtilsObjectNameEXT(NameInfo) != vk::Result::eSuccess ) 42 | { 43 | // Failed to set object name 44 | } 45 | } 46 | 47 | void BeginDebugLabel( 48 | vk::CommandBuffer CommandBuffer, const std::array& Color, 49 | const char* Format, ... 50 | ) 51 | { 52 | va_list Args; 53 | va_start(Args, Format); 54 | const auto NameLength = std::vsnprintf(nullptr, 0, Format, Args); 55 | va_end(Args); 56 | if( NameLength < 0 ) 57 | { 58 | // Invalid vsnprintf 59 | return; 60 | } 61 | 62 | std::unique_ptr ObjectName 63 | = std::make_unique(std::size_t(NameLength) + 1u); 64 | 65 | // Write formatted object name 66 | va_start(Args, Format); 67 | std::vsnprintf( 68 | ObjectName.get(), std::size_t(NameLength) + 1u, Format, Args 69 | ); 70 | va_end(Args); 71 | 72 | vk::DebugUtilsLabelEXT LabelInfo = {}; 73 | LabelInfo.pLabelName = ObjectName.get(); 74 | LabelInfo.color[0] = Color[0]; 75 | LabelInfo.color[1] = Color[1]; 76 | LabelInfo.color[2] = Color[2]; 77 | LabelInfo.color[3] = Color[3]; 78 | 79 | CommandBuffer.beginDebugUtilsLabelEXT(LabelInfo); 80 | } 81 | 82 | void InsertDebugLabel( 83 | vk::CommandBuffer CommandBuffer, const std::array& Color, 84 | const char* Format, ... 
85 | ) 86 | { 87 | va_list Args; 88 | va_start(Args, Format); 89 | const auto NameLength = std::vsnprintf(nullptr, 0, Format, Args); 90 | va_end(Args); 91 | if( NameLength < 0 ) 92 | { 93 | // Invalid vsnprintf 94 | return; 95 | } 96 | 97 | std::unique_ptr ObjectName 98 | = std::make_unique(std::size_t(NameLength) + 1u); 99 | 100 | // Write formatted object name 101 | va_start(Args, Format); 102 | std::vsnprintf( 103 | ObjectName.get(), std::size_t(NameLength) + 1u, Format, Args 104 | ); 105 | va_end(Args); 106 | 107 | vk::DebugUtilsLabelEXT LabelInfo = {}; 108 | LabelInfo.pLabelName = ObjectName.get(); 109 | LabelInfo.color[0] = Color[0]; 110 | LabelInfo.color[1] = Color[1]; 111 | LabelInfo.color[2] = Color[2]; 112 | LabelInfo.color[3] = Color[3]; 113 | 114 | CommandBuffer.insertDebugUtilsLabelEXT(LabelInfo); 115 | } 116 | 117 | void EndDebugLabel(vk::CommandBuffer CommandBuffer) 118 | { 119 | CommandBuffer.endDebugUtilsLabelEXT(); 120 | } 121 | 122 | } // namespace Vulkan -------------------------------------------------------------------------------- /source/VkBlam/Format.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | #include "FormatTraits.hpp" 4 | 5 | namespace VkBlam 6 | { 7 | 8 | vk::ImageType BlamToVk(Blam::BitmapEntryType Value) 9 | { 10 | switch( Value ) 11 | { 12 | case Blam::BitmapEntryType::Texture2D: 13 | return vk::ImageType::e2D; 14 | case Blam::BitmapEntryType::Texture3D: 15 | return vk::ImageType::e3D; 16 | case Blam::BitmapEntryType::CubeMap: 17 | return vk::ImageType::e2D; 18 | case Blam::BitmapEntryType::White: 19 | return vk::ImageType::e2D; 20 | } 21 | return vk::ImageType::e2D; 22 | } 23 | 24 | vk::Format BlamToVk(Blam::BitmapEntryFormat Value) 25 | { 26 | switch( Value ) 27 | { 28 | case Blam::BitmapEntryFormat::A8: 29 | return FormatTrait::Format; 30 | case Blam::BitmapEntryFormat::Y8: 31 | return FormatTrait::Format; 32 | case Blam::BitmapEntryFormat::AY8: 33 | return FormatTrait::Format; 34 | case Blam::BitmapEntryFormat::A8Y8: 35 | return FormatTrait::Format; 36 | case Blam::BitmapEntryFormat::R5G6B5: 37 | return FormatTrait::Format; 38 | case Blam::BitmapEntryFormat::A1R5G5B5: 39 | return FormatTrait::Format; 40 | case Blam::BitmapEntryFormat::A4R4G4B4: 41 | return FormatTrait::Format; 42 | case Blam::BitmapEntryFormat::X8R8G8B8: 43 | return FormatTrait::Format; 44 | case Blam::BitmapEntryFormat::A8R8G8B8: 45 | return FormatTrait::Format; 46 | case Blam::BitmapEntryFormat::DXT1: 47 | return FormatTrait::Format; 48 | case Blam::BitmapEntryFormat::DXT2AND3: 49 | return FormatTrait::Format; 50 | case Blam::BitmapEntryFormat::DXT4AND5: 51 | return FormatTrait::Format; 52 | case Blam::BitmapEntryFormat::P8: 53 | return FormatTrait::Format; 54 | } 55 | return vk::Format::eUndefined; 56 | } 57 | 58 | vk::ComponentMapping GetFormatSwizzle(Blam::BitmapEntryFormat Value) 59 | { 60 | switch( Value ) 61 | { 62 | case Blam::BitmapEntryFormat::A8: 63 | return FormatTrait::Swizzle; 64 | case Blam::BitmapEntryFormat::Y8: 65 | return FormatTrait::Swizzle; 66 | case Blam::BitmapEntryFormat::AY8: 67 | return FormatTrait::Swizzle; 68 | case Blam::BitmapEntryFormat::A8Y8: 69 | return FormatTrait::Swizzle; 70 | case Blam::BitmapEntryFormat::R5G6B5: 71 | return FormatTrait::Swizzle; 72 | case Blam::BitmapEntryFormat::A1R5G5B5: 73 | return FormatTrait::Swizzle; 74 | case Blam::BitmapEntryFormat::A4R4G4B4: 75 | return FormatTrait::Swizzle; 76 | case Blam::BitmapEntryFormat::X8R8G8B8: 77 | return FormatTrait::Swizzle; 78 | 
case Blam::BitmapEntryFormat::A8R8G8B8: 79 | return FormatTrait::Swizzle; 80 | case Blam::BitmapEntryFormat::DXT1: 81 | return FormatTrait::Swizzle; 82 | case Blam::BitmapEntryFormat::DXT2AND3: 83 | return FormatTrait::Swizzle; 84 | case Blam::BitmapEntryFormat::DXT4AND5: 85 | return FormatTrait::Swizzle; 86 | case Blam::BitmapEntryFormat::P8: 87 | return FormatTrait::Swizzle; 88 | } 89 | return vk::ComponentMapping(); 90 | } 91 | } // namespace VkBlam -------------------------------------------------------------------------------- /source/VkBlam/FormatTraits.hpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | 4 | template 5 | struct FormatTrait 6 | { 7 | }; 8 | 9 | template<> 10 | struct FormatTrait 11 | { 12 | static constexpr vk::Format Format = vk::Format::eR8Unorm; 13 | static constexpr vk::ComponentMapping Swizzle = vk::ComponentMapping( 14 | vk::ComponentSwizzle::eIdentity, vk::ComponentSwizzle::eIdentity, 15 | vk::ComponentSwizzle::eIdentity, vk::ComponentSwizzle::eR 16 | ); 17 | }; 18 | 19 | template<> 20 | struct FormatTrait 21 | { 22 | static constexpr vk::Format Format = vk::Format::eR8Unorm; 23 | static constexpr vk::ComponentMapping Swizzle = vk::ComponentMapping(); 24 | }; 25 | 26 | template<> 27 | struct FormatTrait 28 | { 29 | static constexpr vk::Format Format = vk::Format::eR8Unorm; 30 | static constexpr vk::ComponentMapping Swizzle = vk::ComponentMapping(); 31 | }; 32 | 33 | template<> 34 | struct FormatTrait 35 | { 36 | static constexpr vk::Format Format = vk::Format::eR8G8Unorm; 37 | static constexpr vk::ComponentMapping Swizzle = vk::ComponentMapping(); 38 | }; 39 | 40 | template<> 41 | struct FormatTrait 42 | { 43 | static constexpr vk::Format Format = vk::Format::eR5G6B5UnormPack16; 44 | static constexpr vk::ComponentMapping Swizzle = vk::ComponentMapping(); 45 | }; 46 | 47 | template<> 48 | struct FormatTrait 49 | { 50 | static constexpr vk::Format Format = vk::Format::eA1R5G5B5UnormPack16; 51 | static constexpr vk::ComponentMapping Swizzle = vk::ComponentMapping(); 52 | }; 53 | 54 | template<> 55 | struct FormatTrait 56 | { 57 | static constexpr vk::Format Format = vk::Format::eR4G4B4A4UnormPack16; 58 | // ARGB <-> RGBA 59 | static constexpr vk::ComponentMapping Swizzle = vk::ComponentMapping( 60 | vk::ComponentSwizzle::eG, vk::ComponentSwizzle::eB, 61 | vk::ComponentSwizzle::eA, vk::ComponentSwizzle::eR 62 | ); 63 | ; 64 | }; 65 | 66 | template<> 67 | struct FormatTrait 68 | { 69 | static constexpr vk::Format Format = vk::Format::eA8B8G8R8UnormPack32; 70 | static constexpr vk::ComponentMapping Swizzle = vk::ComponentMapping(); 71 | }; 72 | 73 | template<> 74 | struct FormatTrait 75 | { 76 | static constexpr vk::Format Format = vk::Format::eA8B8G8R8UnormPack32; 77 | static constexpr vk::ComponentMapping Swizzle = vk::ComponentMapping(); 78 | }; 79 | 80 | template<> 81 | struct FormatTrait 82 | { 83 | static constexpr vk::Format Format = vk::Format::eBc1RgbSrgbBlock; 84 | static constexpr vk::ComponentMapping Swizzle = vk::ComponentMapping(); 85 | }; 86 | 87 | template<> 88 | struct FormatTrait 89 | { 90 | static constexpr vk::Format Format = vk::Format::eBc2SrgbBlock; 91 | static constexpr vk::ComponentMapping Swizzle = vk::ComponentMapping(); 92 | }; 93 | 94 | template<> 95 | struct FormatTrait 96 | { 97 | static constexpr vk::Format Format = vk::Format::eBc3SrgbBlock; 98 | static constexpr vk::ComponentMapping Swizzle = vk::ComponentMapping(); 99 | }; 100 | 101 | template<> 102 | struct FormatTrait 
103 | { 104 | static constexpr vk::Format Format = vk::Format::eR8Unorm; 105 | static constexpr vk::ComponentMapping Swizzle = vk::ComponentMapping(); 106 | }; -------------------------------------------------------------------------------- /source/dump-bsp.cpp: -------------------------------------------------------------------------------- 1 | #include "Blam/Util.hpp" 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | 11 | #include 12 | 13 | #include 14 | 15 | #include 16 | 17 | int main(int argc, char* argv[]) 18 | { 19 | if( argc < 2 ) 20 | { 21 | // Not enough arguments 22 | return EXIT_FAILURE; 23 | } 24 | auto MapFile = mio::mmap_source(argv[1]); 25 | 26 | Blam::MapFile CurMap( 27 | std::span( 28 | reinterpret_cast(MapFile.data()), MapFile.size() 29 | ), 30 | {} 31 | ); 32 | 33 | const auto MapData = CurMap.GetMapData(); 34 | 35 | if( const auto BaseTagPtr 36 | = CurMap.GetTagIndexEntry(CurMap.TagIndexHeader.BaseTag); 37 | BaseTagPtr ) 38 | { 39 | const auto& CurTag = *BaseTagPtr; 40 | const std::string_view TagName 41 | = CurMap.GetTagName(CurMap.TagIndexHeader.BaseTag); 42 | 43 | if( const auto ScenarioPtr = CurMap.GetTag( 44 | CurMap.TagIndexHeader.BaseTag 45 | ); 46 | ScenarioPtr ) 47 | { 48 | const Blam::Tag& Scenario = *ScenarioPtr; 49 | 50 | std::uint16_t IndexStart = 1; 51 | 52 | for( const Blam::Tag::StructureBSP& 53 | CurSBSP : CurMap.TagHeap.GetBlock(Scenario.StructureBSPs) ) 54 | { 55 | const Blam::VirtualHeap SBSPHeap = CurSBSP.GetSBSPHeap(MapData); 56 | 57 | const char* BSPName 58 | = &CurMap.TagHeap.Read(CurSBSP.BSP.PathVirtualOffset); 59 | 60 | std::printf( 61 | "g %s %s\n", CurSBSP.BSP.PathVirtualOffset ? BSPName : "", 62 | TagName.data() 63 | ); 64 | 65 | const Blam::Tag& 66 | ScenarioBSP 67 | = CurSBSP.GetSBSP(SBSPHeap); 68 | 69 | const auto Surfaces = SBSPHeap.GetBlock(ScenarioBSP.Surfaces); 70 | 71 | // Lightmap 72 | for( const auto& CurLightmap : 73 | SBSPHeap.GetBlock(ScenarioBSP.Lightmaps) ) 74 | { 75 | for( const auto& CurMaterial : 76 | SBSPHeap.GetBlock(CurLightmap.Materials) ) 77 | { 78 | auto Test = CurMaterial; 79 | for( const auto& CurVert : 80 | CurMaterial.GetVertices(SBSPHeap) ) 81 | { 82 | std::printf( 83 | "v %f %f %f\n" 84 | "vn %f %f %f\n" 85 | "vt %f %f\n", 86 | CurVert.Position[0], CurVert.Position[1], 87 | CurVert.Position[2], CurVert.Normal[0], 88 | CurVert.Normal[1], CurVert.Normal[2], 89 | CurVert.UV[0], CurVert.UV[1] 90 | ); 91 | } 92 | } 93 | } 94 | 95 | for( const auto& CurLightmap : 96 | SBSPHeap.GetBlock(ScenarioBSP.Lightmaps) ) 97 | { 98 | for( const auto& CurMaterial : 99 | SBSPHeap.GetBlock(CurLightmap.Materials) ) 100 | { 101 | const auto CurSurfaces = Surfaces.subspan( 102 | CurMaterial.SurfacesIndexStart, 103 | CurMaterial.SurfacesCount 104 | ); 105 | for( const auto& CurSurface : CurSurfaces ) 106 | { 107 | std::printf( 108 | "f %d/%d/%d %d/%d/%d %d/%d/%d\n", 109 | IndexStart + CurSurface[0], 110 | IndexStart + CurSurface[0], 111 | IndexStart + CurSurface[0], 112 | IndexStart + CurSurface[1], 113 | IndexStart + CurSurface[1], 114 | IndexStart + CurSurface[1], 115 | IndexStart + CurSurface[2], 116 | IndexStart + CurSurface[2], 117 | IndexStart + CurSurface[2] 118 | ); 119 | } 120 | IndexStart += CurMaterial.Geometry.VertexBufferCount; 121 | } 122 | } 123 | } 124 | } 125 | } 126 | 127 | return EXIT_SUCCESS; 128 | } 129 | -------------------------------------------------------------------------------- /.github/workflows/ci.yml: 
-------------------------------------------------------------------------------- 1 | name: Build and Test 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | - dev 8 | pull_request: 9 | release: 10 | 11 | env: 12 | BUILD_TYPE: Release 13 | 14 | jobs: 15 | macos-build: 16 | runs-on: macos-latest 17 | steps: 18 | - name: Checkout 19 | uses: actions/checkout@v3 20 | with: 21 | submodules: 'recursive' 22 | 23 | - name: Setup Vulkan SDK 24 | uses: humbletim/setup-vulkan-sdk@v1.2.0 25 | with: 26 | vulkan-query-version: latest 27 | vulkan-use-cache: true 28 | vulkan-components: Vulkan-Headers, Vulkan-Loader, Glslang 29 | 30 | - name: Configure CMake 31 | run: cmake -B ${{github.workspace}}/build -DCMAKE_BUILD_TYPE=${{env.BUILD_TYPE}} 32 | 33 | - name: Build 34 | run: cmake --build ${{github.workspace}}/build --config ${{env.BUILD_TYPE}} --target vkblam --parallel 8 35 | 36 | - name: Upload Artifact 37 | if: github.ref == 'refs/heads/main' 38 | uses: actions/upload-artifact@v1 39 | with: 40 | name: "macos" 41 | path: "build/vkblam" 42 | 43 | linux-build: 44 | runs-on: ubuntu-latest 45 | steps: 46 | - name: Checkout 47 | uses: actions/checkout@v3 48 | with: 49 | submodules: 'recursive' 50 | 51 | - name: Setup Vulkan SDK 52 | uses: humbletim/setup-vulkan-sdk@v1.2.0 53 | with: 54 | vulkan-query-version: latest 55 | vulkan-use-cache: true 56 | vulkan-components: Vulkan-Headers, Vulkan-Loader, Glslang 57 | 58 | - name: Configure CMake 59 | env: 60 | CC: "gcc-10" 61 | CXX: "g++-10" 62 | run: cmake -B ${{github.workspace}}/build -DCMAKE_BUILD_TYPE=${{env.BUILD_TYPE}} 63 | 64 | - name: Build 65 | run: cmake --build ${{github.workspace}}/build --config ${{env.BUILD_TYPE}} --target vkblam --parallel 8 66 | 67 | - name: Upload Artifact 68 | if: github.ref == 'refs/heads/main' 69 | uses: actions/upload-artifact@v1 70 | with: 71 | name: "linux" 72 | path: "build/vkblam" 73 | 74 | windows-build: 75 | runs-on: windows-latest 76 | steps: 77 | - name: Checkout 78 | uses: actions/checkout@v3 79 | with: 80 | submodules: 'recursive' 81 | 82 | - name: Setup Vulkan SDK 83 | uses: humbletim/setup-vulkan-sdk@v1.2.0 84 | with: 85 | vulkan-query-version: latest 86 | vulkan-use-cache: true 87 | vulkan-components: Vulkan-Headers, Vulkan-Loader, Glslang 88 | 89 | - name: Configure CMake 90 | run: cmake -B ${{github.workspace}}/build -DCMAKE_BUILD_TYPE=${{env.BUILD_TYPE}} 91 | 92 | - name: Build 93 | run: cmake --build ${{github.workspace}}/build --config ${{env.BUILD_TYPE}} --target vkblam --parallel 8 94 | 95 | - name: Upload Artifact 96 | if: github.ref == 'refs/heads/main' 97 | uses: actions/upload-artifact@v1 98 | with: 99 | name: "windows" 100 | path: "build/${{env.BUILD_TYPE}}/vkblam.exe" 101 | 102 | create-release: 103 | if: github.ref == 'refs/heads/main' 104 | needs: [windows-build, linux-build, macos-build] 105 | runs-on: ubuntu-latest 106 | steps: 107 | - name: Download Mac Artifacts 108 | uses: actions/download-artifact@v1 109 | with: 110 | name: "macos" 111 | 112 | - name: Download Linux Artifacts 113 | uses: actions/download-artifact@v1 114 | with: 115 | name: "linux" 116 | 117 | - name: Download Windows Artifacts 118 | uses: actions/download-artifact@v1 119 | with: 120 | name: "windows" 121 | 122 | - name: Compress Artifacts 123 | uses: vimtor/action-zip@v1 124 | with: 125 | files: windows/ linux/ macos/ 126 | recursive: false 127 | dest: vkblam.zip 128 | 129 | - name: Create release 130 | uses: "marvinpinto/action-automatic-releases@latest" 131 | with: 132 | repo_token: "${{ secrets.GITHUB_TOKEN }}" 133 | 
automatic_release_tag: "latest" 134 | prerelease: false 135 | title: "Latest Build" 136 | files: | 137 | ${{ github.workspace }}/vkblam.zip -------------------------------------------------------------------------------- /shaders/Default.frag: -------------------------------------------------------------------------------- 1 | #version 460 2 | #extension GL_EXT_shader_explicit_arithmetic_types : require 3 | #extension GL_GOOGLE_include_directive : require 4 | 5 | #include "vkBlam.glsl" 6 | 7 | layout( push_constant ) uniform PushConstants { 8 | CameraGlobals Camera; 9 | }; 10 | 11 | // Input vertex data: Standard vertex 12 | layout( location = 0 ) in f32vec3 InPosition; 13 | layout( location = 1 ) in f32vec3 InNormal; 14 | layout( location = 2 ) in f32vec3 InBinormal; 15 | layout( location = 3 ) in f32vec3 InTangent; 16 | layout( location = 4 ) in f32vec2 InUV; 17 | 18 | // Input vertex data: Lightmap-vertex 19 | layout( location = 5 ) in f32vec3 InLightmapNormal; 20 | layout( location = 6 ) in f32vec2 InLightmapUV; 21 | 22 | //// Descriptor sets 23 | 24 | // Set 0: Scene Globals 25 | layout( set = 0, binding = 0 ) uniform sampler Default2DSamplerFiltered; 26 | layout( set = 0, binding = 1 ) uniform sampler Default2DSamplerUnfiltered; 27 | layout( set = 0, binding = 2 ) uniform sampler DefaultCubeSampler; 28 | 29 | // Set 1: Shader 30 | layout( set = 1, binding = 0 ) uniform texture2D BaseMapImage; 31 | layout( set = 1, binding = 1 ) uniform texture2D PrimaryDetailMapImage; 32 | layout( set = 1, binding = 2 ) uniform texture2D SecondaryDetailMapImage; 33 | layout( set = 1, binding = 3 ) uniform texture2D MicroDetailMapImage; 34 | layout( set = 1, binding = 4 ) uniform texture2D BumpMapImage; 35 | layout( set = 1, binding = 5 ) uniform texture2D GlowMapImage; 36 | layout( set = 1, binding = 6 ) uniform textureCube ReflectionCubeMapImage; 37 | 38 | // Set 2: Object 39 | layout( set = 2, binding = 0 ) uniform texture2D LightmapImage; 40 | 41 | // Attachments 42 | layout( location = 0 ) out f32vec4 Attachment0; 43 | 44 | f32vec3 Glow(f32vec2 UV) 45 | { 46 | const f32vec3 GlowSample = texture(sampler2D(GlowMapImage, Default2DSamplerFiltered), InUV).rgb; 47 | 48 | f32vec3 GlowResult = f32vec3(0, 0, 0); 49 | 50 | // Primary Animation Color 51 | const f32vec3 PrimaryOnColor = f32vec3(1, 1, 1); 52 | const f32vec3 PrimaryOffColor = f32vec3(1, 1, 1); 53 | const float32_t PrimaryAnimationValue = 1.0; 54 | GlowResult += GlowSample.r * mix(PrimaryOffColor, PrimaryOnColor, PrimaryAnimationValue); 55 | 56 | // Secondary Animation Color 57 | const f32vec3 SecondaryOnColor = f32vec3(1, 1, 1); 58 | const f32vec3 SecondaryOffColor = f32vec3(1, 1, 1); 59 | const float32_t SecondaryAnimationValue = 1.0; 60 | GlowResult += GlowSample.g * mix(SecondaryOffColor, SecondaryOnColor, SecondaryAnimationValue); 61 | 62 | // Plasma Animation Color 63 | const f32vec3 PlasmaOnColor = f32vec3(1, 1, 1); 64 | const f32vec3 PlasmaOffColor = f32vec3(1, 1, 1); 65 | const float32_t PlasmaAnimationValue = 1.0; 66 | GlowResult += GlowSample.b * mix(PlasmaOffColor, PlasmaOnColor, PlasmaAnimationValue); 67 | 68 | return GlowResult; 69 | } 70 | 71 | f32vec3 Reflection(f32vec3 Normal) 72 | { 73 | const f32vec3 CameraPos = (Camera.View * vec4(0.0, 0.0, 0.0, 1.0)).xyz; 74 | const f32vec3 ReflectionDirection = reflect(normalize(CameraPos - InPosition), Normal); 75 | 76 | const f32vec3 CubeSample = texture( 77 | samplerCube(ReflectionCubeMapImage, DefaultCubeSampler), 78 | ReflectionDirection).rgb; 79 | 80 | // Todo, fresnel 81 | 82 | 
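	// One possible shape for that todo, as a sketch only (not active shader
	// code; `F0` is an assumed base reflectance):
	//   const float32_t F0      = 0.04;
	//   const float32_t NdotV   = clamp(dot(Normal, normalize(CameraPos - InPosition)), 0.0, 1.0);
	//   const float32_t Fresnel = F0 + (1.0 - F0) * pow(1.0 - NdotV, 5.0); // Schlick approximation
	//   return CubeSample * Fresnel;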
return CubeSample; 83 | } 84 | 85 | f32vec3 BumpedNormal(out float32_t Alpha) 86 | { 87 | const f32vec4 BumpSample = texture(sampler2D(BumpMapImage, Default2DSamplerFiltered), InUV); 88 | Alpha = BumpSample.a; 89 | 90 | const f32vec3 BumpVector = normalize(BumpSample.xyz * 2.0 - 1.0); 91 | 92 | const f32mat3 Basis = f32mat3( 93 | // These vectors are linearly interpolated, re-normalize them 94 | normalize(InTangent), 95 | normalize(InBinormal), 96 | normalize(InNormal) 97 | ); 98 | 99 | return normalize(Basis * BumpVector); 100 | } 101 | 102 | void main() 103 | { 104 | const f32vec4 DiffuseSample = texture(sampler2D(BaseMapImage, Default2DSamplerFiltered), InUV); 105 | const f32vec4 LightmapSample = texture(sampler2D(LightmapImage, Default2DSamplerFiltered), InLightmapUV); 106 | 107 | float32_t Alpha = 1.0; 108 | const f32vec3 Normal = BumpedNormal(Alpha); 109 | 110 | Alpha = step(0.5, Alpha); 111 | 112 | Attachment0 = f32vec4( 113 | (DiffuseSample.rgb + Reflection(Normal) * DiffuseSample.a) * LightmapSample.rgb 114 | + Glow(InUV), 115 | Alpha 116 | ); 117 | } -------------------------------------------------------------------------------- /source/Vulkan/DescriptorUpdateBatch.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | #include 4 | #include 5 | 6 | namespace Vulkan 7 | { 8 | 9 | void DescriptorUpdateBatch::Flush() 10 | { 11 | VulkanContext.LogicalDevice.updateDescriptorSets( 12 | {std::span(DescriptorWrites.get(), DescriptorWriteEnd)}, 13 | {std::span(DescriptorCopies.get(), DescriptorCopyEnd)} 14 | ); 15 | 16 | DescriptorWriteEnd = 0; 17 | DescriptorCopyEnd = 0; 18 | } 19 | 20 | void DescriptorUpdateBatch::AddImage( 21 | vk::DescriptorSet TargetDescriptor, std::uint8_t TargetBinding, 22 | vk::ImageView ImageView, vk::ImageLayout ImageLayout 23 | ) 24 | { 25 | if( DescriptorWriteEnd >= DescriptorWriteMax ) 26 | { 27 | Flush(); 28 | } 29 | 30 | const auto& ImageInfo 31 | = DescriptorInfos[DescriptorWriteEnd].emplace( 32 | vk::Sampler(), ImageView, ImageLayout 33 | ); 34 | 35 | DescriptorWrites[DescriptorWriteEnd] = vk::WriteDescriptorSet( 36 | TargetDescriptor, TargetBinding, 0, 1, 37 | vk::DescriptorType::eSampledImage, &ImageInfo, nullptr, nullptr 38 | ); 39 | 40 | ++DescriptorWriteEnd; 41 | } 42 | 43 | void DescriptorUpdateBatch::AddSampler( 44 | vk::DescriptorSet TargetDescriptor, std::uint8_t TargetBinding, 45 | vk::Sampler Sampler 46 | ) 47 | { 48 | if( DescriptorWriteEnd >= DescriptorWriteMax ) 49 | { 50 | Flush(); 51 | } 52 | 53 | const auto& ImageInfo 54 | = DescriptorInfos[DescriptorWriteEnd].emplace( 55 | Sampler, vk::ImageView(), vk::ImageLayout() 56 | ); 57 | 58 | DescriptorWrites[DescriptorWriteEnd] = vk::WriteDescriptorSet( 59 | TargetDescriptor, TargetBinding, 0, 1, vk::DescriptorType::eSampler, 60 | &ImageInfo, nullptr, nullptr 61 | ); 62 | 63 | ++DescriptorWriteEnd; 64 | } 65 | 66 | void DescriptorUpdateBatch::AddImageSampler( 67 | vk::DescriptorSet TargetDescriptor, std::uint8_t TargetBinding, 68 | vk::ImageView ImageView, vk::Sampler Sampler, vk::ImageLayout ImageLayout 69 | ) 70 | { 71 | if( DescriptorWriteEnd >= DescriptorWriteMax ) 72 | { 73 | Flush(); 74 | } 75 | 76 | const auto& ImageInfo 77 | = DescriptorInfos[DescriptorWriteEnd].emplace( 78 | Sampler, ImageView, ImageLayout 79 | ); 80 | 81 | DescriptorWrites[DescriptorWriteEnd] = vk::WriteDescriptorSet( 82 | TargetDescriptor, TargetBinding, 0, 1, 83 | vk::DescriptorType::eCombinedImageSampler, &ImageInfo, nullptr, nullptr 84 | ); 85 | 86 | 
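	// Note: the vk::DescriptorImageInfo emplaced above lives in the
	// DescriptorInfos array, so the pointer stored in this WriteDescriptorSet
	// stays valid until Flush() submits the batch and rewinds
	// DescriptorWriteEnd.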
++DescriptorWriteEnd; 87 | } 88 | 89 | void DescriptorUpdateBatch::AddBuffer( 90 | vk::DescriptorSet TargetDescriptor, std::uint8_t TargetBinding, 91 | vk::Buffer Buffer, vk::DeviceSize Offset, vk::DeviceSize Size 92 | ) 93 | { 94 | if( DescriptorWriteEnd >= DescriptorWriteMax ) 95 | { 96 | Flush(); 97 | } 98 | 99 | const auto& BufferInfo 100 | = DescriptorInfos[DescriptorWriteEnd].emplace( 101 | Buffer, Offset, Size 102 | ); 103 | 104 | DescriptorWrites[DescriptorWriteEnd] = vk::WriteDescriptorSet( 105 | TargetDescriptor, TargetBinding, 0, 1, 106 | vk::DescriptorType::eStorageImage, nullptr, &BufferInfo, nullptr 107 | ); 108 | 109 | ++DescriptorWriteEnd; 110 | } 111 | 112 | void DescriptorUpdateBatch::CopyBinding( 113 | vk::DescriptorSet SourceDescriptor, vk::DescriptorSet TargetDescriptor, 114 | std::uint8_t SourceBinding, std::uint8_t TargetBinding, 115 | std::uint8_t SourceArrayElement, std::uint8_t TargetArrayElement, 116 | std::uint8_t DescriptorCount 117 | ) 118 | { 119 | if( DescriptorCopyEnd >= DescriptorCopyMax ) 120 | { 121 | Flush(); 122 | } 123 | 124 | DescriptorCopies[DescriptorCopyEnd] = vk::CopyDescriptorSet( 125 | SourceDescriptor, SourceBinding, SourceArrayElement, TargetDescriptor, 126 | TargetBinding, TargetArrayElement, DescriptorCount 127 | ); 128 | 129 | ++DescriptorCopyEnd; 130 | } 131 | 132 | std::optional DescriptorUpdateBatch::Create( 133 | const Vulkan::Context& VulkanContext, std::size_t DescriptorWriteMax, 134 | std::size_t DescriptorCopyMax 135 | ) 136 | 137 | { 138 | DescriptorUpdateBatch NewDescriptorUpdateBatch( 139 | VulkanContext, DescriptorWriteMax, DescriptorCopyMax 140 | ); 141 | 142 | NewDescriptorUpdateBatch.DescriptorInfos 143 | = std::make_unique(DescriptorWriteMax); 144 | NewDescriptorUpdateBatch.DescriptorWrites 145 | = std::make_unique(DescriptorWriteMax); 146 | NewDescriptorUpdateBatch.DescriptorCopies 147 | = std::make_unique(DescriptorCopyMax); 148 | 149 | return {std::move(NewDescriptorUpdateBatch)}; 150 | } 151 | 152 | } // namespace Vulkan -------------------------------------------------------------------------------- /source/Blam/Util.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | 5 | namespace Blam 6 | { 7 | std::string FormatTagClass(Blam::TagClass Class) 8 | { 9 | std::uint32_t TagStr = static_cast(Class); 10 | if( Class == Blam::TagClass::None ) 11 | { 12 | TagStr = '-' * 0x01010101; 13 | } 14 | TagStr = Common::Swap32(TagStr); 15 | return std::string(reinterpret_cast(&TagStr), 4); 16 | } 17 | 18 | static std::array VertexFormatStride{{ 19 | 56, 32, 20, 8, 68, 32, 24, 36, 24, 16, 20 | 16, 20, 32, 8, 32, 32, 36, 28, 32, 40, 21 | }}; 22 | 23 | std::size_t GetVertexStride(VertexFormat Format) 24 | { 25 | return VertexFormatStride.at(static_cast(Format)); 26 | } 27 | 28 | void GenerateVisibleSurfaceIndices( 29 | const VirtualHeap& Heap, 30 | std::span::Cluster::SubCluster> 31 | SubClusters, 32 | const Bounds3D& OverlapTest, SurfaceOcclusionBitArray SurfaceOcclusionArray 33 | ) 34 | { 35 | for( const auto& SubCluster : SubClusters ) 36 | { 37 | // Test if the SubCluster's AABB overlaps 38 | if( OverlapTest.Intersects(SubCluster.WorldBounds) ) 39 | { 40 | // If the subcluster overlaps, then enable each surface index's 41 | // visibility-bit 42 | for( const std::uint32_t& SurfaceIndex : 43 | Heap.GetBlock(SubCluster.SurfaceIndices) ) 44 | { 45 | const std::uint32_t OcclusionWordIndex = SurfaceIndex / 32; 46 | const std::uint32_t OcclusionBitIndex = 
SurfaceIndex % 32; 47 | 48 | SurfaceOcclusionArray[OcclusionWordIndex] 49 | |= (1 << OcclusionBitIndex); 50 | } 51 | } 52 | } 53 | } 54 | 55 | template 56 | std::string FormatString(const std::string& Format, ArgsT... Args) 57 | { 58 | const auto FormatSize 59 | = std::snprintf(nullptr, 0, Format.c_str(), Args...) + 1; 60 | if( FormatSize <= 0 ) 61 | { 62 | return "(Error formatting)"; 63 | } 64 | const std::size_t Size = static_cast(FormatSize); 65 | std::unique_ptr Buffer = std::make_unique(Size); 66 | std::snprintf(Buffer.get(), Size, Format.c_str(), Args...); 67 | return std::string(Buffer.get(), Buffer.get() + Size - 1); 68 | } 69 | 70 | const char* ToString(const CacheVersion& Value) 71 | { 72 | switch( Value ) 73 | { 74 | case CacheVersion::Xbox: 75 | return "Xbox"; 76 | case CacheVersion::Demo: 77 | return "Demo"; 78 | case CacheVersion::Retail: 79 | return "Retail"; 80 | case CacheVersion::H1A: 81 | return "H1A"; 82 | case CacheVersion::CustomEdition: 83 | return "CustomEdition"; 84 | default: 85 | return "(Unknown)"; 86 | } 87 | } 88 | 89 | const char* ToString(const ScenarioType& Value) 90 | { 91 | switch( Value ) 92 | { 93 | case ScenarioType::SinglePlayer: 94 | return "SinglePlayer"; 95 | case ScenarioType::MultiPlayer: 96 | return "MultiPlayer"; 97 | case ScenarioType::UserInterface: 98 | return "UserInterface"; 99 | default: 100 | return "(Unknown)"; 101 | } 102 | } 103 | 104 | std::string ToString(const MapHeader& Value) 105 | { 106 | return FormatString( 107 | "Version: %s\n" 108 | "FileSize: 0x%08x\n" 109 | "PaddingLength: 0x%08x\n" 110 | "TagIndexOffset: 0x%08x\n" 111 | "TagIndexSize: %u\n" 112 | "ScenarioName: \"%.32s\"\n" 113 | "BuildVersion: \"%.32s\"\n" 114 | "Type: %s\n" 115 | "Checksum: 0x%08x\n", 116 | ToString(Value.Version), Value.FileSize, Value.PaddingLength, 117 | Value.TagIndexOffset, Value.TagIndexSize, Value.ScenarioName, 118 | Value.BuildVersion, ToString(Value.Type), Value.Checksum 119 | ); 120 | } 121 | 122 | std::string ToString(const TagIndexHeader& Value) 123 | { 124 | return FormatString( 125 | "TagIndexVirtualOffset: 0x%08x\n" 126 | "BaseTag: 0x%08x\n" 127 | "ScenarioTagID: 0x%08x\n" 128 | "TagCount: %u\n" 129 | "VertexCount: %u\n" 130 | "VertexOffset: 0x%08x\n" 131 | "IndexCount: %u\n" 132 | "IndexOffset: 0x%08x\n" 133 | "ModelDataSize: %u\n", 134 | Value.TagIndexVirtualOffset, Value.BaseTag, Value.ScenarioTagID, 135 | Value.TagCount, Value.VertexCount, Value.VertexOffset, Value.IndexCount, 136 | Value.IndexOffset, Value.ModelDataSize 137 | ); 138 | } 139 | 140 | std::string ToString(const TagIndexEntry& Value) 141 | { 142 | return FormatString( 143 | "ClassPrimary: %.4s\n" 144 | "ClassSecondary: %.4s\n" 145 | "ClassTertiary: %.4s\n" 146 | "TagID: 0x%08x\n" 147 | "TagPathVirtualOffset: 0x%08x\n" 148 | "TagDataVirtualOffset: 0x%08x\n" 149 | "IsExternal: %s\n", 150 | FormatTagClass(Value.ClassPrimary).c_str(), 151 | FormatTagClass(Value.ClassSecondary).c_str(), 152 | FormatTagClass(Value.ClassTertiary).c_str(), Value.TagID, 153 | Value.TagPathVirtualOffset, Value.TagDataVirtualOffset, 154 | Value.IsExternal ? 
"true" : "false" 155 | ); 156 | } 157 | } // namespace Blam -------------------------------------------------------------------------------- /source/VkBlam/Shaders/ShaderEnvironment.cpp: -------------------------------------------------------------------------------- 1 | 2 | #include "Blam/Enums.hpp" 3 | #include "Vulkan/Debug.hpp" 4 | #include 5 | 6 | #include 7 | #include 8 | #include 9 | 10 | static vk::DescriptorSetLayoutBinding Bindings[] = { 11 | {// BaseMap 12 | 0, vk::DescriptorType::eSampledImage, 1, 13 | vk::ShaderStageFlagBits::eFragment}, 14 | {// BumpMap 15 | 1, vk::DescriptorType::eSampledImage, 1, 16 | vk::ShaderStageFlagBits::eFragment}, 17 | {// PrimaryDetailMap 18 | 2, vk::DescriptorType::eSampledImage, 1, 19 | vk::ShaderStageFlagBits::eFragment}, 20 | {// SecondaryDetailMap 21 | 3, vk::DescriptorType::eSampledImage, 1, 22 | vk::ShaderStageFlagBits::eFragment}, 23 | {// MicroDetailMap 24 | 4, vk::DescriptorType::eSampledImage, 1, 25 | vk::ShaderStageFlagBits::eFragment}, 26 | {// GlowMap 27 | 5, vk::DescriptorType::eSampledImage, 1, 28 | vk::ShaderStageFlagBits::eFragment}, 29 | {// ReflectionCubeMap 30 | 6, vk::DescriptorType::eSampledImage, 1, 31 | vk::ShaderStageFlagBits::eFragment}, 32 | }; 33 | 34 | static vk::ImageViewType ImageTypes[] = { 35 | // BaseMap 36 | vk::ImageViewType::e2D, 37 | // BumpMap 38 | vk::ImageViewType::e2D, 39 | // PrimaryDetailMap 40 | vk::ImageViewType::e2D, 41 | // SecondaryDetailMap 42 | vk::ImageViewType::e2D, 43 | // MicroDetailMap 44 | vk::ImageViewType::e2D, 45 | // GlowMap 46 | vk::ImageViewType::e2D, 47 | // ReflectionCubeMap 48 | vk::ImageViewType::eCube, 49 | }; 50 | 51 | namespace VkBlam 52 | { 53 | 54 | ShaderEnvironment::ShaderEnvironment( 55 | const Vulkan::Context& VulkanContext, const BitmapHeapT& BitmapHeap, 56 | Vulkan::DescriptorUpdateBatch& DescriptorUpdateBatch 57 | ) 58 | : Shader(VulkanContext, BitmapHeap, DescriptorUpdateBatch) 59 | { 60 | DescriptorHeap = std::make_unique( 61 | Vulkan::DescriptorHeap::Create(VulkanContext, Bindings).value() 62 | ); 63 | 64 | Vulkan::SetObjectName( 65 | VulkanContext.LogicalDevice, DescriptorHeap->GetDescriptorPool(), 66 | "Shader Environment Descriptor Pool" 67 | ); 68 | Vulkan::SetObjectName( 69 | VulkanContext.LogicalDevice, DescriptorHeap->GetDescriptorSetLayout(), 70 | "Shader Environment Descriptor Set Layout" 71 | ); 72 | } 73 | 74 | ShaderEnvironment::~ShaderEnvironment() 75 | { 76 | } 77 | 78 | bool ShaderEnvironment::RegisterShader( 79 | const Blam::TagIndexEntry& TagEntry, 80 | const Blam::Tag& Shader 81 | ) 82 | { 83 | if( TagEntry.ClassPrimary != Blam::TagClass::ShaderEnvironment ) 84 | { 85 | return false; 86 | } 87 | const auto* ShaderEnvironment 88 | = reinterpret_cast*>( 89 | &Shader 90 | ); 91 | 92 | vk::DescriptorSet NewSet = {}; 93 | 94 | if( auto AllocResult = DescriptorHeap->AllocateDescriptorSet(); 95 | AllocResult.has_value() ) 96 | { 97 | NewSet = AllocResult.value(); 98 | } 99 | else 100 | { 101 | // Error allocating new descriptor set 102 | return false; 103 | } 104 | 105 | ShaderEnvironmentBindings[TagEntry.TagID] = NewSet; 106 | 107 | const auto WriteImageTag 108 | = [&](std::uint8_t Binding, std::uint32_t TagID, 109 | Blam::DefaultTextureIndex DefaultIndex) -> void { 110 | if( TagID == 0xFFFFFFFF ) 111 | { 112 | std::uint32_t DefaultImageTag; 113 | switch( ImageTypes[Binding] ) 114 | { 115 | default: 116 | case vk::ImageViewType::e2D: 117 | DefaultImageTag = BitmapHeap.Default2D; 118 | break; 119 | case vk::ImageViewType::e3D: 120 | DefaultImageTag = 
BitmapHeap.Default3D; 121 | break; 122 | case vk::ImageViewType::eCube: 123 | DefaultImageTag = BitmapHeap.DefaultCube; 124 | break; 125 | } 126 | DescriptorUpdateBatch.AddImage( 127 | NewSet, Binding, 128 | BitmapHeap.Bitmaps.at(DefaultImageTag) 129 | .at(std::uint32_t(DefaultIndex)) 130 | .View.get() 131 | ); 132 | return; 133 | } 134 | DescriptorUpdateBatch.AddImage( 135 | NewSet, Binding, BitmapHeap.Bitmaps.at(TagID).at(0).View.get() 136 | ); 137 | }; 138 | 139 | WriteImageTag( 140 | 0, ShaderEnvironment->BaseMap.TagID, Blam::DefaultTextureIndex::Additive 141 | ); 142 | WriteImageTag( 143 | 1, ShaderEnvironment->BumpMap.TagID, Blam::DefaultTextureIndex::Vector 144 | ); 145 | WriteImageTag( 146 | 2, ShaderEnvironment->PrimaryDetailMap.TagID, 147 | Blam::DefaultTextureIndex::SignedAdditive 148 | ); 149 | WriteImageTag( 150 | 3, ShaderEnvironment->SecondaryDetailMap.TagID, 151 | Blam::DefaultTextureIndex::SignedAdditive 152 | ); 153 | WriteImageTag( 154 | 4, ShaderEnvironment->MicroDetailMap.TagID, 155 | Blam::DefaultTextureIndex::SignedAdditive 156 | ); 157 | WriteImageTag( 158 | 5, ShaderEnvironment->GlowMap.TagID, Blam::DefaultTextureIndex::Additive 159 | ); 160 | WriteImageTag( 161 | 6, ShaderEnvironment->ReflectionCubeMap.TagID, 162 | Blam::DefaultTextureIndex::Additive 163 | ); 164 | 165 | return true; 166 | } 167 | 168 | } // namespace VkBlam -------------------------------------------------------------------------------- /source/VkBlam/Renderer.cpp: -------------------------------------------------------------------------------- 1 | #include "Vulkan/DescriptorUpdateBatch.hpp" 2 | #include "Vulkan/StreamBuffer.hpp" 3 | #include 4 | #include 5 | #include 6 | 7 | vk::UniqueRenderPass 8 | CreateMainRenderPass(vk::Device Device, vk::SampleCountFlagBits SampleCount) 9 | { 10 | vk::RenderPassCreateInfo RenderPassInfo = {}; 11 | 12 | const vk::AttachmentDescription Attachments[] = { 13 | // Color Attachment 14 | // We just care about it storing its color data 15 | vk::AttachmentDescription( 16 | vk::AttachmentDescriptionFlags(), vk::Format::eR8G8B8A8Srgb, 17 | vk::SampleCountFlagBits::e1, vk::AttachmentLoadOp::eClear, 18 | vk::AttachmentStoreOp::eStore, vk::AttachmentLoadOp::eDontCare, 19 | vk::AttachmentStoreOp::eDontCare, vk::ImageLayout::eUndefined, 20 | vk::ImageLayout::eTransferSrcOptimal 21 | ), 22 | // Depth Attachment 23 | // Dont care about reading or storing it 24 | vk::AttachmentDescription( 25 | vk::AttachmentDescriptionFlags(), vk::Format::eD32Sfloat, 26 | SampleCount, vk::AttachmentLoadOp::eClear, 27 | vk::AttachmentStoreOp::eDontCare, vk::AttachmentLoadOp::eClear, 28 | vk::AttachmentStoreOp::eDontCare, vk::ImageLayout::eUndefined, 29 | vk::ImageLayout::eDepthStencilAttachmentOptimal 30 | ), 31 | // Color Attachment(MSAA) 32 | // We just care about it storing its color data 33 | vk::AttachmentDescription( 34 | vk::AttachmentDescriptionFlags(), vk::Format::eR8G8B8A8Srgb, 35 | SampleCount, vk::AttachmentLoadOp::eClear, 36 | vk::AttachmentStoreOp::eDontCare, vk::AttachmentLoadOp::eDontCare, 37 | vk::AttachmentStoreOp::eDontCare, vk::ImageLayout::eUndefined, 38 | vk::ImageLayout::eColorAttachmentOptimal 39 | )}; 40 | 41 | const vk::AttachmentReference AttachmentRefs[] = { 42 | vk::AttachmentReference(0, vk::ImageLayout::eColorAttachmentOptimal), 43 | vk::AttachmentReference( 44 | 1, vk::ImageLayout::eDepthStencilAttachmentOptimal 45 | ), 46 | vk::AttachmentReference(2, vk::ImageLayout::eColorAttachmentOptimal), 47 | }; 48 | 49 | RenderPassInfo.attachmentCount = 
std::size(Attachments); 50 | RenderPassInfo.pAttachments = Attachments; 51 | 52 | vk::SubpassDescription Subpasses[1] = {{}}; 53 | 54 | // First subpass 55 | Subpasses[0].colorAttachmentCount = 1; 56 | Subpasses[0].pColorAttachments = &AttachmentRefs[2]; 57 | Subpasses[0].pDepthStencilAttachment = &AttachmentRefs[1]; 58 | Subpasses[0].pResolveAttachments = &AttachmentRefs[0]; 59 | 60 | RenderPassInfo.subpassCount = std::size(Subpasses); 61 | RenderPassInfo.pSubpasses = Subpasses; 62 | 63 | const vk::SubpassDependency SubpassDependencies[] = {vk::SubpassDependency( 64 | VK_SUBPASS_EXTERNAL, 0, vk::PipelineStageFlagBits::eTransfer, 65 | vk::PipelineStageFlagBits::eVertexInput, 66 | vk::AccessFlagBits::eTransferWrite, 67 | vk::AccessFlagBits::eVertexAttributeRead, 68 | vk::DependencyFlagBits::eByRegion 69 | )}; 70 | 71 | RenderPassInfo.dependencyCount = std::size(SubpassDependencies); 72 | RenderPassInfo.pDependencies = SubpassDependencies; 73 | 74 | if( auto CreateResult = Device.createRenderPassUnique(RenderPassInfo); 75 | CreateResult.result == vk::Result::eSuccess ) 76 | { 77 | return std::move(CreateResult.value); 78 | } 79 | else 80 | { 81 | std::fprintf( 82 | stderr, "Error creating render pass: %s\n", 83 | vk::to_string(CreateResult.result).c_str() 84 | ); 85 | return {}; 86 | } 87 | } 88 | 89 | namespace VkBlam 90 | { 91 | Renderer::Renderer(const Vulkan::Context& VulkanContext) 92 | : VulkanContext(VulkanContext) 93 | { 94 | } 95 | 96 | Renderer::~Renderer() 97 | { 98 | } 99 | 100 | const vk::RenderPass& 101 | Renderer::GetDefaultRenderPass(vk::SampleCountFlagBits SampleCount) 102 | { 103 | if( DefaultRenderPasses.contains(SampleCount) ) 104 | { 105 | return DefaultRenderPasses.at(SampleCount).get(); 106 | } 107 | 108 | DefaultRenderPasses[SampleCount] 109 | = CreateMainRenderPass(VulkanContext.LogicalDevice, SampleCount); 110 | 111 | return DefaultRenderPasses[SampleCount].get(); 112 | } 113 | 114 | std::optional Renderer::Create( 115 | const Vulkan::Context& VulkanContext, const RendererConfig& Config 116 | ) 117 | { 118 | Renderer NewRenderer(VulkanContext); 119 | 120 | NewRenderer.StreamBuffer = std::make_unique( 121 | VulkanContext, Config.StreamBufferSize 122 | ); 123 | 124 | NewRenderer.SamplerCache = std::make_unique( 125 | Vulkan::SamplerCache::Create(VulkanContext).value() 126 | ); 127 | 128 | NewRenderer.ShaderModuleCache = std::make_unique( 129 | Vulkan::ShaderModuleCache::Create(VulkanContext).value() 130 | ); 131 | 132 | NewRenderer.DescriptorUpdateBatch 133 | = std::make_unique( 134 | Vulkan::DescriptorUpdateBatch::Create( 135 | VulkanContext, Config.DescriptorWriteMax, 136 | Config.DescriptorCopyMax 137 | ) 138 | .value() 139 | ); 140 | 141 | return {std::move(NewRenderer)}; 142 | } 143 | 144 | } // namespace VkBlam -------------------------------------------------------------------------------- /source/Vulkan/DescriptorHeap.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | #include 4 | #include 5 | #include 6 | 7 | namespace Vulkan 8 | { 9 | 10 | DescriptorHeap::DescriptorHeap(const Vulkan::Context& VulkanContext) 11 | : VulkanContext(VulkanContext) 12 | { 13 | } 14 | 15 | std::optional DescriptorHeap::AllocateDescriptorSet() 16 | { 17 | // Find a free slot 18 | const auto FreeSlot 19 | = std::find(AllocationMap.begin(), AllocationMap.end(), false); 20 | 21 | // If there is no free slot, return 22 | if( FreeSlot == AllocationMap.end() ) 23 | { 24 | return std::nullopt; 25 | } 26 | 27 | // Mark the slot as 
allocated 28 | *FreeSlot = true; 29 | 30 | const std::uint16_t Index = static_cast( 31 | std::distance(AllocationMap.begin(), FreeSlot) 32 | ); 33 | 34 | vk::UniqueDescriptorSet& NewDescriptorSet = DescriptorSets[Index]; 35 | 36 | if( !NewDescriptorSet ) 37 | { 38 | // Descriptor set doesn't exist yet. Allocate a new one 39 | vk::DescriptorSetAllocateInfo AllocateInfo = {}; 40 | 41 | AllocateInfo.descriptorPool = DescriptorPool.get(); 42 | AllocateInfo.pSetLayouts = &DescriptorSetLayout.get(); 43 | AllocateInfo.descriptorSetCount = 1; 44 | 45 | if( auto AllocateResult 46 | = VulkanContext.LogicalDevice.allocateDescriptorSetsUnique( 47 | AllocateInfo 48 | ); 49 | AllocateResult.result == vk::Result::eSuccess ) 50 | { 51 | NewDescriptorSet = std::move(AllocateResult.value[0]); 52 | } 53 | else 54 | { 55 | // Error allocating descriptor set 56 | return std::nullopt; 57 | } 58 | } 59 | 60 | return NewDescriptorSet.get(); 61 | } 62 | 63 | bool DescriptorHeap::FreeDescriptorSet(vk::DescriptorSet Set) 64 | { 65 | // Find the descriptor set 66 | const auto Found = std::find_if( 67 | DescriptorSets.begin(), DescriptorSets.end(), 68 | [&Set](const auto& CurSet) -> bool { return CurSet.get() == Set; } 69 | ); 70 | 71 | // If the descriptor set is not found, return 72 | if( Found == DescriptorSets.end() ) 73 | { 74 | return false; 75 | } 76 | 77 | // Mark the slot as free 78 | const std::uint16_t Index = static_cast( 79 | std::distance(DescriptorSets.begin(), Found) 80 | ); 81 | 82 | AllocationMap[Index] = false; 83 | 84 | return true; 85 | } 86 | 87 | std::optional DescriptorHeap::Create( 88 | const Vulkan::Context& VulkanContext, 89 | std::span Bindings, 90 | std::uint16_t DescriptorHeapCount 91 | ) 92 | { 93 | DescriptorHeap NewDescriptorHeap(VulkanContext); 94 | 95 | // Create a histogram of each of the descriptor types and how many of each 96 | // the pool should have 97 | // Todo: maybe keep this around as a hash table to do more dynamic 98 | // allocations of descriptor sets rather than allocating them all up-front 99 | // - Sun May 15 07:52:14 AM PDT 2022 100 | std::vector PoolSizes; 101 | { 102 | std::unordered_map 103 | DescriptorTypeCounts; 104 | 105 | for( const auto& CurBinding : Bindings ) 106 | { 107 | DescriptorTypeCounts[CurBinding.descriptorType] 108 | += CurBinding.descriptorCount; 109 | } 110 | for( const auto& CurDescriptorTypeCount : DescriptorTypeCounts ) 111 | { 112 | PoolSizes.push_back(vk::DescriptorPoolSize( 113 | CurDescriptorTypeCount.first, 114 | CurDescriptorTypeCount.second * DescriptorHeapCount 115 | )); 116 | } 117 | } 118 | 119 | // Create descriptor pool 120 | { 121 | vk::DescriptorPoolCreateInfo PoolInfo; 122 | PoolInfo.flags = vk::DescriptorPoolCreateFlagBits::eFreeDescriptorSet; 123 | PoolInfo.maxSets = DescriptorHeapCount; 124 | PoolInfo.pPoolSizes = PoolSizes.data(); 125 | PoolInfo.poolSizeCount = PoolSizes.size(); 126 | if( auto CreateResult 127 | = VulkanContext.LogicalDevice.createDescriptorPoolUnique(PoolInfo); 128 | CreateResult.result == vk::Result::eSuccess ) 129 | { 130 | NewDescriptorHeap.DescriptorPool = std::move(CreateResult.value); 131 | } 132 | else 133 | { 134 | return std::nullopt; 135 | } 136 | } 137 | 138 | // Create descriptor set layout 139 | { 140 | vk::DescriptorSetLayoutCreateInfo LayoutInfo; 141 | LayoutInfo.pBindings = Bindings.data(); 142 | LayoutInfo.bindingCount = Bindings.size(); 143 | 144 | if( auto CreateResult 145 | = VulkanContext.LogicalDevice.createDescriptorSetLayoutUnique( 146 | LayoutInfo 147 | ); 148 | CreateResult.result 
== vk::Result::eSuccess ) 149 | { 150 | NewDescriptorHeap.DescriptorSetLayout 151 | = std::move(CreateResult.value); 152 | } 153 | else 154 | { 155 | return std::nullopt; 156 | } 157 | } 158 | 159 | NewDescriptorHeap.DescriptorSets.resize(DescriptorHeapCount); 160 | NewDescriptorHeap.AllocationMap.resize(DescriptorHeapCount); 161 | 162 | NewDescriptorHeap.Bindings.assign(Bindings.begin(), Bindings.end()); 163 | 164 | return {std::move(NewDescriptorHeap)}; 165 | } 166 | } // namespace Vulkan -------------------------------------------------------------------------------- /CMakeLists.txt: -------------------------------------------------------------------------------- 1 | cmake_minimum_required( VERSION 3.21.0 ) 2 | project( vkblam CXX ) 3 | 4 | ### Standard 5 | set( CMAKE_CXX_STANDARD 20 ) 6 | set( CMAKE_CXX_STANDARD_REQUIRED ON ) 7 | set( CMAKE_CXX_EXTENSIONS OFF ) 8 | 9 | set( CMAKE_COLOR_MAKEFILE ON ) 10 | 11 | # Generate 'compile_commands.json' for clang_complete 12 | set( CMAKE_EXPORT_COMPILE_COMMANDS ON ) 13 | 14 | if( MSVC ) 15 | add_compile_options( 16 | /MP # Parallel builds 17 | /permissive- # Stricter C++ conformance 18 | 19 | # Warnings 20 | /W3 21 | 22 | # Consider these warnings as errors 23 | /we4018 # 'expression': signed/unsigned mismatch 24 | /we4062 # Enumerator 'identifier' in a switch of enum 'enumeration' is not handled 25 | /we4101 # 'identifier': unreferenced local variable 26 | /we4265 # 'class': class has virtual functions, but destructor is not virtual 27 | /we4305 # 'context': truncation from 'type1' to 'type2' 28 | /we4388 # 'expression': signed/unsigned mismatch 29 | /we4389 # 'operator': signed/unsigned mismatch 30 | 31 | /we4456 # Declaration of 'identifier' hides previous local declaration 32 | /we4457 # Declaration of 'identifier' hides function parameter 33 | /we4458 # Declaration of 'identifier' hides class member 34 | /we4459 # Declaration of 'identifier' hides global declaration 35 | 36 | /we4505 # 'function': unreferenced local function has been removed 37 | /we4547 # 'operator': operator before comma has no effect; expected operator with side-effect 38 | /we4549 # 'operator1': operator before comma has no effect; did you intend 'operator2'? 
39 | /we4555 # Expression has no effect; expected expression with side-effect 40 | /we4715 # 'function': not all control paths return a value 41 | /we4834 # Discarding return value of function with 'nodiscard' attribute 42 | /we5038 # data member 'member1' will be initialized after data member 'member2' 43 | /we5245 # 'function': unreferenced function with internal linkage has been removed 44 | 45 | ) 46 | elseif( CMAKE_COMPILER_IS_GNUCXX OR CMAKE_CXX_COMPILER_ID MATCHES "Clang" ) 47 | # Force colored diagnostic messages in Ninja's output 48 | if( CMAKE_GENERATOR STREQUAL "Ninja" ) 49 | add_compile_options( -fdiagnostics-color=always ) 50 | endif() 51 | 52 | add_compile_options( 53 | -Wall 54 | -Warray-bounds 55 | -Wextra 56 | -Wimplicit-fallthrough 57 | -Wmissing-declarations 58 | -Wmissing-declarations 59 | -Wmissing-field-initializers 60 | -Wno-attributes 61 | -Wno-invalid-offsetof 62 | -Wno-unused-parameter 63 | -Wreorder 64 | -Wshadow 65 | -Wsign-compare 66 | -Wswitch 67 | -Wuninitialized 68 | -Wunused-function 69 | -Wunused-result 70 | -Wunused-variable 71 | ) 72 | endif() 73 | 74 | if( APPLE ) 75 | # Create Universal Binary 76 | set( CMAKE_OSX_ARCHITECTURES "arm64;x86_64" ) 77 | endif() 78 | 79 | find_package( 80 | Vulkan 1.3.206 REQUIRED 81 | COMPONENTS 82 | glslangValidator 83 | ) 84 | 85 | find_package( Threads REQUIRED ) 86 | 87 | find_package( glm 0.9.9.9 QUIET ) 88 | 89 | if( glm_FOUND ) 90 | else() 91 | add_subdirectory( external/glm EXCLUDE_FROM_ALL ) 92 | endif() 93 | 94 | # mio 95 | add_subdirectory( external/mio EXCLUDE_FROM_ALL ) 96 | 97 | #cmrc 98 | include( external/cmrc/CMakeRC.cmake ) 99 | 100 | ### shaders build target 101 | file( 102 | GLOB_RECURSE GLSL_SOURCE_FILES 103 | "shaders/*.frag" 104 | "shaders/*.vert" 105 | "shaders/*.comp" 106 | ) 107 | 108 | # Get spirv-opt 109 | if( WIN32 ) 110 | if( ${CMAKE_HOST_SYSTEM_PROCESSOR} STREQUAL "AMD64" ) 111 | set( SPIRV_OPT "$ENV{VULKAN_SDK}/Bin/spirv-opt" ) 112 | else() 113 | set( SPIRV_OPT "$ENV{VULKAN_SDK}/Bin32/spirv-opt" ) 114 | endif() 115 | else() 116 | set( SPIRV_OPT "spirv-opt" ) 117 | endif() 118 | 119 | foreach( GLSL ${GLSL_SOURCE_FILES} ) 120 | get_filename_component( FILE_NAME ${GLSL} NAME ) 121 | set( SPIRV "${PROJECT_BINARY_DIR}/shaders/${FILE_NAME}.spv" ) 122 | add_custom_command( 123 | OUTPUT ${SPIRV} 124 | COMMAND ${CMAKE_COMMAND} -E make_directory "${PROJECT_BINARY_DIR}/shaders/" 125 | COMMAND Vulkan::glslangValidator -t --target-env vulkan1.1 -g -V ${GLSL} -o ${SPIRV} 126 | #COMMAND ${SPIRV_OPT} -O ${SPIRV} -o ${SPIRV} 127 | DEPENDS ${GLSL} 128 | ) 129 | list( APPEND SPIRV_BINARY_FILES ${SPIRV} ) 130 | endforeach() 131 | 132 | add_custom_target( 133 | shaders 134 | DEPENDS ${SPIRV_BINARY_FILES} 135 | ) 136 | 137 | cmrc_add_resource_library( 138 | vkblam-resources 139 | ALIAS Resource::vkblam 140 | NAMESPACE vkblam 141 | WHENCE ${PROJECT_BINARY_DIR} 142 | ${SPIRV_BINARY_FILES} 143 | ) 144 | add_dependencies(vkblam-resources shaders) 145 | 146 | # LTO by default 147 | set(CMAKE_INTERPROCEDURAL_OPTIMIZATION TRUE) 148 | 149 | ### common 150 | add_library( 151 | common 152 | source/Common/Format.cpp 153 | ) 154 | target_include_directories( 155 | common 156 | PRIVATE 157 | include 158 | ) 159 | 160 | ### blam 161 | add_library( 162 | blam 163 | source/Blam/Blam.cpp 164 | source/Blam/TagVisitor.cpp 165 | source/Blam/Util.cpp 166 | ) 167 | target_include_directories( 168 | blam 169 | PRIVATE 170 | include 171 | ) 172 | target_link_libraries( 173 | blam 174 | PRIVATE 175 | common 176 | Threads::Threads 177 | ) 
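# Note: the SPIR-V produced by the `shaders` target above is embedded into the
# vkblam-resources library (aliased as Resource::vkblam) by
# cmrc_add_resource_library(); at runtime it is read back through the embedded
# cmrc filesystem, see OpenResource() in source/VkBlam/VkBlam.cpp.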
178 | 179 | ### dump-bsp 180 | add_executable( 181 | dump-bsp 182 | source/dump-bsp.cpp 183 | ) 184 | target_include_directories( 185 | dump-bsp 186 | PRIVATE 187 | include 188 | ) 189 | target_link_libraries( 190 | dump-bsp 191 | PRIVATE 192 | blam 193 | mio::mio 194 | ) 195 | 196 | ### decrypt-shader 197 | add_executable( 198 | decrypt-shader 199 | source/decrypt-shader.cpp 200 | ) 201 | target_include_directories( 202 | decrypt-shader 203 | PRIVATE 204 | include 205 | ) 206 | target_link_libraries( 207 | decrypt-shader 208 | PRIVATE 209 | mio::mio 210 | ) 211 | 212 | ### vkblam 213 | 214 | add_executable( 215 | vkblam 216 | source/main.cpp 217 | source/stb_image_write.cpp 218 | source/VkBlam/Renderer.cpp 219 | source/VkBlam/Scene.cpp 220 | source/VkBlam/Format.cpp 221 | source/VkBlam/SceneView.cpp 222 | source/VkBlam/Shader.cpp 223 | source/VkBlam/Shaders/ShaderEnvironment.cpp 224 | source/VkBlam/VkBlam.cpp 225 | source/VkBlam/World.cpp 226 | source/Vulkan/Debug.cpp 227 | source/Vulkan/DescriptorHeap.cpp 228 | source/Vulkan/DescriptorUpdateBatch.cpp 229 | source/Vulkan/Memory.cpp 230 | source/Vulkan/Pipeline.cpp 231 | source/Vulkan/SamplerCache.cpp 232 | source/Vulkan/ShaderModuleCache.cpp 233 | source/Vulkan/StreamBuffer.cpp 234 | source/Vulkan/VulkanAPI.cpp 235 | ) 236 | target_include_directories( 237 | vkblam 238 | PRIVATE 239 | include 240 | ) 241 | target_link_libraries( 242 | vkblam 243 | PRIVATE 244 | vkblam-resources 245 | blam 246 | Vulkan::Vulkan 247 | mio::mio 248 | glm 249 | ${CMAKE_DL_LIBS} 250 | ) -------------------------------------------------------------------------------- /source/VkBlam/VkBlam.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | #include 4 | #include 5 | 6 | #include 7 | 8 | CMRC_DECLARE(vkblam); 9 | static cmrc::embedded_filesystem DataFS = cmrc::vkblam::get_filesystem(); 10 | 11 | namespace VkBlam 12 | { 13 | 14 | vk::SamplerCreateInfo Sampler2D(bool Filtered, bool Clamp) 15 | { 16 | vk::SamplerCreateInfo SamplerInfo = {}; 17 | SamplerInfo.magFilter = Filtered ? vk::Filter::eLinear 18 | : vk::Filter::eNearest; 19 | SamplerInfo.minFilter = Filtered ? vk::Filter::eLinear 20 | : vk::Filter::eNearest; 21 | 22 | SamplerInfo.mipmapMode = vk::SamplerMipmapMode::eLinear; 23 | 24 | SamplerInfo.addressModeU = Clamp ? vk::SamplerAddressMode::eClampToEdge 25 | : vk::SamplerAddressMode::eRepeat; 26 | SamplerInfo.addressModeV = Clamp ? vk::SamplerAddressMode::eClampToEdge 27 | : vk::SamplerAddressMode::eRepeat; 28 | SamplerInfo.addressModeW = Clamp ? 
vk::SamplerAddressMode::eClampToEdge 29 | : vk::SamplerAddressMode::eRepeat; 30 | 31 | SamplerInfo.mipLodBias = 0.0f; 32 | SamplerInfo.anisotropyEnable = VK_TRUE; 33 | SamplerInfo.maxAnisotropy = 16.0f; 34 | 35 | SamplerInfo.compareEnable = VK_FALSE; 36 | SamplerInfo.compareOp = vk::CompareOp::eAlways; 37 | 38 | SamplerInfo.minLod = 0.0f; 39 | SamplerInfo.maxLod = VK_LOD_CLAMP_NONE; 40 | SamplerInfo.borderColor = vk::BorderColor::eFloatTransparentBlack; 41 | SamplerInfo.unnormalizedCoordinates = VK_FALSE; 42 | return SamplerInfo; 43 | } 44 | 45 | vk::SamplerCreateInfo SamplerCube() 46 | { 47 | vk::SamplerCreateInfo SamplerInfo = {}; 48 | 49 | SamplerInfo.magFilter = vk::Filter::eLinear; 50 | SamplerInfo.minFilter = vk::Filter::eLinear; 51 | 52 | SamplerInfo.mipmapMode = vk::SamplerMipmapMode::eLinear; 53 | 54 | SamplerInfo.addressModeU = vk::SamplerAddressMode::eClampToEdge; 55 | SamplerInfo.addressModeV = vk::SamplerAddressMode::eClampToEdge; 56 | SamplerInfo.addressModeW = vk::SamplerAddressMode::eClampToEdge; 57 | 58 | SamplerInfo.mipLodBias = 0.0f; 59 | SamplerInfo.anisotropyEnable = VK_FALSE; 60 | SamplerInfo.maxAnisotropy = 1.0f; 61 | 62 | SamplerInfo.compareEnable = VK_FALSE; 63 | SamplerInfo.compareOp = vk::CompareOp::eAlways; 64 | 65 | SamplerInfo.minLod = 0.0f; 66 | SamplerInfo.maxLod = VK_LOD_CLAMP_NONE; 67 | SamplerInfo.borderColor = vk::BorderColor::eFloatTransparentBlack; 68 | SamplerInfo.unnormalizedCoordinates = VK_FALSE; 69 | return SamplerInfo; 70 | } 71 | 72 | std::optional> OpenResource(const std::string& Path) 73 | { 74 | if( !DataFS.exists(Path) ) 75 | { 76 | return {}; 77 | } 78 | const cmrc::file File = DataFS.open(Path); 79 | return std::span( 80 | reinterpret_cast(File.cbegin()), File.size() 81 | ); 82 | } 83 | 84 | std::vector 85 | GetVertexInputBindings(std::span Formats) 86 | { 87 | std::vector Result; 88 | 89 | std::size_t BindingIndex = 0; 90 | for( const Blam::VertexFormat& CurFormat : Formats ) 91 | { 92 | Result.push_back(vk::VertexInputBindingDescription( 93 | BindingIndex, Blam::GetVertexStride(CurFormat), 94 | vk::VertexInputRate::eVertex 95 | )); 96 | 97 | ++BindingIndex; 98 | } 99 | 100 | return Result; 101 | } 102 | 103 | static std::array< 104 | std::initializer_list, 20> 105 | VertexFormatAttributes = {{ 106 | // SBSPVertexUncompressed 107 | { 108 | {0, 0, vk::Format::eR32G32B32Sfloat, 0x00}, 109 | {0, 0, vk::Format::eR32G32B32Sfloat, 0x0C}, 110 | {0, 0, vk::Format::eR32G32B32Sfloat, 0x18}, 111 | {0, 0, vk::Format::eR32G32B32Sfloat, 0x24}, 112 | {0, 0, vk::Format::eR32G32Sfloat, 0x30}, 113 | }, 114 | // SBSPVertexCompressed 115 | { 116 | {}, 117 | {}, 118 | {}, 119 | }, 120 | // SBSPLightmapVertexUncompressed 121 | { 122 | {0, 0, vk::Format::eR32G32B32Sfloat, 0x00}, // D3DDECLUSAGE_NORMAL 123 | {0, 0, vk::Format::eR32G32Sfloat, 0x0C}, // D3DDECLUSAGE_TEXCOORD 124 | }, 125 | // SBSPLightmapVertexCompressed 126 | { 127 | {}, 128 | {}, 129 | {}, 130 | }, 131 | // ModelUncompressed 132 | { 133 | {0, 0, vk::Format::eR32G32B32Sfloat, 0x00}, // D3DDECLUSAGE_POSITION 134 | {0, 0, vk::Format::eR32G32B32Sfloat, 0x0C}, // D3DDECLUSAGE_NORMAL 135 | {0, 0, vk::Format::eR32G32B32Sfloat, 0x18}, // D3DDECLUSAGE_BINORMAL 136 | {0, 0, vk::Format::eR32G32B32Sfloat, 0x24}, // D3DDECLUSAGE_TANGENT 137 | {0, 0, vk::Format::eR32G32Sfloat, 0x30}, // D3DDECLUSAGE_TEXCOORD 138 | 139 | }, 140 | // ModelCompressed 141 | { 142 | {}, 143 | {}, 144 | {}, 145 | }, 146 | // 6 147 | { 148 | {}, 149 | {}, 150 | {}, 151 | }, 152 | // 7 153 | { 154 | {}, 155 | {}, 156 | {}, 157 | }, 
158 | // 8 159 | { 160 | {}, 161 | {}, 162 | {}, 163 | }, 164 | // 9 165 | { 166 | {}, 167 | {}, 168 | {}, 169 | }, 170 | // 10 171 | { 172 | {}, 173 | {}, 174 | {}, 175 | }, 176 | // 11 177 | { 178 | {}, 179 | {}, 180 | {}, 181 | }, 182 | // 12 183 | { 184 | {}, 185 | {}, 186 | {}, 187 | }, 188 | // 13 189 | { 190 | {}, 191 | {}, 192 | {}, 193 | }, 194 | // 14 195 | { 196 | {}, 197 | {}, 198 | {}, 199 | }, 200 | // 15 201 | { 202 | {}, 203 | {}, 204 | {}, 205 | }, 206 | // 16 207 | { 208 | {}, 209 | {}, 210 | {}, 211 | }, 212 | // 17 213 | { 214 | {}, 215 | {}, 216 | {}, 217 | }, 218 | // 18 219 | { 220 | {}, 221 | {}, 222 | {}, 223 | }, 224 | // 19 225 | { 226 | {}, 227 | {}, 228 | {}, 229 | }, 230 | }}; 231 | 232 | std::vector 233 | GetVertexInputAttributes(std::span Formats) 234 | { 235 | std::vector Result; 236 | 237 | std::size_t BindingIndex = 0; 238 | std::size_t LocationIndex = 0; 239 | for( const Blam::VertexFormat& CurFormat : Formats ) 240 | { 241 | std::vector CurVertexAttributes 242 | = VertexFormatAttributes.at(static_cast(CurFormat)); 243 | 244 | for( vk::VertexInputAttributeDescription& CurAttribute : 245 | CurVertexAttributes ) 246 | { 247 | CurAttribute.binding = BindingIndex; 248 | CurAttribute.location = LocationIndex; 249 | ++LocationIndex; 250 | } 251 | 252 | Result.insert( 253 | Result.end(), CurVertexAttributes.begin(), CurVertexAttributes.end() 254 | ); 255 | 256 | ++BindingIndex; 257 | } 258 | 259 | return Result; 260 | } 261 | 262 | } // namespace VkBlam -------------------------------------------------------------------------------- /source/decrypt-shader.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include 7 | 8 | #include 9 | 10 | #include 11 | 12 | const char Help[] 13 | = "Decrypts the `Shaders/*.{bin,enc}` files found within Halo CE and PC \n" 14 | "Usage: ./decrypt-shader {list of files}\n" 15 | "\tfor each file, a {file}.decrypted.bin will be created along side it\n" 16 | "\twith the decrypted contents\n"; 17 | 18 | // Found within the Halo Custom Edition executable 19 | const static std::uint32_t HaloCEKey[4] = { 20 | 0x3FFFFFDD, 21 | 0x00007FC3, 22 | 0x000000E5, 23 | 0x003FFFEF, 24 | }; 25 | 26 | void TEADecryptBlock( 27 | const std::span Data, 28 | std::span Key 29 | ) 30 | { 31 | const std::uint32_t Delta = 0x61C88647; 32 | std::uint32_t Sum = 0xC6EF3720; 33 | 34 | std::uint32_t V0 = Data[0]; 35 | std::uint32_t V1 = Data[1]; 36 | 37 | for( std::uint8_t i = 0; i < 32; i++ ) 38 | { 39 | V1 -= (Sum + V0) ^ ((Key[2] + (V0 << 4) ^ Key[3] + (V0 >> 5))); 40 | V0 -= (Sum + V1) ^ ((Key[0] + (V1 << 4) ^ Key[1] + (V1 >> 5))); 41 | Sum += Delta; 42 | } 43 | Data[0] = V0; 44 | Data[1] = V1; 45 | } 46 | 47 | // - The last 32 bytes of the decrypted file are an ascii MD5 hash of all of the 48 | // data before it 49 | bool DecryptShader( 50 | const std::filesystem::path& InFile, const std::filesystem::path& OutFile 51 | ) 52 | { 53 | std::error_code ErrorCode; 54 | if( !std::filesystem::exists(InFile) ) 55 | { 56 | // File does not exists 57 | return false; 58 | } 59 | if( std::filesystem::file_size(InFile) < 8 ) 60 | { 61 | // File is not valid 62 | return false; 63 | } 64 | 65 | std::filesystem::create_directories(OutFile.parent_path(), ErrorCode); 66 | if( ErrorCode ) 67 | { 68 | std::fprintf( 69 | stderr, "create_directories(%s): %s", 70 | OutFile.parent_path().string().c_str(), ErrorCode.message().c_str() 71 | ); 72 | return false; 73 | } 74 | 
std::filesystem::copy_file(
75 | InFile, OutFile, std::filesystem::copy_options::overwrite_existing,
76 | ErrorCode
77 | );
78 | if( ErrorCode )
79 | {
80 | std::fprintf(
81 | stderr, "copy_file(%s -> %s): %s", InFile.string().c_str(),
82 | OutFile.string().c_str(), ErrorCode.message().c_str()
83 | );
84 | return false;
85 | }
86 |
87 | mio::mmap_sink DecryptedFile = mio::mmap_sink(OutFile.c_str());
88 | std::span<std::uint32_t> DecryptedData(
89 | reinterpret_cast<std::uint32_t*>(DecryptedFile.data()),
90 | DecryptedFile.size() / sizeof(std::uint32_t)
91 | );
92 |
93 | if( DecryptedFile.size() & 0x80000007 )
94 | {
95 | TEADecryptBlock(
96 | std::span<std::uint32_t>(
97 | reinterpret_cast<std::uint32_t*>(
98 | DecryptedFile.data() + DecryptedFile.size() - 8
99 | ),
100 | 2
101 | )
102 | .first<2>(),
103 | HaloCEKey
104 | );
105 | }
106 |
107 | while( DecryptedData.size() >= 2 )
108 | {
109 | const std::span<std::uint32_t, 2> CurSpan = DecryptedData.first<2>();
110 | TEADecryptBlock(CurSpan, HaloCEKey);
111 | DecryptedData = DecryptedData.subspan(2);
112 | }
113 |
114 | return true;
115 | }
116 |
117 | /* For ImHex
118 | struct VertexShaderBlob
119 | {
120 | u32 Size;
121 | u8 Data[Size];
122 | };
123 |
124 | VertexShaderBlob shaders[64] @0x00;
125 | */
126 | bool DumpVertexShaderFile(std::span<const std::byte> ShaderFile)
127 | {
128 | for( std::uint8_t i = 0; i < 64; ++i )
129 | {
130 | const std::uint32_t& ShaderDataSize
131 | = *reinterpret_cast<const std::uint32_t*>(ShaderFile.data());
132 | ShaderFile = ShaderFile.subspan(sizeof(uint32_t));
133 |
134 | const std::span<const std::byte> VertexShaderByteCode
135 | = ShaderFile.subspan(0, ShaderDataSize);
136 | ShaderFile = ShaderFile.subspan(ShaderDataSize);
137 |
138 | std::ofstream OutFile(Common::Format("vsh.%02zu.dxso", static_cast<std::size_t>(i)));
139 |
140 | if( OutFile )
141 | {
142 | OutFile.write(
143 | reinterpret_cast<const char*>(VertexShaderByteCode.data()),
144 | VertexShaderByteCode.size()
145 | );
146 | }
147 | else
148 | {
149 | std::fprintf(stderr, "Error dumping shader");
150 | }
151 | }
152 |
153 | return true;
154 | }
155 |
156 | struct FragmentShaderEntry
157 | {
158 | std::uint32_t PermutationCount;
159 | char Name[0x80];
160 | };
161 | static_assert(sizeof(FragmentShaderEntry) == 0x84);
162 |
163 | // This is particularly for Halo CE.
Will not work with Halo PC 164 | bool DumpFragmentShaderFileCE(const std::span ShaderFile) 165 | { 166 | auto StreamSpan = ShaderFile; 167 | 168 | const std::uint32_t& ShaderCount 169 | = *reinterpret_cast(ShaderFile.data()); 170 | StreamSpan = StreamSpan.subspan(sizeof(uint32_t)); 171 | 172 | for( std::uint8_t ShaderIdx = 0; ShaderIdx < ShaderCount; ++ShaderIdx ) 173 | { 174 | const std::uint32_t& ShaderNameLength 175 | = *reinterpret_cast(StreamSpan.data()); 176 | StreamSpan = StreamSpan.subspan(sizeof(uint32_t)); 177 | 178 | const auto ShaderNameData = StreamSpan.subspan(0, ShaderNameLength); 179 | StreamSpan = StreamSpan.subspan(ShaderNameLength); 180 | 181 | const char* ShaderName = (const char*)ShaderNameData.data(); 182 | 183 | const std::uint32_t& ShaderPermutationCount 184 | = *reinterpret_cast(StreamSpan.data()); 185 | StreamSpan = StreamSpan.subspan(sizeof(uint32_t)); 186 | 187 | std::printf( 188 | "%.*s | Permutations: %u\n", ShaderNameLength, ShaderName, 189 | ShaderPermutationCount 190 | ); 191 | 192 | for( std::uint8_t PermutationIdx = 0; 193 | PermutationIdx < ShaderPermutationCount; ++PermutationIdx ) 194 | { 195 | const std::uint32_t& PermutationNameLength 196 | = *reinterpret_cast(StreamSpan.data()); 197 | StreamSpan = StreamSpan.subspan(sizeof(uint32_t)); 198 | 199 | const auto PermutationNameData 200 | = StreamSpan.subspan(0, PermutationNameLength); 201 | StreamSpan = StreamSpan.subspan(PermutationNameLength); 202 | 203 | const char* PermutationName 204 | = (const char*)PermutationNameData.data(); 205 | std::printf("\t - %.*s\n", PermutationNameLength, PermutationName); 206 | 207 | const std::uint32_t& PermutationShaderDataLength 208 | = *reinterpret_cast(StreamSpan.data()); 209 | StreamSpan = StreamSpan.subspan(sizeof(uint32_t)); 210 | 211 | const auto PermutationShaderData 212 | = StreamSpan.subspan(0, PermutationShaderDataLength * 4); 213 | StreamSpan = StreamSpan.subspan(PermutationShaderDataLength * 4); 214 | } 215 | } 216 | 217 | return true; 218 | } 219 | 220 | int main(int argc, char* argv[]) 221 | { 222 | if( argc <= 1 ) 223 | { 224 | // Not enough arguments 225 | return EXIT_FAILURE; 226 | } 227 | 228 | for( int i = 1; i < argc; ++i ) 229 | { 230 | const std::filesystem::path InPath = argv[i]; 231 | const std::filesystem::path OutPath 232 | = std::filesystem::path(InPath).replace_extension(".decrypted.bin"); 233 | std::fprintf( 234 | stdout, "%s -> %s: ", InPath.string().c_str(), 235 | OutPath.string().c_str() 236 | ); 237 | if( DecryptShader(InPath, OutPath) ) 238 | { 239 | std::fprintf(stdout, "Done\n"); 240 | 241 | mio::mmap_source DecryptedFile = mio::mmap_source(OutPath.c_str()); 242 | std::span DecryptedData( 243 | reinterpret_cast(DecryptedFile.data()), 244 | DecryptedFile.size() 245 | ); 246 | 247 | // DumpVertexShaderFile(DecryptedData); 248 | DumpFragmentShaderFileCE(DecryptedData); 249 | } 250 | else 251 | { 252 | std::fprintf(stdout, "Failed to decrypt file\n"); 253 | } 254 | } 255 | 256 | return EXIT_SUCCESS; 257 | } -------------------------------------------------------------------------------- /source/Vulkan/Memory.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | 4 | namespace Vulkan 5 | { 6 | 7 | // Given a speculative heap-allocation, defined by its current size and 8 | // memory-type bits, appends a memory requirements structure to it, updating 9 | // both the size and the required memory-type-bits. 
Returns the offset within
10 | // the heap for the current MemoryRequirements. Todo (Sun Apr 23 13:28:25 PDT
11 | // 2023): Rather than using a running size of the heap, look at all of the memory
12 | // requests and optimally create a packing for all of the offset and alignment
13 | // requirements. Such as by satisfying all of the largest alignments first, and
14 | // then the smallest, to reduce padding
15 | static vk::DeviceSize CommitMemoryRequestToHeap(
16 | const vk::MemoryRequirements& CurMemoryRequirements,
17 | vk::DeviceSize& CurHeapEnd, std::uint32_t& CurMemoryTypeBits,
18 | vk::DeviceSize SizeAlignment
19 | )
20 | {
21 | // Accumulate a mask of all the memory types that satisfy each of the
22 | // handles
23 | CurMemoryTypeBits &= CurMemoryRequirements.memoryTypeBits;
24 |
25 | // Pad up the memory sizes so they are not considered aliasing
26 | const vk::DeviceSize CurMemoryOffset
27 | = Common::AlignUp(CurHeapEnd, CurMemoryRequirements.alignment);
28 | // Pad the size by the required size-alignment.
29 | // Intended for BufferImageGranularity
30 | const vk::DeviceSize CurMemorySize
31 | = Common::AlignUp(CurMemoryRequirements.size, SizeAlignment);
32 |
33 | CurHeapEnd = (CurMemoryOffset + CurMemorySize);
34 | return CurMemoryOffset;
35 | }
36 |
37 | std::int32_t FindMemoryTypeIndex(
38 | vk::PhysicalDevice PhysicalDevice, std::uint32_t MemoryTypeMask,
39 | vk::MemoryPropertyFlags MemoryProperties,
40 | vk::MemoryPropertyFlags MemoryExcludeProperties
41 | )
42 | {
43 | const vk::PhysicalDeviceMemoryProperties DeviceMemoryProperties
44 | = PhysicalDevice.getMemoryProperties();
45 | // Iterate the physical device's memory types until we find a match
46 | for( std::size_t i = 0; i < DeviceMemoryProperties.memoryTypeCount; i++ )
47 | {
48 | if(
49 | // Is within memory type mask
50 | (((MemoryTypeMask >> i) & 0b1) == 0b1) &&
51 | // Has property flags
52 | (DeviceMemoryProperties.memoryTypes[i].propertyFlags
53 | & MemoryProperties)
54 | == MemoryProperties
55 | &&
56 | // None of the excluded properties are enabled
57 | !(DeviceMemoryProperties.memoryTypes[i].propertyFlags
58 | & MemoryExcludeProperties) )
59 | {
60 | return static_cast<std::int32_t>(i);
61 | }
62 | }
63 |
64 | return -1;
65 | }
66 |
67 | std::tuple<vk::Result, vk::UniqueDeviceMemory> CommitImageHeap(
68 | vk::Device Device, vk::PhysicalDevice PhysicalDevice,
69 | const std::span<const vk::Image> Images,
70 | vk::MemoryPropertyFlags MemoryProperties,
71 | vk::MemoryPropertyFlags MemoryExcludeProperties
72 | )
73 | {
74 | vk::MemoryAllocateInfo ImageHeapAllocInfo = {};
75 | std::uint32_t ImageHeapMemoryTypeBits = 0xFFFFFFFF;
76 | std::vector<vk::BindImageMemoryInfo> ImageHeapBinds;
77 |
78 | const vk::DeviceSize BufferImageGranularity
79 | = PhysicalDevice.getProperties().limits.bufferImageGranularity;
80 |
81 | for( const vk::Image& CurImage : Images )
82 | {
83 | const vk::DeviceSize CurBindOffset = CommitMemoryRequestToHeap(
84 | Device.getImageMemoryRequirements(CurImage),
85 | ImageHeapAllocInfo.allocationSize, ImageHeapMemoryTypeBits,
86 | BufferImageGranularity
87 | );
88 |
89 | if( ImageHeapMemoryTypeBits == 0 )
90 | {
91 | // No possible memory heap for all of the images to share
92 | return std::make_tuple(
93 | vk::Result::eErrorOutOfDeviceMemory, vk::UniqueDeviceMemory()
94 | );
95 | }
96 |
97 | // Put nullptr for the device memory for now
98 | ImageHeapBinds.emplace_back(vk::BindImageMemoryInfo{
99 | CurImage, nullptr, CurBindOffset});
100 | }
101 |
102 | const std::int32_t MemoryTypeIndex = FindMemoryTypeIndex(
103 | PhysicalDevice, ImageHeapMemoryTypeBits, MemoryProperties,
104 |
MemoryExcludeProperties 105 | ); 106 | 107 | if( MemoryTypeIndex < 0 ) 108 | { 109 | // Unable to find a memory heap that satisfies all the images 110 | return std::make_tuple( 111 | vk::Result::eErrorOutOfDeviceMemory, vk::UniqueDeviceMemory() 112 | ); 113 | } 114 | 115 | ImageHeapAllocInfo.memoryTypeIndex = MemoryTypeIndex; 116 | 117 | vk::UniqueDeviceMemory ImageHeapMemory = {}; 118 | 119 | if( auto AllocResult = Device.allocateMemoryUnique(ImageHeapAllocInfo); 120 | AllocResult.result == vk::Result::eSuccess ) 121 | { 122 | ImageHeapMemory = std::move(AllocResult.value); 123 | } 124 | else 125 | { 126 | return std::make_tuple(AllocResult.result, vk::UniqueDeviceMemory()); 127 | } 128 | 129 | // Assign the device memory to the bindings 130 | for( vk::BindImageMemoryInfo& CurBind : ImageHeapBinds ) 131 | { 132 | CurBind.memory = ImageHeapMemory.get(); 133 | } 134 | 135 | // Now bind them all in one call 136 | if( const vk::Result BindResult = Device.bindImageMemory2(ImageHeapBinds); 137 | BindResult == vk::Result::eSuccess ) 138 | { 139 | // Binding memory succeeded 140 | } 141 | else 142 | { 143 | return std::make_tuple(BindResult, vk::UniqueDeviceMemory()); 144 | } 145 | 146 | return std::make_tuple(vk::Result::eSuccess, std::move(ImageHeapMemory)); 147 | } 148 | 149 | std::tuple CommitBufferHeap( 150 | vk::Device Device, vk::PhysicalDevice PhysicalDevice, 151 | const std::span Buffers, 152 | vk::MemoryPropertyFlags MemoryProperties, 153 | vk::MemoryPropertyFlags MemoryExcludeProperties 154 | ) 155 | { 156 | vk::MemoryAllocateInfo BufferHeapAllocInfo = {}; 157 | std::uint32_t BufferHeapMemoryTypeBits = 0xFFFFFFFF; 158 | std::vector BufferHeapBinds; 159 | 160 | const vk::DeviceSize BufferImageGranularity 161 | = PhysicalDevice.getProperties().limits.bufferImageGranularity; 162 | 163 | for( const vk::Buffer& CurBuffer : Buffers ) 164 | { 165 | const vk::DeviceSize CurBindOffset = CommitMemoryRequestToHeap( 166 | Device.getBufferMemoryRequirements(CurBuffer), 167 | BufferHeapAllocInfo.allocationSize, BufferHeapMemoryTypeBits, 168 | BufferImageGranularity 169 | ); 170 | 171 | if( BufferHeapMemoryTypeBits == 0 ) 172 | { 173 | // No possible memory heap for all of the buffers to share 174 | return std::make_tuple( 175 | vk::Result::eErrorOutOfDeviceMemory, vk::UniqueDeviceMemory() 176 | ); 177 | } 178 | 179 | // Put nullptr for the device memory for now 180 | BufferHeapBinds.emplace_back(vk::BindBufferMemoryInfo{ 181 | CurBuffer, nullptr, CurBindOffset}); 182 | } 183 | 184 | const std::int32_t MemoryTypeIndex = FindMemoryTypeIndex( 185 | PhysicalDevice, BufferHeapMemoryTypeBits, MemoryProperties, 186 | MemoryExcludeProperties 187 | ); 188 | 189 | if( MemoryTypeIndex < 0 ) 190 | { 191 | // Unable to find a memory heap that satisfies all the buffers 192 | return std::make_tuple( 193 | vk::Result::eErrorOutOfDeviceMemory, vk::UniqueDeviceMemory() 194 | ); 195 | } 196 | 197 | BufferHeapAllocInfo.memoryTypeIndex = MemoryTypeIndex; 198 | 199 | vk::UniqueDeviceMemory BufferHeapMemory = {}; 200 | 201 | if( auto AllocResult = Device.allocateMemoryUnique(BufferHeapAllocInfo); 202 | AllocResult.result == vk::Result::eSuccess ) 203 | { 204 | BufferHeapMemory = std::move(AllocResult.value); 205 | } 206 | else 207 | { 208 | return std::make_tuple(AllocResult.result, vk::UniqueDeviceMemory()); 209 | } 210 | 211 | // Assign the device memory to the bindings 212 | for( vk::BindBufferMemoryInfo& CurBind : BufferHeapBinds ) 213 | { 214 | CurBind.memory = BufferHeapMemory.get(); 215 | } 216 | 217 | // Now bind 
them all in one call 218 | if( const vk::Result BindResult = Device.bindBufferMemory2(BufferHeapBinds); 219 | BindResult == vk::Result::eSuccess ) 220 | { 221 | // Binding memory succeeded 222 | } 223 | else 224 | { 225 | return std::make_tuple(BindResult, vk::UniqueDeviceMemory()); 226 | } 227 | 228 | return std::make_tuple(vk::Result::eSuccess, std::move(BufferHeapMemory)); 229 | } 230 | 231 | } // namespace Vulkan -------------------------------------------------------------------------------- /include/Blam/Enums.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | #include 5 | 6 | namespace Blam 7 | { 8 | 9 | enum class CacheVersion : std::uint32_t 10 | { 11 | Xbox = 0x5, 12 | Demo = 0x6, 13 | Retail = 0x7, 14 | H1A = 0xD, 15 | CustomEdition = 0x261 16 | }; 17 | 18 | enum class ScenarioType : std::uint16_t 19 | { 20 | SinglePlayer = 0x0, 21 | MultiPlayer = 0x1, 22 | UserInterface = 0x2, 23 | }; 24 | 25 | enum class ResourceMapType : std::uint32_t 26 | { 27 | Bitmaps = 0x0, 28 | Sounds = 0x1, 29 | Loc = 0x2, 30 | }; 31 | 32 | enum class BitmapType : std::uint16_t 33 | { 34 | Texture2D = 0x0, 35 | Texture3D = 0x1, 36 | CubeMap = 0x2, 37 | Sprite = 0x3, 38 | Interface = 0x4, 39 | }; 40 | 41 | enum class BitmapFormat : std::uint16_t 42 | { 43 | CompressedColorKeyTransparency = 0x00, 44 | CompressedExplicitAlpha = 0x01, 45 | CompressedInterpolatedAlpha = 0x02, 46 | Color16Bit = 0x03, 47 | Color32Bit = 0x04, 48 | Monochrome = 0x05, 49 | }; 50 | 51 | enum class BitmapUsage : std::uint16_t 52 | { 53 | AlphaBlend = 0x00, 54 | Default = 0x01, 55 | HeightMap = 0x02, 56 | DetailMap = 0x03, 57 | LightMap = 0x04, 58 | VectorMap = 0x05, 59 | }; 60 | 61 | enum class BitmapSpriteBudgetSize : std::uint16_t 62 | { 63 | _32 = 0x00, 64 | _64 = 0x01, 65 | _128 = 0x02, 66 | _256 = 0x03, 67 | _512 = 0x04, 68 | }; 69 | 70 | enum class BitmapSpriteUsage : std::uint16_t 71 | { 72 | Blend_Add_Subtract_Max = 0x00, 73 | Multiply_Min = 0x01, 74 | Double_Multiply = 0x02, 75 | }; 76 | 77 | enum class BitmapFlags : std::uint16_t 78 | { 79 | EnableDiffusionDithering = 0, 80 | DisableHeightMapCompression = 1, 81 | UniformSpriteSequences = 2, 82 | FilthySpriteBugFix = 3, 83 | }; 84 | 85 | enum class DefaultTextureIndex : std::uint16_t 86 | { 87 | Additive = 0, 88 | Multiplicative = 1, 89 | SignedAdditive = 2, 90 | Vector = 3, 91 | }; 92 | 93 | enum class BitmapEntryType : std::uint16_t 94 | { 95 | Texture2D = 0x00, 96 | Texture3D = 0x01, 97 | CubeMap = 0x02, 98 | White = 0x03, 99 | }; 100 | 101 | enum class BitmapEntryBitFlags : std::uint16_t 102 | { 103 | PowerOfTwoDimensions = 1u << 0, 104 | Compressed = 1u << 1, 105 | Palettized = 1u << 2, 106 | Swizzled = 1u << 3, 107 | Linear = 1u << 4, 108 | V16U16 = 1u << 5, 109 | }; 110 | 111 | enum class BitmapEntryFormat : std::uint16_t 112 | { 113 | A8 = 0x00, 114 | Y8 = 0x01, 115 | AY8 = 0x02, 116 | A8Y8 = 0x03, 117 | R5G6B5 = 0x06, 118 | A1R5G5B5 = 0x08, 119 | A4R4G4B4 = 0x09, 120 | X8R8G8B8 = 0x0A, 121 | A8R8G8B8 = 0x0B, 122 | DXT1 = 0x0E, 123 | DXT2AND3 = 0x0F, 124 | DXT4AND5 = 0x10, 125 | P8 = 0x11, 126 | }; 127 | 128 | enum class PhysicsMaterialType : std::uint16_t 129 | { 130 | Dirt, 131 | Sand, 132 | Stone, 133 | Snow, 134 | Wood, 135 | MetalHollow, 136 | MetalThin, 137 | MetalThick, 138 | Rubber, 139 | Glass, 140 | ForceField, 141 | Grunt, 142 | HunterArmor, 143 | HunterSkin, 144 | Elite, 145 | Jackal, 146 | JackalEnergyShield, 147 | EngineerSkin, 148 | EngineerForceField, 149 | FloodCombatForm, 150 
| FloodCarrierForm, 151 | CyborgArmor, 152 | CyborgEnergyShield, 153 | HumanArmor, 154 | HumanSkin, 155 | Sentinel, 156 | Monitor, 157 | Plastic, 158 | Water, 159 | Leaves, 160 | EliteEnergyShield, 161 | Ice, 162 | HunterShield, 163 | }; 164 | 165 | enum class DetailMapFunction : std::uint16_t 166 | { 167 | DoubleBiasedMultiply, 168 | Multiply, 169 | DoubleBiasedAdd, 170 | }; 171 | 172 | enum class AnimationFunction : std::uint16_t 173 | { 174 | // t = time_in_seconds / 36.0 175 | // v = random * 28.0 176 | One = 0, // 1.0 177 | Zero = 1, // 0.0 178 | Cosine = 2, // cos(t * 2 * pi) 179 | CosineVariablePeriod = 3, // cos(v * 2 * pi) 180 | DiagonalWave = 4, // (t < 0.5)?(1-((fract(t)-0.5):fract(t))*2 181 | DiagonalWaveVariablePeriod = 5, // (v < 0.5)?(1-((fract(v)-0.5):fract(v))*2 182 | Slide = 6, // fract(t) 183 | SlideVariablePeriod = 7, // fract(v) 184 | 185 | // uint32_t NextRand() 186 | // { 187 | // static uint32_t RandState = 0x20F3F660; 188 | // 189 | // const uint32_t CurRand = (0x19660D * RandState) + 0x3C6EF35F; 190 | // RandState = CurRand; 191 | // 192 | // return CurRand; 193 | // } 194 | 195 | // float(NextRand() >> 16) / 65535.0 196 | Noise = 8, 197 | 198 | Jitter = 9, 199 | Wander = 10, 200 | 201 | Spark = 11, // fract(t) * t 202 | }; 203 | 204 | enum class AnimationSource : std::uint16_t 205 | { 206 | None, // x 207 | AOut, // pow(x, 0.50) 208 | BOut, // pow(x, 0.25) 209 | COut, // pow(x, 2.00) 210 | DOut, // pow(x, 4.00) 211 | EOut, // (sin(x * pi - (pi/2)) + 1.0) * 0.5; 212 | }; 213 | 214 | enum class VertexFormat : std::uint32_t 215 | { 216 | // D3DVERTEXELEMENT9 <0, 00h, D3DDECLTYPE_FLOAT3, D3DDECLUSAGE_POSITION, 0> 217 | // D3DVERTEXELEMENT9 <0, 0Ch, D3DDECLTYPE_FLOAT3, D3DDECLUSAGE_NORMAL, 0> 218 | // D3DVERTEXELEMENT9 <0, 18h, D3DDECLTYPE_FLOAT3, D3DDECLUSAGE_BINORMAL, 0> 219 | // D3DVERTEXELEMENT9 <0, 24h, D3DDECLTYPE_FLOAT3, D3DDECLUSAGE_TANGENT, 0> 220 | // D3DVERTEXELEMENT9 <0, 30h, D3DDECLTYPE_FLOAT2, D3DDECLUSAGE_TEXCOORD, 0> 221 | // SBSP uncompressed rendered geometry 222 | // Size: 56 223 | SBSPVertexUncompressed = 0, 224 | // SBSP compressed rendered geometry 225 | // Size: 32 226 | SBSPVertexCompressed = 1, 227 | // D3DVERTEXELEMENT9 <1, 00h, D3DDECLTYPE_FLOAT3, D3DDECLUSAGE_NORMAL, 1> 228 | // D3DVERTEXELEMENT9 <1, 0Ch, D3DDECLTYPE_FLOAT2, D3DDECLUSAGE_TEXCOORD, 1> 229 | // SBSP uncompressed lightmap geometry 230 | // Size: 20 231 | SBSPLightmapVertexUncompressed = 2, 232 | // SBSL compressed lightmap geometry 233 | // Size: 8 234 | SBSPLightmapVertexCompressed = 3, 235 | // GBX uncompressed model geometry 236 | // Size: 68 237 | ModelUncompressed = 4, 238 | // GBX compressed model geometry 239 | // Size: 32 240 | ModelCompressed = 5, 241 | // Size: 24 242 | Format6 = 6, 243 | // Size: 36 244 | Format7 = 7, 245 | // Size: 24 246 | Format8 = 8, 247 | // Size: 16 248 | Format9 = 9, 249 | // Size: 16 250 | Format10 = 10, 251 | // Size: 20 252 | Format11 = 11, 253 | // Size: 32 254 | Format12 = 12, 255 | // Size: 8 256 | Format13 = 13, 257 | // Size: 32 258 | Format14 = 14, 259 | // Size: 32 260 | Format15 = 15, 261 | // Size: 36 262 | Format16 = 16, 263 | // Size: 28 264 | Format17 = 17, 265 | // Size: 32 266 | Format18 = 18, 267 | // Size: 40 268 | Format19 = 19, 269 | }; 270 | 271 | template 272 | struct FourCC 273 | { 274 | std::uint32_t Value; 275 | 276 | constexpr FourCC(const char (&Identifier)[N]) : Value(0) 277 | { 278 | static_assert(N == 5, "Tag must be 4 characters"); 279 | Value 280 | = ((Identifier[3] << 0) | (Identifier[2] << 8) 281 | | 
(Identifier[1] << 16) | (Identifier[0] << 24)); 282 | } 283 | }; 284 | 285 | template 286 | constexpr std::uint32_t operator""_u32() 287 | { 288 | return Code.Value; 289 | } 290 | 291 | enum class TagClass : std::uint32_t 292 | { 293 | None = 0xFFFFFFFF, // 4294967295 294 | Null = 0x00000000, // 0000000000 295 | Actor = "actr"_u32, // 1633907826 296 | ActorVariant = "actv"_u32, // 1633907830 297 | Antenna = "ant!"_u32, // 1634628641 298 | Biped = "bipd"_u32, // 1651077220 299 | Bitmap = "bitm"_u32, // 1651078253 300 | ContinuousDamageEffect = "cdmg"_u32, // 1667525991 301 | ModelCollisionGeometry = "coll"_u32, // 1668246636 302 | ColorTable = "colo"_u32, // 1668246639 303 | Contrail = "cont"_u32, // 1668247156 304 | DeviceControl = "ctrl"_u32, // 1668575852 305 | Decal = "deca"_u32, // 1684366177 306 | UiWidgetDefinition = "DeLa"_u32, // 1147489377 307 | InputDeviceDefaults = "devc"_u32, // 1684371043 308 | Device = "devi"_u32, // 1684371049 309 | DetailObjectCollection = "dobc"_u32, // 1685021283 310 | Effect = "effe"_u32, // 1701209701 311 | Equipment = "eqip"_u32, // 1701931376 312 | Flag = "flag"_u32, // 1718378855 313 | Fog = "fog "_u32, // 1718576928 314 | Font = "font"_u32, // 1718578804 315 | MaterialEffects = "foot"_u32, // 1718579060 316 | Garbage = "garb"_u32, // 1734439522 317 | Glow = "glw!"_u32, // 1735161633 318 | GrenadeHudInterface = "grhi"_u32, // 1735551081 319 | HudMessageText = "hmt "_u32, // 1752003616 320 | HudNumber = "hud#"_u32, // 1752523811 321 | HudGlobals = "hudg"_u32, // 1752523879 322 | Item = "item"_u32, // 1769235821 323 | ItemCollection = "itmc"_u32, // 1769237859 324 | DamageEffect = "jpt!"_u32, // 1785754657 325 | LensFlare = "lens"_u32, // 1818586739 326 | Lightning = "elec"_u32, // 1701602659 327 | DeviceLightFixture = "lifi"_u32, // 1818846825 328 | Light = "ligh"_u32, // 1818847080 329 | SoundLooping = "lsnd"_u32, // 1819504228 330 | DeviceMachine = "mach"_u32, // 1835098984 331 | Globals = "matg"_u32, // 1835103335 332 | Meter = "metr"_u32, // 1835365490 333 | LightVolume = "mgs2"_u32, // 1835496242 334 | Gbxmodel = "mod2"_u32, // 1836016690 335 | Model = "mode"_u32, // 1836016741 336 | MultiplayerScenarioDescription = "mply"_u32, // 1836084345 337 | PreferencesNetworkGame = "ngpr"_u32, // 1852272754 338 | Object = "obje"_u32, // 1868720741 339 | Particle = "part"_u32, // 1885434484 340 | ParticleSystem = "pctl"_u32, // 1885566060 341 | Physics = "phys"_u32, // 1885895027 342 | Placeholder = "plac"_u32, // 1886151011 343 | PointPhysics = "pphy"_u32, // 1886414969 344 | Projectile = "proj"_u32, // 1886547818 345 | WeatherParticleSystem = "rain"_u32, // 1918986606 346 | ScenarioStructureBsp = "sbsp"_u32, // 1935831920 347 | Scenery = "scen"_u32, // 1935893870 348 | ShaderTransparentChicagoExtended = "scex"_u32, // 1935893880 349 | ShaderTransparentChicago = "schi"_u32, // 1935894633 350 | Scenario = "scnr"_u32, // 1935896178 351 | ShaderEnvironment = "senv"_u32, // 1936027254 352 | ShaderTransparentGlass = "sgla"_u32, // 1936157793 353 | Shader = "shdr"_u32, // 1936221298 354 | Sky = "sky "_u32, // 1936423200 355 | ShaderTransparentMeter = "smet"_u32, // 1936549236 356 | Sound = "snd!"_u32, // 1936614433 357 | SoundEnvironment = "snde"_u32, // 1936614501 358 | ShaderModel = "soso"_u32, // 1936683887 359 | ShaderTransparentGeneric = "sotr"_u32, // 1936684146 360 | UiWidgetCollection = "Soul"_u32, // 1399813484 361 | ShaderTransparentPlasma = "spla"_u32, // 1936747617 362 | SoundScenery = "ssce"_u32, // 1936941925 363 | StringList = "str#"_u32, // 
1937011235 364 | ShaderTransparentWater = "swat"_u32, // 1937203572 365 | TagCollection = "tagc"_u32, // 1952540515 366 | CameraTrack = "trak"_u32, // 1953653099 367 | Dialogue = "udlg"_u32, // 1969515623 368 | UnitHudInterface = "unhi"_u32, // 1970169961 369 | Unit = "unit"_u32, // 1970170228 370 | UnicodeStringList = "ustr"_u32, // 1970500722 371 | VirtualKeyboard = "vcky"_u32, // 1986227065 372 | Vehicle = "vehi"_u32, // 1986357353 373 | Weapon = "weap"_u32, // 2003132784 374 | Wind = "wind"_u32, // 2003398244 375 | WeaponHudInterface = "wphi"_u32, // 2003855465 376 | }; 377 | } // namespace Blam -------------------------------------------------------------------------------- /source/Vulkan/StreamBuffer.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | #include "Common/Alignment.hpp" 4 | #include "Common/Format.hpp" 5 | #include 6 | #include 7 | #include 8 | 9 | namespace Vulkan 10 | { 11 | StreamBuffer::StreamBuffer( 12 | const Vulkan::Context& VulkanContext, vk::DeviceSize BufferSize 13 | ) 14 | : VulkanContext(VulkanContext), BufferSize(BufferSize), FlushTick(0), 15 | RingOffset(0) 16 | { 17 | //// Create Semaphore 18 | { 19 | vk::StructureChain 20 | FlushSemaphoreInfoChain = {}; 21 | 22 | auto& FlushSemaphoreInfo 23 | = FlushSemaphoreInfoChain.get(); 24 | 25 | FlushSemaphoreInfo.flags = {}; 26 | 27 | auto& FlushSemaphoreTypeInfo 28 | = FlushSemaphoreInfoChain.get(); 29 | 30 | FlushSemaphoreTypeInfo.initialValue = 0; 31 | FlushSemaphoreTypeInfo.semaphoreType = vk::SemaphoreType::eTimeline; 32 | 33 | if( auto CreateResult 34 | = VulkanContext.LogicalDevice.createSemaphoreUnique( 35 | FlushSemaphoreInfoChain.get() 36 | ); 37 | CreateResult.result == vk::Result::eSuccess ) 38 | { 39 | FlushSemaphore = std::move(CreateResult.value); 40 | } 41 | else 42 | { 43 | std::fprintf( 44 | stderr, "Error creating vertex buffer: %s\n", 45 | vk::to_string(CreateResult.result).c_str() 46 | ); 47 | /// ??? should we exit the program 48 | } 49 | Vulkan::SetObjectName( 50 | VulkanContext.LogicalDevice, FlushSemaphore.get(), 51 | "StreamBuffer: Flush Semaphore" 52 | ); 53 | } 54 | 55 | //// Create buffer 56 | { 57 | vk::BufferCreateInfo RingBufferInfo; 58 | RingBufferInfo.size = BufferSize; 59 | RingBufferInfo.usage = vk::BufferUsageFlagBits::eTransferSrc 60 | | vk::BufferUsageFlagBits::eTransferDst; 61 | 62 | if( auto CreateResult 63 | = VulkanContext.LogicalDevice.createBufferUnique(RingBufferInfo); 64 | CreateResult.result == vk::Result::eSuccess ) 65 | { 66 | RingBuffer = std::move(CreateResult.value); 67 | } 68 | else 69 | { 70 | std::fprintf( 71 | stderr, "Error creating vertex buffer: %s\n", 72 | vk::to_string(CreateResult.result).c_str() 73 | ); 74 | /// ??? 
should we exit the program 75 | } 76 | Vulkan::SetObjectName( 77 | VulkanContext.LogicalDevice, RingBuffer.get(), 78 | "StreamBuffer: Ring Buffer( %s )", 79 | Common::FormatByteCount(BufferSize).c_str() 80 | ); 81 | } 82 | 83 | //// Allocate memory for staging ring buffer 84 | { 85 | const vk::MemoryRequirements RingBufferMemoryRequirements 86 | = VulkanContext.LogicalDevice.getBufferMemoryRequirements( 87 | RingBuffer.get() 88 | ); 89 | 90 | vk::MemoryAllocateInfo RingBufferAllocInfo = {}; 91 | RingBufferAllocInfo.allocationSize = RingBufferMemoryRequirements.size; 92 | 93 | // Try to get some shared memory 94 | std::int32_t RingBufferHeapIndex = Vulkan::FindMemoryTypeIndex( 95 | VulkanContext.PhysicalDevice, 96 | RingBufferMemoryRequirements.memoryTypeBits, 97 | vk::MemoryPropertyFlagBits::eHostVisible 98 | | vk::MemoryPropertyFlagBits::eHostCoherent 99 | | vk::MemoryPropertyFlagBits::eDeviceLocal 100 | ); 101 | 102 | // If that failed, then just get some host memory 103 | if( RingBufferHeapIndex < 0 ) 104 | { 105 | RingBufferHeapIndex = Vulkan::FindMemoryTypeIndex( 106 | VulkanContext.PhysicalDevice, 107 | RingBufferMemoryRequirements.memoryTypeBits, 108 | vk::MemoryPropertyFlagBits::eHostVisible 109 | | vk::MemoryPropertyFlagBits::eHostCoherent 110 | ); 111 | } 112 | 113 | RingBufferAllocInfo.memoryTypeIndex = RingBufferHeapIndex; 114 | 115 | if( auto AllocResult = VulkanContext.LogicalDevice.allocateMemoryUnique( 116 | RingBufferAllocInfo 117 | ); 118 | AllocResult.result == vk::Result::eSuccess ) 119 | { 120 | RingBufferMemory = std::move(AllocResult.value); 121 | } 122 | else 123 | { 124 | std::fprintf( 125 | stderr, "Error allocating memory for staging buffer: %s\n", 126 | vk::to_string(AllocResult.result).c_str() 127 | ); 128 | /// ??? should we exit the program 129 | } 130 | Vulkan::SetObjectName( 131 | VulkanContext.LogicalDevice, RingBufferMemory.get(), 132 | "StreamBuffer: Ring Buffer Memory( %s )", 133 | Common::FormatByteCount(BufferSize).c_str() 134 | ); 135 | 136 | if( auto BindResult = VulkanContext.LogicalDevice.bindBufferMemory( 137 | RingBuffer.get(), RingBufferMemory.get(), 0 138 | ); 139 | BindResult == vk::Result::eSuccess ) 140 | { 141 | // Successfully binded memory to buffer 142 | } 143 | else 144 | { 145 | std::fprintf( 146 | stderr, "Error binding memory to staging ring buffer: %s\n", 147 | vk::to_string(BindResult).c_str() 148 | ); 149 | /// ??? should we exit the program 150 | } 151 | } 152 | 153 | //// Map the device memory 154 | if( auto MapResult = VulkanContext.LogicalDevice.mapMemory( 155 | RingBufferMemory.get(), 0, BufferSize 156 | ); 157 | MapResult.result == vk::Result::eSuccess ) 158 | { 159 | RingMemoryMapped = std::span( 160 | reinterpret_cast(MapResult.value), BufferSize 161 | ); 162 | } 163 | else 164 | { 165 | std::fprintf( 166 | stderr, "Error mapping staging ring buffer memory: %s\n", 167 | vk::to_string(MapResult.result).c_str() 168 | ); 169 | /// ??? 
should we exit the program 170 | } 171 | 172 | //// Allocate command pool 173 | { 174 | vk::CommandPoolCreateInfo CommandPoolInfo; 175 | CommandPoolInfo.flags 176 | = vk::CommandPoolCreateFlagBits::eResetCommandBuffer; 177 | CommandPoolInfo.queueFamilyIndex 178 | = VulkanContext.TransferQueueFamilyIndex; 179 | 180 | if( auto CreateResult = VulkanContext.LogicalDevice 181 | .createCommandPoolUnique(CommandPoolInfo); 182 | CreateResult.result == vk::Result::eSuccess ) 183 | { 184 | CommandPool = std::move(CreateResult.value); 185 | } 186 | else 187 | { 188 | std::fprintf( 189 | stderr, "Error creating staging buffer command pool: %s\n", 190 | vk::to_string(CreateResult.result).c_str() 191 | ); 192 | /// ??? should we exit the program 193 | } 194 | 195 | Vulkan::SetObjectName( 196 | VulkanContext.LogicalDevice, CommandPool.get(), 197 | "StreamBuffer: Command Pool" 198 | ); 199 | } 200 | } 201 | 202 | StreamBuffer::~StreamBuffer() 203 | { 204 | VulkanContext.LogicalDevice.unmapMemory(RingBufferMemory.get()); 205 | } 206 | 207 | std::uint64_t StreamBuffer::QueueBufferUpload( 208 | const std::span Data, vk::Buffer Buffer, 209 | vk::DeviceSize Offset 210 | ) 211 | { 212 | if( Data.empty() ) 213 | { 214 | return FlushTick; 215 | } 216 | if( Data.size_bytes() > BufferSize ) 217 | { 218 | std::fprintf( 219 | stderr, "Staging buffer overflow: %zu > %zu \n", Data.size_bytes(), 220 | BufferSize 221 | ); 222 | } 223 | 224 | // Satisfy any alignment requirements here 225 | std::uint64_t CurRingOffset = RingOffset; 226 | 227 | if( (CurRingOffset + Data.size_bytes()) >= BufferSize ) 228 | { 229 | const std::uint64_t CurFlushTick = Flush(); 230 | 231 | // Blocking wait since we need to ensure that the staging buffer is 232 | // entirely free todo, attach timestamps to particular regions of the 233 | // ring buffer so that we can use parts of the buffer immediately when 234 | // it is ready 235 | vk::SemaphoreWaitInfo WaitInfo; 236 | WaitInfo.semaphoreCount = 1; 237 | WaitInfo.pSemaphores = &GetSemaphore(); 238 | WaitInfo.pValues = &CurFlushTick; 239 | if( auto WaitResult 240 | = VulkanContext.LogicalDevice.waitSemaphores(WaitInfo, ~0ULL); 241 | WaitResult != vk::Result::eSuccess ) 242 | { 243 | std::fprintf(stderr, "Error waiting on Stream buffer semaphore \n"); 244 | } 245 | 246 | // Satisfy any alignment requirements here 247 | CurRingOffset = RingOffset; 248 | } 249 | 250 | RingOffset = CurRingOffset + Data.size_bytes(); 251 | 252 | std::copy( 253 | Data.begin(), Data.end(), 254 | RingMemoryMapped.subspan(CurRingOffset).begin() 255 | ); 256 | 257 | BufferCopies[Buffer].emplace_back(vk::BufferCopy{ 258 | CurRingOffset, Offset, Data.size_bytes()}); 259 | 260 | return FlushTick; 261 | } 262 | 263 | std::uint64_t StreamBuffer::QueueImageUpload( 264 | const std::span Data, vk::Image Image, vk::Offset3D Offset, 265 | vk::Extent3D Extent, vk::ImageSubresourceLayers SubresourceLayers, 266 | vk::ImageLayout DstLayout 267 | ) 268 | { 269 | if( Data.empty() ) 270 | { 271 | return FlushTick; 272 | } 273 | if( Data.size_bytes() > BufferSize ) 274 | { 275 | std::fprintf( 276 | stderr, "Staging buffer overflow: %zu > %zu \n", Data.size_bytes(), 277 | BufferSize 278 | ); 279 | } 280 | 281 | // Memory offset must at least match the alignment of the image format's 282 | // texel size. 
Here we just force it to handle the upper-bound alignment 283 | // as a temporary catch-all 284 | std::uint64_t CurRingOffset = Common::AlignUp(RingOffset, 16); 285 | 286 | if( (CurRingOffset + Data.size_bytes()) >= BufferSize ) 287 | { 288 | const std::uint64_t CurFlushTick = Flush(); 289 | 290 | // Blocking wait since we need to ensure that the staging buffer is 291 | // entirely free todo, attach timestamps to particular regions of the 292 | // ring buffer so that we can use parts of the buffer immediately when 293 | // it is ready 294 | vk::SemaphoreWaitInfo WaitInfo; 295 | WaitInfo.semaphoreCount = 1; 296 | WaitInfo.pSemaphores = &GetSemaphore(); 297 | WaitInfo.pValues = &CurFlushTick; 298 | if( VulkanContext.LogicalDevice.waitSemaphores(WaitInfo, ~0ULL) 299 | != vk::Result::eSuccess ) 300 | { 301 | std::fprintf(stderr, "Error waiting on Stream buffer semaphore \n"); 302 | } 303 | 304 | CurRingOffset = Common::AlignUp(RingOffset, 16); 305 | } 306 | 307 | RingOffset = CurRingOffset + Data.size_bytes(); 308 | 309 | std::copy( 310 | Data.begin(), Data.end(), 311 | RingMemoryMapped.subspan(CurRingOffset).begin() 312 | ); 313 | 314 | ImageCopies[Image].emplace_back(vk::BufferImageCopy{ 315 | CurRingOffset, 0, 0, SubresourceLayers, Offset, Extent}); 316 | 317 | ImagePreBarrier.emplace_back(vk::ImageMemoryBarrier( 318 | vk::AccessFlagBits::eMemoryWrite, vk::AccessFlagBits::eMemoryRead, 319 | vk::ImageLayout::eUndefined, vk::ImageLayout::eTransferDstOptimal, 320 | VK_QUEUE_FAMILY_IGNORED, VK_QUEUE_FAMILY_IGNORED, Image, 321 | vk::ImageSubresourceRange( 322 | SubresourceLayers.aspectMask, SubresourceLayers.mipLevel, 1, 323 | SubresourceLayers.baseArrayLayer, SubresourceLayers.layerCount 324 | ) 325 | )); 326 | ImagePostBarrier.emplace_back(vk::ImageMemoryBarrier( 327 | vk::AccessFlagBits::eTransferWrite, vk::AccessFlagBits::eMemoryRead, 328 | vk::ImageLayout::eTransferDstOptimal, 329 | vk::ImageLayout::eShaderReadOnlyOptimal, VK_QUEUE_FAMILY_IGNORED, 330 | VK_QUEUE_FAMILY_IGNORED, Image, 331 | vk::ImageSubresourceRange( 332 | SubresourceLayers.aspectMask, SubresourceLayers.mipLevel, 1, 333 | SubresourceLayers.baseArrayLayer, SubresourceLayers.layerCount 334 | ) 335 | )); 336 | 337 | return FlushTick; 338 | } 339 | 340 | std::uint64_t StreamBuffer::Flush() 341 | { 342 | if( RingOffset == 0 ) 343 | { 344 | return FlushTick; 345 | } 346 | // Any further pushes are going to be a part of the next tick 347 | const std::uint64_t PrevFlushTick = FlushTick++; 348 | vk::CommandBuffer FlushCommandBuffer = {}; 349 | 350 | // Get where the GPU is at in our submit-timeline 351 | std::uint64_t GpuFlushTick = 0; 352 | if( auto GetResult = VulkanContext.LogicalDevice.getSemaphoreCounterValue( 353 | FlushSemaphore.get() 354 | ); 355 | GetResult.result == vk::Result::eSuccess ) 356 | { 357 | GpuFlushTick = GetResult.value; 358 | } 359 | else 360 | { 361 | std::fprintf( 362 | stderr, "Error getting timeline semaphore value: %s\n", 363 | vk::to_string(GetResult.result).c_str() 364 | ); 365 | /// ??? should we exit the program 366 | } 367 | 368 | // Find a free command buffer 369 | for( std::size_t i = 0; i < CommandBuffers.size(); ++i ) 370 | { 371 | // This command context is free! 
recycle it 372 | if( CommandBufferTimeStamps[i] < GpuFlushTick ) 373 | { 374 | CommandBufferTimeStamps[i] = FlushTick; 375 | FlushCommandBuffer = CommandBuffers[i].get(); 376 | break; 377 | } 378 | } 379 | 380 | if( !FlushCommandBuffer ) 381 | { 382 | // No command buffer was free, we need to push a new one 383 | CommandBufferTimeStamps.push_back(FlushTick); 384 | 385 | vk::CommandBufferAllocateInfo CommandBufferInfo; 386 | CommandBufferInfo.commandPool = CommandPool.get(); 387 | CommandBufferInfo.commandBufferCount = 1; 388 | 389 | if( auto AllocateResult 390 | = VulkanContext.LogicalDevice.allocateCommandBuffersUnique( 391 | CommandBufferInfo 392 | ); 393 | AllocateResult.result == vk::Result::eSuccess ) 394 | { 395 | FlushCommandBuffer = AllocateResult.value[0].get(); 396 | CommandBuffers.emplace_back(std::move(AllocateResult.value[0])); 397 | } 398 | else 399 | { 400 | std::fprintf( 401 | stderr, "Error allocating command buffer: %s\n", 402 | vk::to_string(AllocateResult.result).c_str() 403 | ); 404 | /// ??? should we exit the program 405 | } 406 | 407 | Vulkan::SetObjectName( 408 | VulkanContext.LogicalDevice, FlushCommandBuffer, 409 | "StreamBuffer: Command Buffer %zu", CommandBuffers.size() 410 | ); 411 | } 412 | 413 | vk::CommandBufferBeginInfo BeginInfo; 414 | BeginInfo.flags = vk::CommandBufferUsageFlagBits::eOneTimeSubmit; 415 | if( auto BeginResult 416 | = FlushCommandBuffer.begin(vk::CommandBufferBeginInfo{}); 417 | BeginResult != vk::Result::eSuccess ) 418 | { 419 | std::fprintf( 420 | stderr, "Error beginning command buffer: %s\n", 421 | vk::to_string(BeginResult).c_str() 422 | ); 423 | } 424 | 425 | { 426 | Vulkan::DebugLabelScope DebugCopyScope( 427 | FlushCommandBuffer, {1.0, 1.0, 0.0, 1.0}, "Upload Buffers" 428 | ); 429 | for( const auto& [CurBuffer, CurBufferCopies] : BufferCopies ) 430 | { 431 | FlushCommandBuffer.copyBuffer( 432 | RingBuffer.get(), CurBuffer, CurBufferCopies 433 | ); 434 | } 435 | } 436 | 437 | { 438 | Vulkan::DebugLabelScope DebugCopyScope( 439 | FlushCommandBuffer, {1.0, 1.0, 0.0, 1.0}, "Upload Images" 440 | ); 441 | FlushCommandBuffer.pipelineBarrier( 442 | vk::PipelineStageFlagBits::eTransfer, 443 | vk::PipelineStageFlagBits::eTransfer, vk::DependencyFlags{}, {}, {}, 444 | ImagePreBarrier 445 | ); 446 | for( const auto& [CurImage, CurImageCopies] : ImageCopies ) 447 | { 448 | FlushCommandBuffer.copyBufferToImage( 449 | RingBuffer.get(), CurImage, 450 | vk::ImageLayout::eTransferDstOptimal, CurImageCopies 451 | ); 452 | } 453 | FlushCommandBuffer.pipelineBarrier( 454 | vk::PipelineStageFlagBits::eTransfer, 455 | vk::PipelineStageFlagBits::eAllCommands, vk::DependencyFlags{}, {}, 456 | {}, ImagePostBarrier 457 | ); 458 | } 459 | 460 | if( auto EndResult = FlushCommandBuffer.end(); 461 | EndResult != vk::Result::eSuccess ) 462 | { 463 | std::fprintf( 464 | stderr, "Error ending command buffer: %s\n", 465 | vk::to_string(EndResult).c_str() 466 | ); 467 | } 468 | 469 | vk::StructureChain 470 | SubmitInfoChain = {}; 471 | 472 | auto& SubmitInfo = SubmitInfoChain.get(); 473 | SubmitInfo.commandBufferCount = 1; 474 | SubmitInfo.pCommandBuffers = &FlushCommandBuffer; 475 | 476 | SubmitInfo.waitSemaphoreCount = 1; 477 | SubmitInfo.pWaitSemaphores = &FlushSemaphore.get(); 478 | 479 | static const vk::PipelineStageFlags WaitStage 480 | = vk::PipelineStageFlagBits::eTransfer; 481 | SubmitInfo.pWaitDstStageMask = &WaitStage; 482 | 483 | SubmitInfo.signalSemaphoreCount = 1; 484 | SubmitInfo.pSignalSemaphores = &FlushSemaphore.get(); 485 | 486 | auto& 
SubmitTimelineInfo 487 | = SubmitInfoChain.get(); 488 | 489 | SubmitTimelineInfo.waitSemaphoreValueCount = 1; 490 | SubmitTimelineInfo.pWaitSemaphoreValues = &PrevFlushTick; 491 | 492 | SubmitTimelineInfo.signalSemaphoreValueCount = 1; 493 | SubmitTimelineInfo.pSignalSemaphoreValues = &FlushTick; 494 | 495 | if( auto SubmitResult 496 | = VulkanContext.TransferQueue.submit(SubmitInfoChain.get()); 497 | SubmitResult != vk::Result::eSuccess ) 498 | { 499 | // Error submitting 500 | std::fprintf( 501 | stderr, "Error submitting streaming buffer flush: %s\n", 502 | vk::to_string(SubmitResult).c_str() 503 | ); 504 | } 505 | 506 | RingOffset = 0; 507 | BufferCopies.clear(); 508 | ImageCopies.clear(); 509 | ImagePreBarrier.clear(); 510 | ImagePostBarrier.clear(); 511 | 512 | return FlushTick; 513 | } 514 | 515 | const vk::Semaphore& StreamBuffer::GetSemaphore() const 516 | { 517 | return FlushSemaphore.get(); 518 | } 519 | 520 | } // namespace Vulkan -------------------------------------------------------------------------------- /source/main.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | 11 | #include 12 | #include 13 | #include 14 | 15 | #include 16 | #include 17 | #include 18 | #include 19 | #include 20 | 21 | #include 22 | #include 23 | #include 24 | #include 25 | #include 26 | 27 | #include 28 | 29 | #include 30 | 31 | #include "stb_image_write.h" 32 | 33 | // Enable render-doc captures on non-windows for now 34 | #if !defined(_WIN32) && !defined(__APPLE__) && !defined(NDEBUG) 35 | #define CAPTURE 36 | #endif 37 | 38 | #ifdef CAPTURE 39 | #include 40 | #include 41 | RENDERDOC_API_1_4_1* rdoc_api = NULL; 42 | #endif 43 | 44 | static constexpr glm::uvec2 RenderSize = {1024, 1024}; 45 | 46 | vk::UniqueRenderPass CreateMainRenderPass( 47 | vk::Device Device, 48 | vk::SampleCountFlagBits SampleCount = vk::SampleCountFlagBits::e1 49 | ); 50 | 51 | vk::UniqueFramebuffer CreateMainFrameBuffer( 52 | vk::Device Device, vk::ImageView Color, vk::ImageView DepthAA, 53 | vk::ImageView ColorAA, glm::uvec2 ImageSize, vk::RenderPass RenderPass 54 | ); 55 | 56 | std::string FormatDeviceCaps(vk::PhysicalDevice PhysicalDevice); 57 | 58 | int main(int argc, char* argv[]) 59 | { 60 | using namespace Common::Literals; 61 | 62 | if( argc < 3 ) 63 | { 64 | // Not enough arguments 65 | return EXIT_FAILURE; 66 | } 67 | 68 | #ifdef CAPTURE 69 | void* mod = dlopen("librenderdoc.so", RTLD_NOW); 70 | char* msg = dlerror(); 71 | if( msg ) 72 | std::puts(msg); 73 | if( mod ) 74 | { 75 | pRENDERDOC_GetAPI RENDERDOC_GetAPI 76 | = (pRENDERDOC_GetAPI)dlsym(mod, "RENDERDOC_GetAPI"); 77 | int ret 78 | = RENDERDOC_GetAPI(eRENDERDOC_API_Version_1_4_1, (void**)&rdoc_api); 79 | rdoc_api->SetCaptureOptionU32(eRENDERDOC_Option_APIValidation, 1); 80 | rdoc_api->SetCaptureOptionU32(eRENDERDOC_Option_CaptureCallstacks, 1); 81 | rdoc_api->SetCaptureOptionU32(eRENDERDOC_Option_SaveAllInitials, 1); 82 | rdoc_api->SetCaptureOptionU32(eRENDERDOC_Option_DebugOutputMute, 0); 83 | assert(ret == 1); 84 | } 85 | #endif 86 | 87 | std::filesystem::path MapPath(argv[1]); 88 | std::filesystem::path BitmapPath(argv[2]); 89 | 90 | auto MapFile = mio::mmap_source(MapPath.c_str()); 91 | auto BitmapFile = mio::mmap_source(BitmapPath.c_str()); 92 | 93 | Blam::MapFile CurMap( 94 | std::span( 95 | reinterpret_cast(MapFile.data()), MapFile.size() 96 | ), 97 | std::span( 98 | 
reinterpret_cast(BitmapFile.data()), 99 | BitmapFile.size() 100 | ) 101 | ); 102 | 103 | VkBlam::World CurWorld = VkBlam::World::Create(CurMap).value(); 104 | 105 | std::fputs(Blam::ToString(CurWorld.GetMapFile().MapHeader).c_str(), stdout); 106 | std::fputs( 107 | Blam::ToString(CurWorld.GetMapFile().TagIndexHeader).c_str(), stdout 108 | ); 109 | 110 | //// Create Instance 111 | 112 | vk::ApplicationInfo ApplicationInfo = {}; 113 | ApplicationInfo.apiVersion = VK_API_VERSION_1_1; 114 | 115 | ApplicationInfo.pEngineName = "VkBlam"; 116 | ApplicationInfo.engineVersion = VK_MAKE_VERSION(1, 0, 0); 117 | 118 | ApplicationInfo.pApplicationName = "VkBlam"; 119 | ApplicationInfo.applicationVersion = VK_MAKE_VERSION(1, 0, 0); 120 | 121 | vk::InstanceCreateInfo InstanceInfo = {}; 122 | 123 | InstanceInfo.pApplicationInfo = &ApplicationInfo; 124 | 125 | static const std::array InstanceExtensions = std::to_array({ 126 | #if defined(__APPLE__) 127 | VK_KHR_PORTABILITY_ENUMERATION_EXTENSION_NAME, 128 | #endif 129 | VK_EXT_DEBUG_UTILS_EXTENSION_NAME 130 | }); 131 | 132 | #if defined(__APPLE__) 133 | InstanceInfo.flags |= vk::InstanceCreateFlagBits::eEnumeratePortabilityKHR; 134 | #endif 135 | 136 | InstanceInfo.ppEnabledExtensionNames = InstanceExtensions.data(); 137 | InstanceInfo.enabledExtensionCount = InstanceExtensions.size(); 138 | 139 | vk::UniqueInstance Instance = {}; 140 | 141 | if( auto CreateResult = vk::createInstanceUnique(InstanceInfo); 142 | CreateResult.result == vk::Result::eSuccess ) 143 | { 144 | Instance = std::move(CreateResult.value); 145 | } 146 | else 147 | { 148 | std::fprintf( 149 | stderr, "Error creating Vulkan instance: %s\n", 150 | vk::to_string(CreateResult.result).c_str() 151 | ); 152 | return EXIT_FAILURE; 153 | } 154 | VULKAN_HPP_DEFAULT_DISPATCHER.init(Instance.get()); 155 | 156 | //// Pick physical device 157 | vk::PhysicalDevice PhysicalDevice = {}; 158 | 159 | if( auto EnumerateResult = Instance->enumeratePhysicalDevices(); 160 | EnumerateResult.result == vk::Result::eSuccess ) 161 | { 162 | std::vector PhysicalDevices 163 | = std::move(EnumerateResult.value); 164 | 165 | // Prefer Discrete GPUs 166 | const auto IsDiscrete 167 | = [](const vk::PhysicalDevice& PhysicalDevice) -> bool { 168 | return PhysicalDevice.getProperties().deviceType 169 | == vk::PhysicalDeviceType::eDiscreteGpu; 170 | }; 171 | 172 | std::partition( 173 | PhysicalDevices.begin(), PhysicalDevices.end(), IsDiscrete 174 | ); 175 | 176 | // Pick the "best" out of all of the previous criteria 177 | PhysicalDevice = PhysicalDevices.front(); 178 | } 179 | else 180 | { 181 | std::fprintf( 182 | stderr, "Error enumerating physical devices: %s\n", 183 | vk::to_string(EnumerateResult.result).c_str() 184 | ); 185 | return EXIT_FAILURE; 186 | } 187 | 188 | std::fprintf( 189 | stdout, 190 | "---\n" 191 | "%s" 192 | "---\n", 193 | FormatDeviceCaps(PhysicalDevice).c_str() 194 | ); 195 | 196 | //// Create Device 197 | vk::DeviceCreateInfo DeviceInfo = {}; 198 | 199 | static const char* DeviceExtensions[] = { 200 | #if defined(__APPLE__) 201 | "VK_KHR_portability_subset", 202 | #endif 203 | VK_KHR_TIMELINE_SEMAPHORE_EXTENSION_NAME 204 | }; 205 | DeviceInfo.ppEnabledExtensionNames = DeviceExtensions; 206 | DeviceInfo.enabledExtensionCount = std::size(DeviceExtensions); 207 | 208 | vk::StructureChain< 209 | vk::PhysicalDeviceFeatures2, 210 | vk::PhysicalDeviceTimelineSemaphoreFeatures> 211 | DeviceFeatureChain = {}; 212 | 213 | auto& DeviceFeatures 214 | = DeviceFeatureChain.get().features; 215 | 
DeviceFeatures.samplerAnisotropy = true; 216 | DeviceFeatures.sampleRateShading = true; 217 | // DeviceFeatures.wideLines = true; // Not supported on MoltenVK 218 | DeviceFeatures.fillModeNonSolid = true; 219 | 220 | auto& DeviceTimelineFeatures 221 | = DeviceFeatureChain.get(); 222 | DeviceTimelineFeatures.timelineSemaphore = true; 223 | 224 | DeviceInfo.pNext = &DeviceFeatureChain.get(); 225 | 226 | static const float QueuePriority = 1.0f; 227 | 228 | vk::DeviceQueueCreateInfo QueueInfo = {}; 229 | QueueInfo.queueFamilyIndex = 0; 230 | QueueInfo.queueCount = 1; 231 | QueueInfo.pQueuePriorities = &QueuePriority; 232 | 233 | DeviceInfo.queueCreateInfoCount = 1; 234 | DeviceInfo.pQueueCreateInfos = &QueueInfo; 235 | 236 | vk::UniqueDevice Device = {}; 237 | if( auto CreateResult = PhysicalDevice.createDeviceUnique(DeviceInfo); 238 | CreateResult.result == vk::Result::eSuccess ) 239 | { 240 | Device = std::move(CreateResult.value); 241 | } 242 | else 243 | { 244 | std::fprintf( 245 | stderr, "Error creating logical device: %s\n", 246 | vk::to_string(CreateResult.result).c_str() 247 | ); 248 | return EXIT_FAILURE; 249 | } 250 | 251 | VULKAN_HPP_DEFAULT_DISPATCHER.init(Device.get()); 252 | 253 | #ifdef CAPTURE 254 | if( rdoc_api ) 255 | rdoc_api->StartFrameCapture( 256 | *(void**)(VkInstance)(*Instance.operator->()), NULL 257 | ); 258 | #endif 259 | 260 | // Main Rendering queue 261 | vk::Queue RenderQueue = Device->getQueue(0, 0); 262 | // Todo: Pick the fastest transfer queue here 263 | vk::Queue TransferQueue = Device->getQueue(0, 0); 264 | 265 | const Vulkan::Context VulkanContext{ 266 | Device.get(), PhysicalDevice, RenderQueue, 0, TransferQueue, 0}; 267 | 268 | VkBlam::Renderer Renderer = VkBlam::Renderer::Create(VulkanContext).value(); 269 | 270 | VkBlam::Scene CurScene = VkBlam::Scene::Create(Renderer, CurWorld).value(); 271 | 272 | //// Main Render Pass 273 | vk::UniqueRenderPass MainRenderPass 274 | = CreateMainRenderPass(Device.get(), VkBlam::RenderSamples); 275 | 276 | vk::UniqueBuffer StagingBuffer = {}; 277 | vk::UniqueDeviceMemory StagingBufferMemory = {}; 278 | 279 | vk::BufferCreateInfo StagingBufferInfo = {}; 280 | StagingBufferInfo.size 281 | = RenderSize.x * RenderSize.y * sizeof(std::uint32_t); 282 | StagingBufferInfo.usage = vk::BufferUsageFlagBits::eTransferDst 283 | | vk::BufferUsageFlagBits::eTransferSrc; 284 | 285 | if( auto CreateResult = Device->createBufferUnique(StagingBufferInfo); 286 | CreateResult.result == vk::Result::eSuccess ) 287 | { 288 | StagingBuffer = std::move(CreateResult.value); 289 | } 290 | else 291 | { 292 | std::fprintf( 293 | stderr, "Error creating staging buffer: %s\n", 294 | vk::to_string(CreateResult.result).c_str() 295 | ); 296 | return EXIT_FAILURE; 297 | } 298 | 299 | Vulkan::SetObjectName( 300 | Device.get(), StagingBuffer.get(), "Staging Buffer( %s )", 301 | Common::FormatByteCount(StagingBufferInfo.size).c_str() 302 | ); 303 | 304 | // Allocate memory for staging buffer 305 | { 306 | const vk::MemoryRequirements StagingBufferMemoryRequirements 307 | = Device->getBufferMemoryRequirements(StagingBuffer.get()); 308 | 309 | vk::MemoryAllocateInfo StagingBufferAllocInfo = {}; 310 | StagingBufferAllocInfo.allocationSize 311 | = StagingBufferMemoryRequirements.size; 312 | StagingBufferAllocInfo.memoryTypeIndex = Vulkan::FindMemoryTypeIndex( 313 | PhysicalDevice, StagingBufferMemoryRequirements.memoryTypeBits, 314 | vk::MemoryPropertyFlagBits::eHostVisible 315 | | vk::MemoryPropertyFlagBits::eHostCoherent 316 | ); 317 | 318 | if( auto 
        if( auto AllocResult
            = Device->allocateMemoryUnique(StagingBufferAllocInfo);
            AllocResult.result == vk::Result::eSuccess )
        {
            StagingBufferMemory = std::move(AllocResult.value);
        }
        else
        {
            std::fprintf(
                stderr, "Error allocating memory for staging buffer: %s\n",
                vk::to_string(AllocResult.result).c_str()
            );
            return EXIT_FAILURE;
        }

        if( auto BindResult = Device->bindBufferMemory(
                StagingBuffer.get(), StagingBufferMemory.get(), 0
            );
            BindResult == vk::Result::eSuccess )
        {
            // Successfully bound memory to buffer
        }
        else
        {
            std::fprintf(
                stderr, "Error binding memory to staging buffer: %s\n",
                vk::to_string(BindResult).c_str()
            );
            return EXIT_FAILURE;
        }
    }

    std::span<std::byte> StagingBufferData;
    if( auto MapResult = Device->mapMemory(
            StagingBufferMemory.get(), 0, StagingBufferInfo.size
        );
        MapResult.result == vk::Result::eSuccess )
    {
        StagingBufferData = std::span(
            reinterpret_cast<std::byte*>(MapResult.value),
            StagingBufferInfo.size
        );
    }
    else
    {
        std::fprintf(
            stderr, "Error mapping staging buffer memory: %s\n",
            vk::to_string(MapResult.result).c_str()
        );
        return EXIT_FAILURE;
    }

    // Render Target images
    vk::UniqueImage RenderImage;

    vk::UniqueImage RenderImageAA;

    vk::UniqueImage RenderImageDepth;

    // Render-image, R8G8B8A8_SRGB
    vk::ImageCreateInfo RenderImageInfo = {};
    RenderImageInfo.imageType = vk::ImageType::e2D;
    RenderImageInfo.format = vk::Format::eR8G8B8A8Srgb;
    RenderImageInfo.extent = vk::Extent3D(RenderSize.x, RenderSize.y, 1);
    RenderImageInfo.mipLevels = 1;
    RenderImageInfo.arrayLayers = 1;
    RenderImageInfo.samples = vk::SampleCountFlagBits::e1;
    RenderImageInfo.tiling = vk::ImageTiling::eOptimal;
    RenderImageInfo.usage = vk::ImageUsageFlagBits::eColorAttachment
                          | vk::ImageUsageFlagBits::eTransferSrc;
    RenderImageInfo.sharingMode = vk::SharingMode::eExclusive;
    RenderImageInfo.initialLayout = vk::ImageLayout::eUndefined;

    // Render-image(MSAA), R8G8B8A8_SRGB
    vk::ImageCreateInfo RenderImageAAInfo = {};
    RenderImageAAInfo.imageType = vk::ImageType::e2D;
    RenderImageAAInfo.format = vk::Format::eR8G8B8A8Srgb;
    RenderImageAAInfo.samples = VkBlam::RenderSamples;
    RenderImageAAInfo.extent = vk::Extent3D(RenderSize.x, RenderSize.y, 1);
    RenderImageAAInfo.mipLevels = 1;
    RenderImageAAInfo.arrayLayers = 1;
    RenderImageAAInfo.tiling = vk::ImageTiling::eOptimal;
    RenderImageAAInfo.usage = vk::ImageUsageFlagBits::eColorAttachment;
    RenderImageAAInfo.sharingMode = vk::SharingMode::eExclusive;
    RenderImageAAInfo.initialLayout = vk::ImageLayout::eUndefined;

    // Render-image-depth(MSAA), D32_sfloat
    vk::ImageCreateInfo RenderImageDepthInfo = {};
    RenderImageDepthInfo.imageType = vk::ImageType::e2D;
    RenderImageDepthInfo.format = vk::Format::eD32Sfloat;
    RenderImageDepthInfo.samples = VkBlam::RenderSamples;
    RenderImageDepthInfo.extent = vk::Extent3D(RenderSize.x, RenderSize.y, 1);
    RenderImageDepthInfo.mipLevels = 1;
    RenderImageDepthInfo.arrayLayers = 1;
    RenderImageDepthInfo.tiling = vk::ImageTiling::eOptimal;
    RenderImageDepthInfo.usage
        = vk::ImageUsageFlagBits::eDepthStencilAttachment;
    RenderImageDepthInfo.sharingMode = vk::SharingMode::eExclusive;
    RenderImageDepthInfo.initialLayout = vk::ImageLayout::eUndefined;

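    // The frame is rendered into the multisampled color/depth targets
    // (VkBlam::RenderSamples) and then, presumably via the main render pass'
    // resolve attachment, ends up in the single-sample eR8G8B8A8Srgb image
    // above; only that resolve target carries eTransferSrc so it can be
    // copied into the host-visible staging buffer for the PNG write at the
    // end of the frame.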
    if( auto CreateResult = Device->createImageUnique(RenderImageInfo);
        CreateResult.result == vk::Result::eSuccess )
    {
        RenderImage = std::move(CreateResult.value);
    }
    else
    {
        std::fprintf(
            stderr, "Error creating render target: %s\n",
            vk::to_string(CreateResult.result).c_str()
        );
        return EXIT_FAILURE;
    }
    Vulkan::SetObjectName(
        Device.get(), RenderImage.get(), "Render Image Resolve"
    );

    if( auto CreateResult = Device->createImageUnique(RenderImageAAInfo);
        CreateResult.result == vk::Result::eSuccess )
    {
        RenderImageAA = std::move(CreateResult.value);
    }
    else
    {
        std::fprintf(
            stderr, "Error creating render target: %s\n",
            vk::to_string(CreateResult.result).c_str()
        );
        return EXIT_FAILURE;
    }
    Vulkan::SetObjectName(
        Device.get(), RenderImageAA.get(), "Render Image(AA)"
    );

    if( auto CreateResult = Device->createImageUnique(RenderImageDepthInfo);
        CreateResult.result == vk::Result::eSuccess )
    {
        RenderImageDepth = std::move(CreateResult.value);
    }
    else
    {
        std::fprintf(
            stderr, "Error creating render target: %s\n",
            vk::to_string(CreateResult.result).c_str()
        );
        return EXIT_FAILURE;
    }
    Vulkan::SetObjectName(
        Device.get(), RenderImageDepth.get(), "Render Image Depth(AA)"
    );

    std::vector<vk::Image> ImageHeapTargets = {};
    ImageHeapTargets.push_back(RenderImage.get());
    ImageHeapTargets.push_back(RenderImageAA.get());
    ImageHeapTargets.push_back(RenderImageDepth.get());

    // Allocate all the memory we need for these images up-front into a single
    // heap.
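    // Vulkan::CommitImageHeap is assumed to follow the common sub-allocation
    // pattern: query each image's vk::MemoryRequirements, place the images at
    // aligned offsets inside one vk::DeviceMemory allocation, and bind them
    // with bindImageMemory. A rough sketch of that pattern (illustrative,
    // not the project's actual implementation):
    //
    //   vk::DeviceSize HeapSize = 0;
    //   for( const vk::Image& CurImage : ImageHeapTargets )
    //   {
    //       const auto Req = Device->getImageMemoryRequirements(CurImage);
    //       HeapSize = (HeapSize + Req.alignment - 1) & ~(Req.alignment - 1);
    //       // ...remember HeapSize as this image's offset...
    //       HeapSize += Req.size;
    //   }
    //   // allocate HeapSize once from a compatible memory type, then
    //   // bindImageMemory(Image, Memory, Offset) for each image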
    vk::UniqueDeviceMemory ImageHeapMemory = {};

    if( auto [Result, Value] = Vulkan::CommitImageHeap(
            Device.get(), PhysicalDevice, ImageHeapTargets
        );
        Result == vk::Result::eSuccess )
    {
        ImageHeapMemory = std::move(Value);
    }
    else
    {
        std::fprintf(
            stderr, "Error committing image memory: %s\n",
            vk::to_string(Result).c_str()
        );
        return EXIT_FAILURE;
    }

    //// Image Views
    // Create the image views for the render-targets
    vk::ImageViewCreateInfo ImageViewInfoTemplate = {};
    ImageViewInfoTemplate.viewType = vk::ImageViewType::e2D;
    ImageViewInfoTemplate.components.r = vk::ComponentSwizzle::eR;
    ImageViewInfoTemplate.components.g = vk::ComponentSwizzle::eG;
    ImageViewInfoTemplate.components.b = vk::ComponentSwizzle::eB;
    ImageViewInfoTemplate.components.a = vk::ComponentSwizzle::eA;
    ImageViewInfoTemplate.subresourceRange.aspectMask
        = vk::ImageAspectFlagBits::eColor;
    ImageViewInfoTemplate.subresourceRange.baseMipLevel = 0;
    ImageViewInfoTemplate.subresourceRange.levelCount = 1;
    ImageViewInfoTemplate.subresourceRange.baseArrayLayer = 0;
    ImageViewInfoTemplate.subresourceRange.layerCount = 1;

    ImageViewInfoTemplate.image = RenderImage.get();
    ImageViewInfoTemplate.format = RenderImageInfo.format;

    vk::UniqueImageView RenderImageView = {};
    if( auto CreateResult
        = Device->createImageViewUnique(ImageViewInfoTemplate);
        CreateResult.result == vk::Result::eSuccess )
    {
        RenderImageView = std::move(CreateResult.value);
    }
    else
    {
        std::fprintf(
            stderr, "Error creating render target view: %s\n",
            vk::to_string(CreateResult.result).c_str()
        );
        return EXIT_FAILURE;
    }

    ImageViewInfoTemplate.image = RenderImageAA.get();
    ImageViewInfoTemplate.format = RenderImageAAInfo.format;
    vk::UniqueImageView RenderImageAAView = {};
    if( auto CreateResult
        = Device->createImageViewUnique(ImageViewInfoTemplate);
        CreateResult.result == vk::Result::eSuccess )
    {
        RenderImageAAView = std::move(CreateResult.value);
    }
    else
    {
        std::fprintf(
            stderr, "Error creating render target view: %s\n",
            vk::to_string(CreateResult.result).c_str()
        );
        return EXIT_FAILURE;
    }

    ImageViewInfoTemplate.image = RenderImageDepth.get();
    ImageViewInfoTemplate.format = RenderImageDepthInfo.format;
    ImageViewInfoTemplate.subresourceRange.aspectMask
        = vk::ImageAspectFlagBits::eDepth;
    vk::UniqueImageView RenderImageDepthView = {};
    if( auto CreateResult
        = Device->createImageViewUnique(ImageViewInfoTemplate);
        CreateResult.result == vk::Result::eSuccess )
    {
        RenderImageDepthView = std::move(CreateResult.value);
    }
    else
    {
        std::fprintf(
            stderr, "Error creating render target view: %s\n",
            vk::to_string(CreateResult.result).c_str()
        );
        return EXIT_FAILURE;
    }

    //// MainFrameBuffer
    vk::UniqueFramebuffer RenderFramebuffer = CreateMainFrameBuffer(
        Device.get(), RenderImageView.get(), RenderImageDepthView.get(),
        RenderImageAAView.get(), RenderSize, MainRenderPass.get()
    );

    // VkBlam::ShaderEnvironment ShaderEnvironments(
    //     VulkanContext, BitmapHeap, Renderer.GetDescriptorUpdateBatch());

    // CurWorld.GetMapFile().VisitTagClass<Blam::TagClass::ShaderEnvironment>(
    //     [&](const Blam::TagIndexEntry& TagEntry,
    //         const Blam::Tag<Blam::TagClass::ShaderEnvironment>&
    //             ShaderEnvironment) -> void {
    //         ShaderEnvironments.RegisterShader(TagEntry, ShaderEnvironment);
    //     });

    Renderer.GetDescriptorUpdateBatch().Flush();

    //// Create Command Pool
    vk::CommandPoolCreateInfo CommandPoolInfo = {};
    CommandPoolInfo.flags = vk::CommandPoolCreateFlagBits::eResetCommandBuffer;
    CommandPoolInfo.queueFamilyIndex = 0;

    vk::UniqueCommandPool CommandPool = {};
    if( auto CreateResult = Device->createCommandPoolUnique(CommandPoolInfo);
        CreateResult.result == vk::Result::eSuccess )
    {
        CommandPool = std::move(CreateResult.value);
    }
    else
    {
        std::fprintf(
            stderr, "Error creating command pool: %s\n",
            vk::to_string(CreateResult.result).c_str()
        );
        return EXIT_FAILURE;
    }

    //// Create Command Buffer
    vk::CommandBufferAllocateInfo CommandBufferInfo = {};
    CommandBufferInfo.commandPool = CommandPool.get();
    CommandBufferInfo.level = vk::CommandBufferLevel::ePrimary;
    CommandBufferInfo.commandBufferCount = 1;

    vk::UniqueCommandBuffer CommandBuffer = {};

    if( auto AllocateResult
        = Device->allocateCommandBuffersUnique(CommandBufferInfo);
        AllocateResult.result == vk::Result::eSuccess )
    {
        CommandBuffer = std::move(AllocateResult.value[0]);
    }
    else
    {
        std::fprintf(
            stderr, "Error allocating command buffer: %s\n",
            vk::to_string(AllocateResult.result).c_str()
        );
        return EXIT_FAILURE;
    }

    if( auto BeginResult = CommandBuffer->begin(vk::CommandBufferBeginInfo{});
        BeginResult != vk::Result::eSuccess )
    {
        std::fprintf(
            stderr, "Error beginning command buffer: %s\n",
            vk::to_string(BeginResult).c_str()
        );
        return EXIT_FAILURE;
    }

    {
        Vulkan::DebugLabelScope FrameScope(
            CommandBuffer.get(), {1.0, 0.0, 1.0, 1.0}, "Frame"
        );

        {
            Vulkan::DebugLabelScope RenderPassScope(
                CommandBuffer.get(), {1.0, 1.0, 0.0, 1.0}, "Main Render Pass"
            );

            vk::RenderPassBeginInfo RenderBeginInfo = {};
            RenderBeginInfo.renderPass = MainRenderPass.get();
            static const vk::ClearValue ClearColors[] = {
                vk::ClearColorValue(
                    std::array<float, 4>{0.0f, 0.0f, 0.0f, 0.0f}),
                vk::ClearDepthStencilValue(1.0f, 0),
                vk::ClearColorValue(
                    std::array<float, 4>{0.0f, 0.0f, 0.0f, 0.0f}),
            };
            RenderBeginInfo.pClearValues    = ClearColors;
            RenderBeginInfo.clearValueCount = std::size(ClearColors);
            RenderBeginInfo.renderArea.extent.width  = RenderSize.x;
            RenderBeginInfo.renderArea.extent.height = RenderSize.y;
            RenderBeginInfo.framebuffer = RenderFramebuffer.get();
            CommandBuffer->beginRenderPass(
                RenderBeginInfo, vk::SubpassContents::eInline
            );

            // Draw

            const auto WorldBounds = CurWorld.GetWorldBounds();

            const glm::vec3 WorldCenter
                = glm::mix(WorldBounds[0], WorldBounds[1], 0.5);

            const glm::f32 MaxExtent
                = glm::compMax(glm::xyz(WorldBounds[1] - WorldBounds[0]))
                / 2.0f;

            const auto View = glm::lookAt(
                glm::vec3(WorldBounds[1].x, WorldBounds[1].y, MaxExtent) * 1.5f,
                // glm::vec3(WorldCenter.x, WorldCenter.y, WorldBoundMax.z),
                glm::vec3(WorldCenter.x, WorldCenter.y, WorldBounds[0].z),
                glm::vec3(0, 0, 1)
            );
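            // The view above places the eye over the world-bounds maximum
            // corner (pushed out by 1.5x) looking toward the center of the
            // map floor with +Z as the up axis; the perspective projection
            // below then frames the whole BSP with a 60-degree vertical FOV.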
            const auto Projection
                // = glm::ortho(
                //     -MaxExtent, MaxExtent, -MaxExtent, MaxExtent, 0.0f,
                //     WorldBoundMax.z - WorldBoundMin.z);
                = glm::perspective(
                    glm::radians(60.0f),
                    static_cast<float>(RenderSize.x) / RenderSize.y, 1.0f,
                    1000.0f
                );

            VkBlam::SceneView SceneView(View, Projection, RenderSize);

            CurScene.Render(SceneView, CommandBuffer.get());

            CommandBuffer->endRenderPass();
        }

        // Wait for image data to be ready
        {
            Vulkan::DebugLabelScope DebugCopyScope(
                CommandBuffer.get(), {1.0, 1.0, 0.0, 1.0},
                "Upload framebuffer to staging buffer"
            );
            CommandBuffer->pipelineBarrier(
                vk::PipelineStageFlagBits::eColorAttachmentOutput,
                vk::PipelineStageFlagBits::eTransfer, vk::DependencyFlags(), {},
                {},
                {// Source Image
                 vk::ImageMemoryBarrier(
                     vk::AccessFlagBits::eColorAttachmentWrite,
                     vk::AccessFlagBits::eTransferRead,
                     vk::ImageLayout::eTransferSrcOptimal,
                     vk::ImageLayout::eTransferSrcOptimal,
                     VK_QUEUE_FAMILY_IGNORED, VK_QUEUE_FAMILY_IGNORED,
                     RenderImage.get(),
                     vk::ImageSubresourceRange(
                         vk::ImageAspectFlagBits::eColor, 0, 1, 0, 1
                     )
                 )}
            );
            CommandBuffer->copyImageToBuffer(
                RenderImage.get(), vk::ImageLayout::eTransferSrcOptimal,
                StagingBuffer.get(),
                {vk::BufferImageCopy(
                    0, RenderSize.x, RenderSize.y,
                    vk::ImageSubresourceLayers(
                        vk::ImageAspectFlagBits::eColor, 0, 0, 1
                    ),
                    vk::Offset3D(), vk::Extent3D(RenderSize.x, RenderSize.y, 1)
                )}
            );
        }
    }

    if( auto EndResult = CommandBuffer->end();
        EndResult != vk::Result::eSuccess )
    {
        std::fprintf(
            stderr, "Error ending command buffer: %s\n",
            vk::to_string(EndResult).c_str()
        );
        return EXIT_FAILURE;
    }

    const std::uint64_t UploadTick = Renderer.GetStreamBuffer().Flush();

    // Submit work
    vk::UniqueFence Fence = {};
    if( auto CreateResult = Device->createFenceUnique({});
        CreateResult.result == vk::Result::eSuccess )
    {
        Fence = std::move(CreateResult.value);
    }
    else
    {
        std::fprintf(
            stderr, "Error creating fence: %s\n",
            vk::to_string(CreateResult.result).c_str()
        );
        return EXIT_FAILURE;
    }

    vk::StructureChain<vk::SubmitInfo, vk::TimelineSemaphoreSubmitInfo>
        SubmitInfoChain;

    auto& SubmitInfo = SubmitInfoChain.get<vk::SubmitInfo>();

    SubmitInfo.commandBufferCount = 1;
    SubmitInfo.pCommandBuffers    = &CommandBuffer.get();

    SubmitInfo.waitSemaphoreCount = 1;
    SubmitInfo.pWaitSemaphores    = &Renderer.GetStreamBuffer().GetSemaphore();

    static const vk::PipelineStageFlags WaitStage
        = vk::PipelineStageFlagBits::eTransfer;
    SubmitInfo.pWaitDstStageMask = &WaitStage;

    auto& SubmitTimelineInfo
        = SubmitInfoChain.get<vk::TimelineSemaphoreSubmitInfo>();

    SubmitTimelineInfo.waitSemaphoreValueCount = 1;
    SubmitTimelineInfo.pWaitSemaphoreValues    = &UploadTick;

    if( auto SubmitResult = RenderQueue.submit(SubmitInfo, Fence.get());
        SubmitResult != vk::Result::eSuccess )
    {
        std::fprintf(
            stderr, "Error submitting command buffer: %s\n",
            vk::to_string(SubmitResult).c_str()
        );
        return EXIT_FAILURE;
    }

    // Wait for it
    if( auto WaitResult = Device->waitForFences(Fence.get(), true, ~0ULL);
        WaitResult != vk::Result::eSuccess )
    {
        std::fprintf(
            stderr, "Error waiting for fence: %s\n",
            vk::to_string(WaitResult).c_str()
        );
        return EXIT_FAILURE;
    }
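    // Once the fence signals, the staging memory can be read straight through
    // the persistent mapping: it was allocated eHostVisible|eHostCoherent, so
    // no invalidateMappedMemoryRanges call is needed before the CPU reads
    // StagingBufferData for the PNG write below. (With a non-coherent memory
    // type, an explicit Device->invalidateMappedMemoryRanges(...) covering the
    // mapped range would be required here.)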
#ifdef CAPTURE
    if( rdoc_api )
        rdoc_api->EndFrameCapture(
            *(void**)(VkInstance)(*Instance.operator->()), NULL
        );
#endif

    stbi_write_png_compression_level = 0;
    stbi_write_png(
        ("./" + MapPath.stem().string() + ".png").c_str(), RenderSize.x,
        RenderSize.y, 4, StagingBufferData.data(), 0
    );

    return EXIT_SUCCESS;
}

vk::UniqueFramebuffer CreateMainFrameBuffer(
    vk::Device Device, vk::ImageView Color, vk::ImageView DepthAA,
    vk::ImageView ColorAA, glm::uvec2 ImageSize, vk::RenderPass RenderPass
)
{
    vk::FramebufferCreateInfo FramebufferInfo = {};

    FramebufferInfo.width      = ImageSize.x;
    FramebufferInfo.height     = ImageSize.y;
    FramebufferInfo.layers     = 1;
    FramebufferInfo.renderPass = RenderPass;

    const vk::ImageView Attachments[] = {Color, DepthAA, ColorAA};
    FramebufferInfo.attachmentCount   = std::size(Attachments);
    FramebufferInfo.pAttachments      = Attachments;

    if( auto CreateResult = Device.createFramebufferUnique(FramebufferInfo);
        CreateResult.result == vk::Result::eSuccess )
    {
        return std::move(CreateResult.value);
    }
    else
    {
        std::fprintf(
            stderr, "Error creating framebuffer: %s\n",
            vk::to_string(CreateResult.result).c_str()
        );
        return {};
    }
}

std::string FormatDeviceCaps(vk::PhysicalDevice PhysicalDevice)
{
    std::string Result;

    const vk::PhysicalDeviceProperties Properties
        = PhysicalDevice.getProperties();

    Result += Common::Format(
        "Device Name: %.256s\n", Properties.deviceName.data()
    );
    Result += Common::Format(
        "Device Type: %s\n", vk::to_string(Properties.deviceType).c_str()
    );
    Result += Common::Format(
        "DeviceID/VendorID: %8x:%8x\n", Properties.deviceID, Properties.vendorID
    );

    const vk::PhysicalDeviceMemoryProperties MemoryProperties
        = PhysicalDevice.getMemoryProperties();
    for( std::uint8_t HeapIdx = 0; HeapIdx < MemoryProperties.memoryHeapCount;
         ++HeapIdx )
    {
        const auto& CurHeap = MemoryProperties.memoryHeaps[HeapIdx];
        Result += Common::Format(
            "Heap %2u: %12s %s\n", HeapIdx,
            Common::FormatByteCount(CurHeap.size).c_str(),
            vk::to_string(CurHeap.flags).c_str()
        );
    }

    return Result;
}

--------------------------------------------------------------------------------