├── .clang-format ├── .gitignore ├── CMakeLists.txt ├── LICENSE ├── README.md ├── app ├── biome_png.cpp └── mem_leak_check.cpp ├── build.ps1 ├── data ├── colors │ ├── biome_color.json │ └── block_color.json └── images │ ├── TAG.zip │ ├── TAG_Byte.ico │ ├── TAG_Byte_Array.ico │ ├── TAG_Byte_bool.ico │ ├── TAG_Compound.ico │ ├── TAG_Double.ico │ ├── TAG_End.ico │ ├── TAG_Float.ico │ ├── TAG_Int.ico │ ├── TAG_Int_Array.ico │ ├── TAG_List.ico │ ├── TAG_Long.ico │ ├── TAG_Long_Array.ico │ ├── TAG_Short.ico │ ├── TAG_String.ico │ └── png │ ├── TAG.psd │ ├── TAG_Byte.png │ ├── TAG_Byte_Array.png │ ├── TAG_Byte_bool.png │ ├── TAG_Compound.png │ ├── TAG_Double.png │ ├── TAG_End.png │ ├── TAG_Float.png │ ├── TAG_Int.png │ ├── TAG_Int_Array.png │ ├── TAG_List.png │ ├── TAG_Long.png │ ├── TAG_Long_Array.png │ ├── TAG_Short.png │ ├── TAG_String.png │ └── TAG_all.png ├── libs ├── libleveldb-gnu.a ├── libleveldb-mingw64.a └── libz-mingw64.a ├── pics └── biome.png ├── src ├── actor.cpp ├── bedrock_key.cpp ├── bedrock_level.cpp ├── chunk.cpp ├── color.cpp ├── data_3d.cpp ├── global.cpp ├── include │ ├── actor.h │ ├── bedrock_key.h │ ├── bedrock_level.h │ ├── bit_tools.h │ ├── chunk.h │ ├── color.h │ ├── data_3d.h │ ├── global.h │ ├── level_dat.h │ ├── palette.h │ ├── player.h │ ├── scoreboard.h │ ├── sub_chunk.h │ └── utils.h ├── level_dat.cpp ├── palette.cpp ├── player.cpp ├── scoreboard.cpp ├── sub_chunk.cpp └── utils.cpp ├── tests ├── actor_test.cpp ├── bedrock_level_test.cpp ├── bit_tools_test.cpp ├── chunk_test.cpp ├── color_test.cpp ├── data3d_test.cpp ├── data_dump_test.cpp ├── key_test.cpp ├── level_dat_test.cpp ├── palettes_test.cpp ├── stb_image_test.cpp ├── sub_chunk_test.cpp └── utils_test.cpp └── third ├── json └── json.hpp ├── leveldb ├── c.h ├── cache.h ├── comparator.h ├── compressor.h ├── db.h ├── decompress_allocator.h ├── dumpfile.h ├── env.h ├── export.h ├── filter_policy.h ├── iterator.h ├── options.h ├── slice.h ├── snappy_compressor.h ├── status.h ├── table.h ├── table_builder.h ├── write_batch.h ├── zlib_compressor.h ├── zopfli_compressor.h └── zstd_compressor.h ├── magic-enum └── magic_enum.hpp ├── seh_exception.hpp └── stb └── stb_image_write.h /.clang-format: -------------------------------------------------------------------------------- 1 | Language: Cpp 2 | BasedOnStyle: Google 3 | UseTab: Never 4 | IndentWidth: 4 5 | NamespaceIndentation: All 6 | ColumnLimit: 100 -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Prerequisites 2 | *.d 3 | .release-mingw 4 | # Compiled Object files 5 | *.slo 6 | *.lo 7 | *.o 8 | *.obj 9 | 10 | # Precompiled Headers 11 | *.gch 12 | *.pch 13 | 14 | # Compiled Dynamic libraries 15 | *.so 16 | *.dylib 17 | *.dll 18 | 19 | # Fortran module files 20 | *.mod 21 | *.smod 22 | 23 | # Compiled Static libraries 24 | *.lai 25 | *.la 26 | # Executables 27 | *.exe 28 | *.out 29 | *.app 30 | 31 | .build 32 | 33 | .vscode 34 | .idea/* 35 | .build-debug 36 | .build-debug/* 37 | .cache 38 | .cache/* 39 | data/worlds/* 40 | data/dumps/* 41 | .vs-build 42 | .release 43 | .debug 44 | .build 45 | build -------------------------------------------------------------------------------- /CMakeLists.txt: -------------------------------------------------------------------------------- 1 | 2 | CMAKE_MINIMUM_REQUIRED(VERSION 3.14) 3 | 4 | 5 | project(bedrock_level) 6 | set(CMAKE_CXX_STANDARD 17) 7 | set(CMAKE_EXPORT_COMPILE_COMMANDS ON) 8 | if 
(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC") 9 | MESSAGE(FATAL_ERROR "Unsupported tool-chain, please use mingw64 instead") 10 | endif () 11 | 12 | 13 | include_directories(src/include) 14 | include_directories(third/) 15 | 16 | enable_testing() 17 | add_library(bedrock-level 18 | STATIC 19 | src/sub_chunk.cpp 20 | src/utils.cpp 21 | src/palette.cpp 22 | src/bedrock_level.cpp 23 | src/bedrock_key.cpp 24 | src/data_3d.cpp 25 | src/chunk.cpp 26 | src/actor.cpp 27 | src/player.cpp 28 | src/scoreboard.cpp 29 | src/global.cpp 30 | src/color.cpp 31 | src/level_dat.cpp 32 | ) 33 | 34 | 35 | if (WIN32) 36 | MESSAGE(STATUS "Current compiler is MinGW") 37 | set(EXTRA_LIBS ${PROJECT_SOURCE_DIR}/libs/libleveldb-mingw64.a ${PROJECT_SOURCE_DIR}/libs/libz-mingw64.a) 38 | add_compile_options(-std=c++17 -O2 -Wall -g) 39 | target_link_libraries(bedrock-level ${EXTRA_LIBS}) 40 | endif (WIN32) 41 | 42 | 43 | if (UNIX) 44 | add_compile_options(-std=c++17 -Wall -g) 45 | MESSAGE(STATUS "Current compiler is GNU gcc") 46 | set(EXTRA_LIBS ${PROJECT_SOURCE_DIR}/libs/libleveldb-gnu.a) 47 | add_compile_options(-fsanitize=address) 48 | add_link_options(-fsanitize=address) 49 | find_package(ZLIB) 50 | target_link_libraries(bedrock-level ${EXTRA_LIBS} ZLIB::ZLIB) 51 | endif (UNIX) 52 | 53 | 54 | function(create_app name files) 55 | add_executable( 56 | ${name} 57 | ${files} 58 | ) 59 | target_link_libraries(${name} PRIVATE bedrock-level) 60 | endfunction(create_app) 61 | 62 | create_app(biome_png app/biome_png.cpp) 63 | create_app(mem_leak_check app/mem_leak_check.cpp) 64 | 65 | 66 | #testing 67 | include(FetchContent) 68 | FetchContent_Declare( 69 | googletest 70 | URL https://github.com/google/googletest/archive/03597a01ee50ed33e9dfd640b249b4be3799d395.zip 71 | ) 72 | # For Windows: Prevent overriding the parent project's compiler/linker settings 73 | set(gtest_force_shared_crt ON CACHE BOOL "" FORCE) 74 | FetchContent_MakeAvailable(googletest) 75 | include(GoogleTest) 76 | 77 | function(create_test name files) 78 | add_executable( 79 | ${name} 80 | ${files} 81 | ) 82 | target_link_libraries(${name} PRIVATE GTest::gtest_main bedrock-level) 83 | # add_test(NAME ${name} COMMAND ${name}) 84 | endfunction(create_test) 85 | 86 | create_test(sub_chunk_test tests/sub_chunk_test.cpp) 87 | create_test(utils_test tests/utils_test.cpp) 88 | create_test(bit_tools_test tests/bit_tools_test.cpp) 89 | create_test(data3d_test tests/data3d_test.cpp) 90 | create_test(data_dump_level_test tests/data_dump_test.cpp) 91 | create_test(palette_test tests/palettes_test.cpp) 92 | create_test(actor_test tests/actor_test.cpp) 93 | create_test(key_test tests/key_test.cpp) 94 | create_test(bedrock_level_test tests/bedrock_level_test.cpp) 95 | create_test(chunk_test tests/chunk_test.cpp) 96 | create_test(stb_image_test tests/stb_image_test.cpp) 97 | create_test(color_test tests/color_test.cpp) 98 | create_test(level_dat_test tests/level_dat_test.cpp) 99 | #create_test(nbt_test tests/nbt_test.cpp) 100 | 101 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Bedrock Level 2 | 3 | A Bedrock level library written in C++. 4 | (The project is still at an early demo stage and may contain bugs.) 5 | 6 | On Windows, only MinGW64 (POSIX version) tool-chains are supported!
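For a quick start, here is a minimal usage sketch. The world path is a placeholder, and the calls simply mirror the biome-map sample below and the sources under `src/`; chunk lifetime handling follows that sample (the level manages chunks it hands out).

```c++
#include <iostream>

#include "bedrock_level.h"

int main() {
    bl::bedrock_level level;
    if (!level.open(R"(C:\path\to\world)")) {  // folder that contains level.dat and db/
        return -1;
    }
    auto *chunk = level.get_chunk({0, 0, 0});  // chunk x, chunk z, dimension (0 = overworld)
    if (chunk) {
        // block coordinates are chunk-local x/z plus the world y
        std::cout << chunk->get_block(0, 64, 0).name << std::endl;
    }
    level.close();
    return 0;
}
```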
7 | 8 | ## Samples 9 | 10 | ### Biome Map 11 | 12 | ```c++ 13 | int main() { 14 | bl::init_biome_color_palette_from_file( 15 | R"(C:\Users\xhy\dev\bedrock-level\data\colors\biome.json)"); 16 | 17 | const std::string path = R"(C:\Users\xhy\Desktop\t)"; 18 | bl::bedrock_level level; 19 | if (!level.open(path)) { 20 | fprintf(stderr, "Can not open level %s", path.c_str()); 21 | return -1; 22 | } 23 | 24 | auto spawn_pos = level.dat().spawn_position(); 25 | auto center_chunk_pos = spawn_pos.to_chunk_pos(); 26 | const int DIM = 0; 27 | const int R = 40; 28 | auto minP = bl::chunk_pos{center_chunk_pos.x - R, center_chunk_pos.z - R, DIM}; 29 | auto maxP = bl::chunk_pos{center_chunk_pos.x + R, center_chunk_pos.z + R, DIM}; 30 | const int W = maxP.x - minP.x + 1; 31 | const int H = maxP.z - minP.z + 1; 32 | std::vector<std::vector<bl::color>> cm(H * 16, std::vector<bl::color>(W * 16, bl::color())); 33 | for (int x = minP.x; x <= maxP.x; x++) { 34 | for (int z = minP.z; z <= maxP.z; z++) { 35 | auto *chunk = level.get_chunk({x, z, DIM}); 36 | if (chunk) { 37 | auto sx = (x - minP.x) * 16; 38 | auto sz = (z - minP.z) * 16; 39 | for (int xx = 0; xx < 16; xx++) { 40 | for (int zz = 0; zz < 16; zz++) { 41 | cm[sz + zz][sx + xx] = bl::get_biome_color(chunk->get_top_biome(xx, zz)); 42 | } 43 | } 44 | } 45 | } 46 | } 47 | bl::export_image(cm, 1, "biome.png"); 48 | return 0; 49 | } 50 | ``` 51 | 52 | ![](pics/biome.png) 53 | 54 | ### Compile guide 55 | 56 | Just clone this repo and run `build.ps1` in PowerShell. 57 | -------------------------------------------------------------------------------- /app/biome_png.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | 4 | #include "bedrock_level.h" 5 | #include "color.h" 6 | 7 | int main() { 8 | // bl::init_biome_color_palette_from_file( 9 | // R"(C:\Users\xhy\dev\bedrock-level\data\colors\biome.json)"); 10 | // 11 | bl::init_block_color_palette_from_file( 12 | R"(C:\Users\xhy\dev\bedrock-level\data\colors\block.json)"); 13 | 14 | const std::string path = R"(C:\Users\xhy\Desktop\SAC_survival)"; 15 | bl::bedrock_level level; 16 | if (!level.open(path)) { 17 | fprintf(stderr, "Can not open level %s", path.c_str()); 18 | return -1; 19 | } 20 | 21 | // auto spawn_pos = level.dat().spawn_position(); 22 | 23 | auto center_chunk_pos = bl::chunk_pos{0, 0, 0}; 24 | const int DIM = 0; 25 | const int R = 0; 26 | auto minP = bl::chunk_pos{center_chunk_pos.x - R, center_chunk_pos.z - R, DIM}; 27 | auto maxP = bl::chunk_pos{center_chunk_pos.x + R, center_chunk_pos.z + R, DIM}; 28 | const int W = maxP.x - minP.x + 1; 29 | const int H = maxP.z - minP.z + 1; 30 | std::vector<std::vector<bl::color>> cm(H * 16, std::vector<bl::color>(W * 16, bl::color())); 31 | for (int x = minP.x; x <= maxP.x; x++) { 32 | for (int z = minP.z; z <= maxP.z; z++) { 33 | auto *chunk = level.get_chunk({x, z, DIM}); 34 | if (chunk) { 35 | auto sx = (x - minP.x) * 16; 36 | auto sz = (z - minP.z) * 16; 37 | for (int xx = 0; xx < 16; xx++) { 38 | for (int zz = 0; zz < 16; zz++) { 39 | // auto name = chunk->get_top_block(xx, zz); 40 | // cm[sz + zz][sx + xx] = 41 | // chunk->get_top_block_color(xx, zz); 42 | // 43 | } 44 | } 45 | } 46 | } 47 | } 48 | bl::export_image(cm, 1, "biome.png"); 49 | return 0; 50 | } 51 | -------------------------------------------------------------------------------- /app/mem_leak_check.cpp: -------------------------------------------------------------------------------- 1 | // 2 | // Created by xhy on 2023/8/18. 
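// Note: these checks are meant to be run with a leak detector; the UNIX build in
// CMakeLists.txt compiles and links with -fsanitize=address, so leaked allocations are
// reported on exit. demo_check() below appears to leak one int on purpose as a smoke
// test for that setup (an assumption based on its name).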
3 | // 4 | #include 5 | 6 | #include "bedrock_level.h" 7 | #include "palette.h" 8 | #include "utils.h" 9 | 10 | void demo_check() { int *a = new int; } 11 | 12 | void nbt_leak_check() { 13 | const auto path = 14 | "/mnt/c/Users/xhy/dev/bedrock-level/data/dumps/actors/144115188092633088.palette"; 15 | 16 | auto file = bl::utils::read_file(path); 17 | auto p = bl::palette::read_palette_to_end(file.data(), file.size()); 18 | for (auto i : p) delete i; 19 | } 20 | 21 | void level_read_leak_check() { 22 | const auto path = "/mnt/d/MC/saves/2g"; 23 | bl::bedrock_level l; 24 | 25 | assert(l.open(path)); 26 | for (int i = -10; i <= 10; i++) { 27 | for (int j = -10; j <= 10; j++) { 28 | auto *ch = l.get_chunk({i, j, 0}, true); 29 | if (ch) { 30 | std::cout << ch->get_block(0, 32, 0).name << std::endl; 31 | printf("%d\n", ch->get_pos().x); 32 | delete ch; 33 | } 34 | } 35 | } 36 | 37 | l.close(); 38 | } 39 | 40 | int main() { 41 | level_read_leak_check(); 42 | return 0; 43 | } 44 | -------------------------------------------------------------------------------- /build.ps1: -------------------------------------------------------------------------------- 1 | $build_dir = "./build" 2 | New-Item -Path "." -Name $build_dir -ItemType Directory 3 | # complie 4 | cmake -G "MinGW Makefiles" -DCMAKE_C_COMPILER=gcc -DCMAKE_CXX_COMPILER=g++ -B $build_dir . 5 | cmake --build $build_dir --config Release -j 18 -- -------------------------------------------------------------------------------- /data/images/TAG.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bedrock-dev/bedrock-level/64243ed9ed111f591d51652d3e27f990c9379be5/data/images/TAG.zip -------------------------------------------------------------------------------- /data/images/TAG_Byte.ico: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bedrock-dev/bedrock-level/64243ed9ed111f591d51652d3e27f990c9379be5/data/images/TAG_Byte.ico -------------------------------------------------------------------------------- /data/images/TAG_Byte_Array.ico: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bedrock-dev/bedrock-level/64243ed9ed111f591d51652d3e27f990c9379be5/data/images/TAG_Byte_Array.ico -------------------------------------------------------------------------------- /data/images/TAG_Byte_bool.ico: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bedrock-dev/bedrock-level/64243ed9ed111f591d51652d3e27f990c9379be5/data/images/TAG_Byte_bool.ico -------------------------------------------------------------------------------- /data/images/TAG_Compound.ico: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bedrock-dev/bedrock-level/64243ed9ed111f591d51652d3e27f990c9379be5/data/images/TAG_Compound.ico -------------------------------------------------------------------------------- /data/images/TAG_Double.ico: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bedrock-dev/bedrock-level/64243ed9ed111f591d51652d3e27f990c9379be5/data/images/TAG_Double.ico -------------------------------------------------------------------------------- /data/images/TAG_End.ico: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/bedrock-dev/bedrock-level/64243ed9ed111f591d51652d3e27f990c9379be5/data/images/TAG_End.ico -------------------------------------------------------------------------------- /data/images/TAG_Float.ico: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bedrock-dev/bedrock-level/64243ed9ed111f591d51652d3e27f990c9379be5/data/images/TAG_Float.ico -------------------------------------------------------------------------------- /data/images/TAG_Int.ico: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bedrock-dev/bedrock-level/64243ed9ed111f591d51652d3e27f990c9379be5/data/images/TAG_Int.ico -------------------------------------------------------------------------------- /data/images/TAG_Int_Array.ico: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bedrock-dev/bedrock-level/64243ed9ed111f591d51652d3e27f990c9379be5/data/images/TAG_Int_Array.ico -------------------------------------------------------------------------------- /data/images/TAG_List.ico: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bedrock-dev/bedrock-level/64243ed9ed111f591d51652d3e27f990c9379be5/data/images/TAG_List.ico -------------------------------------------------------------------------------- /data/images/TAG_Long.ico: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bedrock-dev/bedrock-level/64243ed9ed111f591d51652d3e27f990c9379be5/data/images/TAG_Long.ico -------------------------------------------------------------------------------- /data/images/TAG_Long_Array.ico: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bedrock-dev/bedrock-level/64243ed9ed111f591d51652d3e27f990c9379be5/data/images/TAG_Long_Array.ico -------------------------------------------------------------------------------- /data/images/TAG_Short.ico: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bedrock-dev/bedrock-level/64243ed9ed111f591d51652d3e27f990c9379be5/data/images/TAG_Short.ico -------------------------------------------------------------------------------- /data/images/TAG_String.ico: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bedrock-dev/bedrock-level/64243ed9ed111f591d51652d3e27f990c9379be5/data/images/TAG_String.ico -------------------------------------------------------------------------------- /data/images/png/TAG.psd: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bedrock-dev/bedrock-level/64243ed9ed111f591d51652d3e27f990c9379be5/data/images/png/TAG.psd -------------------------------------------------------------------------------- /data/images/png/TAG_Byte.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bedrock-dev/bedrock-level/64243ed9ed111f591d51652d3e27f990c9379be5/data/images/png/TAG_Byte.png -------------------------------------------------------------------------------- /data/images/png/TAG_Byte_Array.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/bedrock-dev/bedrock-level/64243ed9ed111f591d51652d3e27f990c9379be5/data/images/png/TAG_Byte_Array.png -------------------------------------------------------------------------------- /data/images/png/TAG_Byte_bool.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bedrock-dev/bedrock-level/64243ed9ed111f591d51652d3e27f990c9379be5/data/images/png/TAG_Byte_bool.png -------------------------------------------------------------------------------- /data/images/png/TAG_Compound.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bedrock-dev/bedrock-level/64243ed9ed111f591d51652d3e27f990c9379be5/data/images/png/TAG_Compound.png -------------------------------------------------------------------------------- /data/images/png/TAG_Double.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bedrock-dev/bedrock-level/64243ed9ed111f591d51652d3e27f990c9379be5/data/images/png/TAG_Double.png -------------------------------------------------------------------------------- /data/images/png/TAG_End.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bedrock-dev/bedrock-level/64243ed9ed111f591d51652d3e27f990c9379be5/data/images/png/TAG_End.png -------------------------------------------------------------------------------- /data/images/png/TAG_Float.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bedrock-dev/bedrock-level/64243ed9ed111f591d51652d3e27f990c9379be5/data/images/png/TAG_Float.png -------------------------------------------------------------------------------- /data/images/png/TAG_Int.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bedrock-dev/bedrock-level/64243ed9ed111f591d51652d3e27f990c9379be5/data/images/png/TAG_Int.png -------------------------------------------------------------------------------- /data/images/png/TAG_Int_Array.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bedrock-dev/bedrock-level/64243ed9ed111f591d51652d3e27f990c9379be5/data/images/png/TAG_Int_Array.png -------------------------------------------------------------------------------- /data/images/png/TAG_List.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bedrock-dev/bedrock-level/64243ed9ed111f591d51652d3e27f990c9379be5/data/images/png/TAG_List.png -------------------------------------------------------------------------------- /data/images/png/TAG_Long.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bedrock-dev/bedrock-level/64243ed9ed111f591d51652d3e27f990c9379be5/data/images/png/TAG_Long.png -------------------------------------------------------------------------------- /data/images/png/TAG_Long_Array.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bedrock-dev/bedrock-level/64243ed9ed111f591d51652d3e27f990c9379be5/data/images/png/TAG_Long_Array.png -------------------------------------------------------------------------------- /data/images/png/TAG_Short.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/bedrock-dev/bedrock-level/64243ed9ed111f591d51652d3e27f990c9379be5/data/images/png/TAG_Short.png -------------------------------------------------------------------------------- /data/images/png/TAG_String.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bedrock-dev/bedrock-level/64243ed9ed111f591d51652d3e27f990c9379be5/data/images/png/TAG_String.png -------------------------------------------------------------------------------- /data/images/png/TAG_all.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bedrock-dev/bedrock-level/64243ed9ed111f591d51652d3e27f990c9379be5/data/images/png/TAG_all.png -------------------------------------------------------------------------------- /libs/libleveldb-gnu.a: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bedrock-dev/bedrock-level/64243ed9ed111f591d51652d3e27f990c9379be5/libs/libleveldb-gnu.a -------------------------------------------------------------------------------- /libs/libleveldb-mingw64.a: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bedrock-dev/bedrock-level/64243ed9ed111f591d51652d3e27f990c9379be5/libs/libleveldb-mingw64.a -------------------------------------------------------------------------------- /libs/libz-mingw64.a: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bedrock-dev/bedrock-level/64243ed9ed111f591d51652d3e27f990c9379be5/libs/libz-mingw64.a -------------------------------------------------------------------------------- /pics/biome.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bedrock-dev/bedrock-level/64243ed9ed111f591d51652d3e27f990c9379be5/pics/biome.png -------------------------------------------------------------------------------- /src/actor.cpp: -------------------------------------------------------------------------------- 1 | // 2 | // Created by xhy on 2023/3/31. 
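// Actor NBT fields consumed by actor::preload below (names taken from the code): a "Pos"
// list of three floats, an "identifier" string, and a numeric "UniqueID"; load() fails
// unless all three are present.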
3 | // 4 | 5 | #include "actor.h" 6 | 7 | #include 8 | 9 | #include "palette.h" 10 | 11 | namespace bl { 12 | 13 | bool actor::load(const byte_t *data, size_t len) { 14 | auto p = bl::palette::read_palette_to_end(data, len); 15 | if (p.size() != 1) { 16 | BL_ERROR("Invalid Actor!!"); 17 | return false; 18 | } 19 | 20 | this->root_ = p[0]; 21 | this->loaded_ = this->preload(root_); 22 | return this->loaded_; 23 | } 24 | 25 | void actor::dump() { 26 | printf("- type: %s\n", this->identifier_.c_str()); 27 | printf("- pos: [%f %f %f]\n", pos_.x, pos_.y, pos_.z); 28 | printf("- NBT:\n\n"); 29 | for (auto &kv : this->root_->value) { 30 | kv.second->write(std::cout, 0); 31 | printf("=============================\n"); 32 | } 33 | } 34 | bool actor::load_from_nbt(bl::palette::compound_tag *nbt) { 35 | if (!this->preload(nbt)) return false; 36 | this->root_ = dynamic_cast(nbt->copy()); 37 | return true; 38 | } 39 | bool actor::preload(bl::palette::compound_tag *root) { 40 | if (!root) return false; 41 | bool read_pos = false; 42 | bool read_uid = false; 43 | bool read_identifier = false; 44 | auto it = root->value.find("Pos"); 45 | if (it != root->value.end()) { 46 | auto *pos_tag = dynamic_cast(it->second); 47 | if (pos_tag && pos_tag->value.size() == 3) { 48 | auto *tag_x = dynamic_cast(pos_tag->value[0]); 49 | auto *tag_y = dynamic_cast(pos_tag->value[1]); 50 | auto *tag_z = dynamic_cast(pos_tag->value[2]); 51 | if (tag_x && tag_y && tag_z) { 52 | this->pos_.x = tag_x->value; 53 | this->pos_.y = tag_y->value; 54 | this->pos_.z = tag_z->value; 55 | read_pos = true; 56 | } 57 | } 58 | } 59 | 60 | auto it2 = root->value.find("identifier"); 61 | if (it2 != root->value.end()) { 62 | auto *id_tag = dynamic_cast(it2->second); 63 | if (id_tag) { 64 | this->identifier_ = id_tag->value; 65 | read_identifier = true; 66 | } 67 | } 68 | auto it3 = root->value.find("UniqueID"); 69 | if (it3 != root->value.end()) { 70 | auto lt = dynamic_cast(it3->second); 71 | if (lt) { 72 | this->uid_ = lt->value; 73 | read_uid = true; 74 | } 75 | } 76 | return read_pos && read_identifier && read_uid; 77 | } 78 | actor::~actor() { delete this->root_; } 79 | } // namespace bl 80 | -------------------------------------------------------------------------------- /src/bedrock_key.cpp: -------------------------------------------------------------------------------- 1 | // 2 | // Created by xhy on 2023/3/30. 
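// LevelDB chunk-key layout, as reconstructed from chunk_key::parse below (4-byte ints read
// in host byte order):
//   9 bytes:  [x:int32][z:int32][type:uint8]                       overworld
//   10 bytes: [x:int32][z:int32][type:uint8][y_index:int8]         overworld sub-chunk terrain
//   13 bytes: [x:int32][z:int32][dim:int32][type:uint8]            nether / the end
//   14 bytes: [x:int32][z:int32][dim:int32][type:uint8][y_index]   nether / the end sub-chunk terrain
// type must be in 43..65 or equal 118, dim must be 0..2, and y_index only appears for
// SubChunkTerrain keys.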
3 | // 4 | 5 | #include "bedrock_key.h" 6 | 7 | #include 8 | #include 9 | 10 | namespace bl { 11 | const chunk_key chunk_key::INVALID_CHUNK_KEY = 12 | chunk_key{chunk_key::Unknown, bl::chunk_pos(), 0}; 13 | 14 | chunk_key chunk_key::parse(const std::string &key) { 15 | auto sz = key.size(); 16 | if (sz == 9 || sz == 10 || sz == 13 || sz == 14) { 17 | auto x = *reinterpret_cast(key.data()); 18 | auto z = *reinterpret_cast(key.data() + 4); 19 | auto dim = 0; 20 | auto key_type_idx = 8; 21 | if (sz == 13 || sz == 14) { // nether or the end 22 | dim = *reinterpret_cast(key.data() + 8); 23 | key_type_idx = 12; 24 | } 25 | 26 | if (dim < 0 || dim > 2) { // invalid dimension key 27 | return INVALID_CHUNK_KEY; 28 | } 29 | 30 | auto type = static_cast(key[key_type_idx]); 31 | 32 | if ((type < 43 || type > 65) && type != 118) { 33 | return INVALID_CHUNK_KEY; 34 | } 35 | 36 | // sub chunk terrain 37 | int8_t y_index = 0; 38 | if (key.size() == 10 || key.size() == 14) { 39 | if (type != SubChunkTerrain) { 40 | return INVALID_CHUNK_KEY; 41 | } 42 | y_index = key.back(); 43 | } 44 | 45 | chunk_pos cp{x, z, dim}; 46 | return chunk_key{type, cp, y_index}; 47 | } else { 48 | return INVALID_CHUNK_KEY; 49 | } 50 | } 51 | 52 | actor_key actor_key::parse(const std::string &key) { 53 | actor_key res; 54 | if (key.size() != 19 || key.rfind("actorprefix", 0) != 0) return res; 55 | res.actor_uid = *reinterpret_cast(key.data() + 11); 56 | return res; 57 | } 58 | 59 | actor_digest_key actor_digest_key::parse(const std::string &key) { 60 | actor_digest_key res{}; 61 | if (key.size() != 12 && key.size() != 16) return res; 62 | if (key.rfind("digp", 0) != 0) return res; 63 | res.cp.x = *reinterpret_cast(key.data() + 4); 64 | res.cp.z = *reinterpret_cast(key.data() + 8); 65 | res.cp.dim = 0; 66 | if (key.size() == 16) { 67 | res.cp.dim = *reinterpret_cast(key.data() + 12); 68 | } 69 | return res; 70 | } 71 | 72 | std::string actor_digest_key::to_string() const { return this->cp.to_string(); } 73 | std::string actor_digest_key::to_raw() const { 74 | if (!this->cp.valid()) return ""; 75 | size_t sz = 8; 76 | if (cp.dim != 0) sz = 12; 77 | std::string res = "digp"; 78 | std::string r(sz, '\0'); 79 | memcpy(r.data(), &cp.x, 4); 80 | memcpy(r.data() + 4, &cp.z, 4); 81 | if (this->cp.dim != 0) { 82 | memcpy(r.data() + 8, &cp.dim, 4); 83 | } 84 | return res + r; 85 | } 86 | village_key village_key::parse(const std::string &key) { 87 | village_key res; 88 | if (key.size() < 46) return res; 89 | if (key.rfind("VILLAGE_", 0) != 0) return res; 90 | res.uuid = std::string(key.begin() + 8, key.begin() + 44); // uuid 91 | std::string type_str = std::string(key.data() + 45); 92 | if (type_str == "DWELLERS") { 93 | res.type = DWELLERS; 94 | } else if (type_str == "INFO") { 95 | res.type = INFO; 96 | } else if (type_str == "PLAYERS") { 97 | res.type = PLAYERS; 98 | } else if (type_str == "POI") { 99 | res.type = POI; 100 | } else { 101 | res.type = Unknown; 102 | } 103 | return res; 104 | } 105 | std::string village_key::to_raw() const { 106 | if (!this->valid()) return {}; 107 | return "VILLAGE_" + this->uuid + "_" + village_key_type_to_str(this->type); 108 | } 109 | std::string village_key::village_key_type_to_str(village_key::key_type t) { 110 | switch (t) { 111 | case INFO: 112 | return "INFO"; 113 | case DWELLERS: 114 | return "DWELLERS"; 115 | case PLAYERS: 116 | return "PLAYERS"; 117 | case POI: 118 | return "POI"; 119 | case Unknown: 120 | return "UNKNOWN"; 121 | } 122 | return "UNKNOWN"; 123 | } 124 | 125 | std::string 
chunk_key::chunk_key_to_str(bl::chunk_key::key_type key) { 126 | switch (key) { 127 | case Data3D: 128 | return "Data3D"; 129 | case VersionNew: 130 | return "VersionNew"; 131 | case Data2D: 132 | return "Data2D"; 133 | case Data2DLegacy: 134 | return "Data2DLegacy"; 135 | case SubChunkTerrain: 136 | return "SubChunkTerrain"; 137 | case LegacyTerrain: 138 | return "LegacyTerrain"; 139 | case BlockEntity: 140 | return "BlockEntity"; 141 | case Entity: 142 | return "Entity"; 143 | case PendingTicks: 144 | return "PendingTicks"; 145 | case BlockExtraData: 146 | return "BlockExtraData"; 147 | case BiomeState: 148 | return "BiomeState"; 149 | case FinalizedState: 150 | return "FinalizedState"; 151 | case BorderBlocks: 152 | return "BorderBlocks"; 153 | case HardCodedSpawnAreas: 154 | return "HardCodedSpawnAreas"; 155 | case Checksums: 156 | return "Checksums"; 157 | case VersionOld: 158 | return "VersionOld"; 159 | case Unknown: 160 | return "Unknown"; 161 | case GenerationSeed: 162 | return "GenerationSeed"; 163 | case BlendingBiomeHeight: 164 | return "BlendingBiomeHeight"; 165 | case MetaDataHash: 166 | return "MetaDataHash"; 167 | case BlendingData: 168 | return "BlendingData"; 169 | case ActorDigestVersion: 170 | return "ActorDigestVersion"; 171 | case RandomTicks: 172 | return "RandomTicks"; 173 | break; 174 | } 175 | return "Unknown"; 176 | } 177 | 178 | std::string chunk_pos::to_string() const { 179 | return std::to_string(this->x) + ", " + std::to_string(this->z) + ", " + 180 | std::to_string(this->dim); 181 | } 182 | 183 | bool chunk_pos::operator<(const chunk_pos &rhs) const { 184 | if (x < rhs.x) return true; 185 | if (rhs.x < x) return false; 186 | if (z < rhs.z) return true; 187 | if (rhs.z < z) return false; 188 | return dim < rhs.dim; 189 | } 190 | 191 | bool chunk_pos::operator==(const chunk_pos &p) const { 192 | return this->x == p.x && this->dim == p.dim && this->z == p.z; 193 | } 194 | 195 | block_pos chunk_pos::get_min_pos(ChunkVersion v) const { 196 | auto [y, _] = this->get_y_range(v); 197 | return {this->x * 16, y, this->z * 16}; 198 | } 199 | block_pos chunk_pos::get_max_pos(ChunkVersion v) const { 200 | auto [_, y] = this->get_y_range(v); 201 | return {this->x * 16 + 15, y, this->z * 16 + 15}; 202 | } 203 | 204 | std::tuple chunk_pos::get_y_range(ChunkVersion v) const { 205 | if (this->dim == 1) return {0, 127}; 206 | if (this->dim == 2) return {0, 255}; 207 | if (this->dim == 0) { 208 | if (v == New) { 209 | return {-64, 319}; 210 | } else { 211 | return {0, 255}; 212 | } 213 | } 214 | return {0, -1}; 215 | } 216 | std::tuple chunk_pos::get_subchunk_index_range(ChunkVersion v) const { 217 | if (this->dim == 1) return {0, 7}; 218 | if (this->dim == 2) return {0, 15}; 219 | if (this->dim == 0) { 220 | if (v == New) { 221 | return {-4, 19}; 222 | } else { 223 | return {0, 15}; 224 | } 225 | } 226 | return {0, -1}; 227 | } 228 | 229 | bool chunk_pos::is_slime() const { 230 | auto seed = (x * 0x1f1f1f1fu) ^ (uint32_t)z; 231 | std::mt19937 mt(seed); 232 | return mt() % 10 == 0; 233 | } 234 | 235 | std::string chunk_key::to_string() const { 236 | auto type_info = 237 | chunk_key_to_str(type) + "(" + std::to_string(static_cast(type)) + ")"; 238 | auto index_info = std::string(); 239 | if (type == SubChunkTerrain) { 240 | index_info = "y = " + std::to_string(y_index); 241 | } 242 | 243 | return "[" + this->cp.to_string() + "] " + type_info + " " + index_info; 244 | } 245 | 246 | std::string chunk_key::to_raw() const { 247 | size_t sz = 9; 248 | if (this->type == 
SubChunkTerrain) sz += 1; 249 | if (this->cp.dim != 0) sz += 4; 250 | std::string r(sz, '\0'); 251 | memcpy(r.data(), &cp.x, 4); 252 | memcpy(r.data() + 4, &cp.z, 4); 253 | if (this->cp.dim != 0) { 254 | memcpy(r.data() + 8, &cp.dim, 4); 255 | r[12] = this->type; 256 | } else { 257 | r[8] = this->type; 258 | } 259 | 260 | if (this->type == SubChunkTerrain) { 261 | r[r.size() - 1] = y_index; 262 | } 263 | return r; 264 | } 265 | 266 | std::string actor_key::to_string() const { return std::to_string(this->actor_uid); } 267 | 268 | std::string village_key::to_string() const { 269 | return this->uuid + "," + village_key_type_to_str(this->type); 270 | } 271 | 272 | chunk_pos block_pos::to_chunk_pos() const { 273 | auto cx = x < 0 ? x - 15 : x; 274 | auto cz = z < 0 ? z - 15 : z; 275 | return {cx / 16, cz / 16, -1}; 276 | } 277 | 278 | chunk_pos block_pos::in_chunk_offset() const { 279 | auto ox = x % 16; 280 | auto oz = z % 16; 281 | if (ox < 0) ox += 16; 282 | if (oz < 0) oz += 16; 283 | return {ox, oz, -1}; 284 | } 285 | } // namespace bl 286 | -------------------------------------------------------------------------------- /src/bedrock_level.cpp: -------------------------------------------------------------------------------- 1 | // 2 | // Created by xhy on 2023/3/30. 3 | // 4 | 5 | #include "bedrock_level.h" 6 | 7 | #include 8 | #include 9 | #include 10 | 11 | #include "bedrock_key.h" 12 | #include "bit_tools.h" 13 | #include "chunk.h" 14 | #include "leveldb/cache.h" 15 | #include "leveldb/comparator.h" 16 | #include "leveldb/db.h" 17 | #include "leveldb/env.h" 18 | #include "leveldb/filter_policy.h" 19 | #include "leveldb/write_batch.h" 20 | #include "leveldb/zlib_compressor.h" 21 | class SlowEnv : public leveldb::Env {}; 22 | 23 | namespace bl { 24 | namespace { 25 | bool load_raw(leveldb::DB *&db, const std::string &raw_key, std::string &raw) { 26 | auto r = db->Get(leveldb::ReadOptions(), raw_key, &raw); 27 | return r.ok(); 28 | } 29 | } // namespace 30 | 31 | const std::string bedrock_level::LEVEL_DATA = "level.dat"; 32 | const std::string bedrock_level::LEVEL_DB = "db"; 33 | 34 | bool bedrock_level::open(const std::string &root) { 35 | namespace fs = std::filesystem; 36 | this->root_name_ = root; 37 | fs::path path(this->root_name_); 38 | path /= LEVEL_DATA; 39 | this->is_open_ = this->dat_.load_from_file(path.string()) && this->read_db(); 40 | return this->is_open_; 41 | } 42 | 43 | bool bedrock_level::read_db() { // NOLINT 44 | namespace fs = std::filesystem; 45 | fs::path path(this->root_name_); 46 | path /= bl::bedrock_level::LEVEL_DB; 47 | leveldb::Status status = leveldb::DB::Open( 48 | this->options_, bl::utils::UTF8ToGBEx(path.string().c_str()), &this->db_); 49 | if (!status.ok()) { 50 | BL_ERROR("Can not open level database: [%s].", status.ToString().c_str()); 51 | } 52 | return status.ok(); 53 | } 54 | 55 | bedrock_level::~bedrock_level() { 56 | this->close(); 57 | delete this->options_.compressors[0]; 58 | delete this->options_.compressors[1]; 59 | delete this->options_.block_cache; 60 | delete this->options_.filter_policy; 61 | }; 62 | 63 | chunk *bedrock_level::get_chunk(const chunk_pos &cp, bool fast_load) { 64 | if (!this->is_open()) { 65 | return nullptr; 66 | } 67 | 68 | if (!cp.valid()) { 69 | BL_ERROR("Invalid Chunk position %s.", cp.to_string().c_str()); 70 | return nullptr; 71 | } 72 | if (this->enable_cache_) { 73 | auto it = this->chunk_data_cache_.find(cp); 74 | if (it != this->chunk_data_cache_.end()) { 75 | return it->second; 76 | } else { 77 | auto *ch = 
this->read_chunk_from_db(cp, fast_load); 78 | if (ch) { 79 | this->chunk_data_cache_[cp] = ch; 80 | } 81 | return ch; 82 | } 83 | } else { 84 | return this->read_chunk_from_db(cp, fast_load); 85 | } 86 | } 87 | 88 | void bedrock_level::close() { 89 | for (auto &kv : this->chunk_data_cache_) { 90 | delete kv.second; 91 | } 92 | this->clear_cache(); 93 | this->village_data_.clear_data(); 94 | this->player_data_.clear_data(); 95 | delete this->db_; 96 | this->db_ = nullptr; 97 | this->is_open_ = false; 98 | } 99 | 100 | void bedrock_level::set_cache(bool enable) { 101 | this->enable_cache_ = enable; 102 | if (!this->enable_cache_) { 103 | this->clear_cache(); 104 | } 105 | } 106 | 107 | chunk *bedrock_level::read_chunk_from_db(const chunk_pos &cp, bool fast_load) { 108 | auto *chunk = new bl::chunk(cp); 109 | if (!chunk->load_data(*this, fast_load)) { 110 | delete chunk; 111 | return nullptr; 112 | } else { 113 | return chunk; 114 | } 115 | } 116 | 117 | void bedrock_level::clear_cache() { 118 | for (auto &kv : this->chunk_data_cache_) delete kv.second; 119 | this->chunk_data_cache_.clear(); 120 | } 121 | 122 | actor *bedrock_level::load_actor(const std::string &raw_uid) { 123 | const auto key = "actorprefix" + raw_uid; 124 | std::string raw_data; 125 | if (!load_raw(this->db_, key, raw_data)) return nullptr; 126 | auto ac = new actor; 127 | if (!ac->load(raw_data.data(), raw_data.size())) { 128 | delete ac; 129 | return nullptr; 130 | } else { 131 | return ac; 132 | } 133 | } 134 | bool bedrock_level::remove_chunk(const chunk_pos &cp) { 135 | // remove new chunk actors 136 | leveldb::WriteBatch batch; 137 | bl::actor_digest_key key{cp}; 138 | std::string raw; 139 | // 没啥要解析的,不用管错误 140 | if (load_raw(this->db_, key.to_raw(), raw)) { 141 | bl::actor_digest_list ads; 142 | ads.load(raw); 143 | for (auto &uid : ads.actor_digests_) { 144 | batch.Delete("actorprefix" + uid); 145 | } 146 | } 147 | 148 | // terrain 149 | for (int8_t i = -4; i <= 20; i++) { 150 | bl::chunk_key terrain_key{chunk_key::SubChunkTerrain, cp, i}; 151 | batch.Delete(terrain_key.to_raw()); 152 | } 153 | // others 154 | for (int i = 43; i <= 65; i++) { 155 | auto t = static_cast(i); 156 | if (t != chunk_key::SubChunkTerrain) { 157 | auto dk = bl::chunk_key{t, cp}; 158 | batch.Delete(dk.to_raw()); 159 | } 160 | } 161 | 162 | // version 163 | bl::chunk_key version_key{chunk_key::VersionOld, cp}; 164 | batch.Delete(version_key.to_raw()); 165 | auto s = this->db_->Write(leveldb::WriteOptions(), &batch); 166 | return s.ok(); 167 | } 168 | void bedrock_level::foreach_global_keys( 169 | const std::function &f) { 170 | auto *it = this->db_->NewIterator(leveldb::ReadOptions()); 171 | for (it->SeekToFirst(); it->Valid(); it->Next()) { 172 | auto ck = bl::chunk_key::parse(it->key().ToString()); 173 | if (ck.valid()) continue; 174 | auto actor_key = bl::actor_key::parse(it->key().ToString()); 175 | if (actor_key.valid()) continue; 176 | f(it->key().ToString(), it->value().ToString()); 177 | } 178 | delete it; 179 | } 180 | 181 | void bedrock_level::load_global_data() { 182 | this->foreach_global_keys([this](const std::string &key, const std::string &value) { 183 | if (key.find("player") != std::string::npos) { 184 | this->player_data_.append_nbt(key, value); 185 | } else if (key.find("map") == 0) { 186 | this->map_item_data_.append_nbt(key, value); 187 | } else { 188 | bl::village_key vk = village_key::parse(key); 189 | if (vk.valid()) { 190 | this->village_data_.append_village(vk, value); 191 | } 192 | } 193 | }); 194 | } 195 | 196 | 
bedrock_level::bedrock_level() { 197 | options_.filter_policy = leveldb::NewBloomFilterPolicy(10); 198 | options_.block_cache = leveldb::NewLRUCache(20 * 1024 * 1024); 199 | options_.write_buffer_size = 4 * 1024 * 1024; 200 | options_.compressors[0] = new leveldb::ZlibCompressorRaw(-1); 201 | options_.compressors[1] = new leveldb::ZlibCompressor(); 202 | } 203 | } // namespace bl 204 | -------------------------------------------------------------------------------- /src/chunk.cpp: -------------------------------------------------------------------------------- 1 | // 2 | // Created by xhy on 2023/3/30. 3 | // 4 | 5 | #include "chunk.h" 6 | 7 | #include 8 | 9 | #include "bedrock_key.h" 10 | #include "bedrock_level.h" 11 | #include "utils.h" 12 | 13 | namespace bl { 14 | 15 | /** 16 | * Overworld [-64 ~-1]+[0~319] 17 | * [-64,-49][-48,-33][-32,-17][-16,-1] 18 | * NEther [0~127] 19 | * The End [0~255] 20 | */ 21 | 22 | namespace { 23 | bool load_raw(leveldb::DB *&db, const std::string &raw_key, std::string &raw) { 24 | auto r = db->Get(leveldb::ReadOptions(), raw_key, &raw); 25 | return r.ok(); 26 | } 27 | 28 | bool contains_key(leveldb::DB *&db, const std::string &raw_key) { 29 | std::string raw; 30 | auto r = db->Get(leveldb::ReadOptions(), raw_key, &raw); 31 | return r.ok(); 32 | } 33 | } // namespace 34 | 35 | bool chunk::valid_in_chunk_pos(int cx, int y, int cz, int dim) { 36 | if (cx < 0 || cx > 15 || cz < 0 || cz > 15 || dim < 0 || dim > 2) return false; 37 | int min_h[]{-64, 0, 0}; 38 | int max_h[]{319, 127, 255}; 39 | return y >= min_h[dim] && y <= max_h[dim]; 40 | } 41 | 42 | void chunk::map_y_to_subchunk(int y, int &index, int &offset) { 43 | index = y < 0 ? (y - 15) / 16 : y / 16; 44 | offset = y % 16; 45 | if (offset < 0) offset += 16; 46 | } 47 | 48 | /** 49 | * 50 | * @param cx 区块内x 51 | * @param y 区块内y(同时也是主世界y) 52 | * @param cz 区块内z 53 | * @return 54 | */ 55 | block_info chunk::get_block(int cx, int y, int cz) { 56 | int index; 57 | int offset; 58 | map_y_to_subchunk(y, index, offset); 59 | auto it = this->sub_chunks_.find(index); 60 | if (it == this->sub_chunks_.end()) { 61 | return {}; 62 | } 63 | return it->second->get_block(cx, offset, cz); 64 | } 65 | 66 | block_info chunk::get_block_fast(int cx, int y, int cz) { 67 | int index; 68 | int offset; 69 | map_y_to_subchunk(y, index, offset); 70 | auto it = this->sub_chunks_.find(index); 71 | if (it == this->sub_chunks_.end()) { 72 | return {}; 73 | } 74 | return it->second->get_block_fast(cx, offset, cz); 75 | } 76 | 77 | // block_info chunk::get_top_block(int cx, int cz) { 78 | // auto height = this->get_height(cx, cz); 79 | // return this->get_block(cx, height - 1, cz); 80 | // } 81 | // 82 | // palette::compound_tag *chunk::get_top_block_raw(int cx, int cz) { 83 | // auto height = this->get_height(cx, cz); 84 | // return this->get_block_raw(cx, height - 1, cz); 85 | // } 86 | // 87 | 88 | palette::compound_tag *chunk::get_block_raw(int cx, int y, int cz) { 89 | int index; 90 | int offset; 91 | map_y_to_subchunk(y, index, offset); 92 | auto it = this->sub_chunks_.find(index); 93 | if (it == this->sub_chunks_.end()) { 94 | return nullptr; 95 | } 96 | return it->second->get_block_raw(cx, offset, cz); 97 | } 98 | 99 | biome chunk::get_biome(int cx, int y, int cz) { return this->d3d_.get_biome(cx, y, cz); } 100 | 101 | bool chunk::load_subchunks(bedrock_level &level) { 102 | // 默认先用new,因为version字段太怪,看不懂版本 103 | auto [min_index, max_index] = this->pos_.get_subchunk_index_range(ChunkVersion::New); 104 | for (auto sub_index = 
min_index; sub_index <= max_index; sub_index++) { 105 | // load all sub chunks 106 | auto terrain_key = bl::chunk_key{chunk_key::SubChunkTerrain, this->pos_, sub_index}; 107 | std::string raw; 108 | if (load_raw(level.db(), terrain_key.to_raw(), raw)) { 109 | auto *sb = new bl::sub_chunk(); 110 | sb->set_y_index( 111 | sub_index); // set default index (no `sub-chunk index` in version 8 chunks) 112 | // //see 113 | // https://gist.github.com/Tomcc/a96af509e275b1af483b25c543cfbf37?permalink_comment_id=3901255#gistcomment-3901255 114 | if (!sb->load(raw.data(), raw.size())) { 115 | BL_ERROR("Can not load sub chunk %d %d %d %d", pos_.x, pos_.z, pos_.dim, 116 | sub_index); 117 | delete sb; // delete error sub chunks 118 | continue; 119 | } 120 | this->sub_chunks_[sub_index] = sb; 121 | } 122 | } 123 | 124 | if (!this->sub_chunks_.empty()) { 125 | // 根据subchunk格式猜测一个 version,后面可能需要修改 126 | this->version = this->sub_chunks_.begin()->second->version() == 9 ? New : Old; 127 | } 128 | return true; 129 | } 130 | bool chunk::load_biomes(bedrock_level &level) { 131 | this->d3d_.set_chunk_pos(this->pos_); 132 | this->d3d_.set_version(this->version); 133 | if (this->version == New) { 134 | auto d3d_key = bl::chunk_key{chunk_key::Data3D, this->pos_}; 135 | std::string d3d_raw; 136 | if (load_raw(level.db(), d3d_key.to_raw(), d3d_raw) && 137 | this->d3d_.load_from_d3d(d3d_raw.data(), d3d_raw.size())) { 138 | return true; 139 | } else { 140 | return false; 141 | } 142 | 143 | } else { 144 | auto d2d_key = bl::chunk_key{chunk_key::Data2D, this->pos_}; 145 | std::string d2d_raw; 146 | if (load_raw(level.db(), d2d_key.to_raw(), d2d_raw) && 147 | this->d3d_.load_from_d2d(d2d_raw.data(), d2d_raw.size())) { 148 | return true; 149 | } else { 150 | return false; 151 | } 152 | } 153 | } 154 | bool chunk::load_pending_ticks(bedrock_level &level) { 155 | auto pt_key = bl::chunk_key{chunk_key::PendingTicks, this->pos_}; 156 | std::string block_entity_raw; 157 | if (load_raw(level.db(), pt_key.to_raw(), block_entity_raw) && !block_entity_raw.empty()) { 158 | this->pending_ticks_ = 159 | palette::read_palette_to_end(block_entity_raw.data(), block_entity_raw.size()); 160 | // 161 | } 162 | return true; 163 | } 164 | void chunk::load_entities(bedrock_level &level) { 165 | // try read old version actors 166 | auto entity_key = bl::chunk_key{chunk_key::Entity, this->pos_}; 167 | std::string block_entity_raw; 168 | if (load_raw(level.db(), entity_key.to_raw(), block_entity_raw) && 169 | !block_entity_raw.empty()) { 170 | auto actors = 171 | palette::read_palette_to_end(block_entity_raw.data(), block_entity_raw.size()); 172 | for (auto &a : actors) { 173 | auto *ac = new actor; 174 | if (ac->load_from_nbt(a)) { 175 | this->entities_.push_back(ac); 176 | } else { 177 | delete ac; 178 | } 179 | delete a; 180 | } 181 | } 182 | // new version actor key: 183 | // 1. read key file form digest 184 | // 2. 
read actor from actor keys [actorprefix+uid] 185 | bl::actor_digest_key key{this->pos_}; 186 | std::string raw; 187 | // 没啥要解析的,不用管错误 188 | if (!load_raw(level.db(), key.to_raw(), raw)) { 189 | return; 190 | } 191 | 192 | bl::actor_digest_list list; 193 | list.load(raw); 194 | for (auto &uid : list.actor_digests_) { 195 | auto actor_key = "actorprefix" + uid; 196 | std::string raw_actor; 197 | if (load_raw(level.db(), actor_key, raw_actor)) { 198 | auto ac = new actor; 199 | if (!ac->load(raw_actor.data(), raw_actor.size())) { 200 | delete ac; 201 | } else { 202 | this->entities_.push_back(ac); 203 | } 204 | } 205 | } 206 | } 207 | void chunk::load_hsa(bedrock_level &level) { 208 | auto hsa_key = bl::chunk_key{chunk_key::HardCodedSpawnAreas, this->pos_}; 209 | std::string raw; 210 | if (!load_raw(level.db(), hsa_key.to_raw(), raw)) return; 211 | if (raw.size() < 4) return; 212 | int count = *reinterpret_cast(raw.data()); 213 | if (raw.size() != count * 25ul + 4ul) return; 214 | 215 | auto *d = raw.data(); 216 | for (int i = 0; i < count; i++) { 217 | hardcoded_spawn_area area; 218 | int offset = i * 25 + 4; 219 | area.min_pos.x = *reinterpret_cast(d + offset); 220 | area.min_pos.y = *reinterpret_cast(d + offset + 4); 221 | area.min_pos.z = *reinterpret_cast(d + offset + 8); 222 | area.max_pos.x = *reinterpret_cast(d + offset + 12); 223 | area.max_pos.y = *reinterpret_cast(d + offset + 16); 224 | area.max_pos.z = *reinterpret_cast(d + offset + 20); 225 | auto type = d[offset + 24]; 226 | if (type == SwampHut || type == OceanMonument || type == NetherFortress || 227 | type == PillagerOutpost) { 228 | area.type = static_cast(type); 229 | } 230 | this->HSAs_.push_back(area); 231 | } 232 | } 233 | bool chunk::load_block_entities(bedrock_level &level) { 234 | auto be_key = bl::chunk_key{chunk_key::BlockEntity, this->pos_}; 235 | std::string block_entity_raw; 236 | if (load_raw(level.db(), be_key.to_raw(), block_entity_raw) && !block_entity_raw.empty()) { 237 | this->block_entities_ = 238 | palette::read_palette_to_end(block_entity_raw.data(), block_entity_raw.size()); 239 | } else { 240 | } 241 | 242 | return true; 243 | } 244 | 245 | bool chunk::load_data(bedrock_level &level, bool fast_load) { 246 | if (this->loaded()) return true; 247 | if ((!contains_key(level.db(), 248 | bl::chunk_key{chunk_key::VersionOld, this->pos_}.to_raw())) && 249 | (!contains_key(level.db(), 250 | bl::chunk_key{chunk_key::VersionNew, this->pos_}.to_raw()))) { 251 | return false; 252 | } 253 | 254 | this->load_subchunks(level); 255 | if (this->sub_chunks_.empty()) return false; 256 | this->load_biomes(level); 257 | this->load_entities(level); 258 | 259 | if (!fast_load) { 260 | this->load_block_entities(level); 261 | this->load_pending_ticks(level); // 有bug 262 | } 263 | this->load_hsa(level); 264 | this->fast_load_mode_ = fast_load; 265 | this->loaded_ = true; 266 | return this->loaded_; 267 | } 268 | 269 | // 从0开始的数据 270 | int chunk::get_height(int cx, int cz) { return this->d3d_.height(cx, cz); } 271 | biome chunk::get_top_biome(int cx, int cz) { return this->d3d_.get_top_biome(cx, cz); } 272 | 273 | std::vector> chunk::get_biome_y(int y) { return this->d3d_.get_biome_y(y); } 274 | bl::chunk_pos chunk::get_pos() const { return this->pos_; } 275 | chunk::~chunk() { 276 | for (auto &sub : this->sub_chunks_) { 277 | delete sub.second; 278 | } 279 | for (auto &p : this->pending_ticks_) delete p; 280 | for (auto &p : this->block_entities_) delete p; 281 | for (auto &e : this->entities_) delete e; 282 | } 283 | 284 | 
bl::color chunk::get_block_color(int cx, int y, int cz) { 285 | auto *raw = this->get_block_raw(cx, y, cz); 286 | if (!raw) return {}; 287 | return get_block_color_from_SNBT(raw->to_raw()); 288 | } 289 | 290 | // bl::color chunk::get_top_block_color(int cx, int cz) { 291 | // auto height = this->get_height(cx, cz); 292 | // return this->get_block_color(cx, height - 1, cz); 293 | // } 294 | 295 | } // namespace bl 296 | -------------------------------------------------------------------------------- /src/color.cpp: -------------------------------------------------------------------------------- 1 | // 2 | // Created by xhy on 2023/6/18. 3 | // 4 | 5 | #include "color.h" 6 | 7 | #include 8 | #include 9 | #include 10 | #include 11 | 12 | #include "json/json.hpp" 13 | #include "palette.h" 14 | #include "stb/stb_image_write.h" 15 | 16 | namespace bl { 17 | namespace { 18 | // biome id -> water 19 | // 和群系有关的颜色白名单 20 | std::unordered_set water_block_names; 21 | std::unordered_set leaves_block_names; 22 | std::unordered_set grass_block_names; 23 | 24 | std::unordered_map biome_water_map; 25 | std::unordered_map biome_leave_map; 26 | std::unordered_map biome_grass_map; 27 | 28 | bl::color default_water_color{63, 118, 228}; 29 | bl::color default_leave_color{113, 167, 77}; 30 | bl::color default_grass_color{142, 185, 113}; 31 | 32 | // 33 | // biome id -> name 34 | std::unordered_map biome_id_map; 35 | // biome id -> biome color 36 | std::unordered_map biome_color_map; 37 | 38 | // key 是palette的raw形态 39 | std::unordered_map block_color_map; 40 | 41 | bl::color blend_with_biome(const std::unordered_map& map, 42 | bl::color gray, bl::color default_color, bl::biome b) { 43 | auto it = map.find(b); 44 | auto x = it == map.end() ? default_color : it->second; 45 | gray.r = static_cast(gray.r / 255.0 * x.r); 46 | gray.g = static_cast(gray.g / 255.0 * x.g); 47 | gray.b = static_cast(gray.b / 255.0 * x.b); 48 | return gray; 49 | } 50 | 51 | } // namespace 52 | 53 | color get_biome_color(bl::biome b) { 54 | auto it = biome_color_map.find(b); 55 | return it == biome_color_map.end() ? bl::color() : it->second; 56 | } 57 | color get_block_color_from_SNBT(const std::string& name) { 58 | auto it = block_color_map.find(name); 59 | if (it == block_color_map.end()) { 60 | return {}; 61 | } 62 | return it->second; 63 | } 64 | 65 | std::string get_biome_name(biome b) { 66 | auto it = biome_id_map.find(b); 67 | return it == biome_id_map.end() ? 
"unknown" : it->second; 68 | } 69 | 70 | bool init_biome_color_palette_from_file(const std::string& filename) { 71 | try { 72 | std::ifstream f(filename); 73 | if (!f.is_open()) { 74 | BL_ERROR("Can not open biome color file %s", filename.c_str()); 75 | return false; 76 | } 77 | nlohmann::json j; 78 | f >> j; 79 | for (auto& [key, value] : j.items()) { 80 | int id = value["id"].get(); 81 | biome_id_map[static_cast(id)] = key; 82 | 83 | if (value.contains("rgb")) { 84 | auto rgb = value["rgb"]; 85 | assert(rgb.size() == 3); 86 | color c; 87 | c.r = static_cast(rgb[0].get()); 88 | c.g = static_cast(rgb[1].get()); 89 | c.b = static_cast(rgb[2].get()); 90 | biome_color_map[static_cast(id)] = c; 91 | } 92 | 93 | // water 94 | if (value.contains("water")) { 95 | auto water = value["water"]; 96 | assert(water.size() == 3); 97 | color c; 98 | c.r = static_cast(water[0].get()); 99 | c.g = static_cast(water[1].get()); 100 | c.b = static_cast(water[2].get()); 101 | biome_water_map[static_cast(id)] = c; 102 | if (key == "default") default_water_color = c; 103 | } 104 | 105 | if (value.contains("grass")) { 106 | auto grass = value["grass"]; 107 | assert(grass.size() == 3); 108 | color c; 109 | c.r = static_cast(grass[0].get()); 110 | c.g = static_cast(grass[1].get()); 111 | c.b = static_cast(grass[2].get()); 112 | biome_grass_map[static_cast(id)] = c; 113 | if (key == "default") default_grass_color = c; 114 | } 115 | 116 | if (value.contains("leaves")) { 117 | auto leaves = value["leaves"]; 118 | assert(leaves.size() == 3); 119 | color c; 120 | c.r = static_cast(leaves[0].get()); 121 | c.g = static_cast(leaves[1].get()); 122 | c.b = static_cast(leaves[2].get()); 123 | biome_leave_map[static_cast(id)] = c; 124 | if (key == "default") default_leave_color = c; 125 | } 126 | } 127 | 128 | } catch (std::exception&) { 129 | return false; 130 | } 131 | 132 | BL_LOGGER("Water color Map: %zu", biome_water_map.size()); 133 | BL_LOGGER("Leaves color Map: %zu", biome_leave_map.size()); 134 | BL_LOGGER("Grass color Map: %zu", biome_grass_map.size()); 135 | return true; 136 | } 137 | 138 | bool init_block_color_palette_from_file(const std::string& filename) { 139 | try { 140 | std::ifstream f(filename); 141 | if (!f.is_open()) { 142 | BL_ERROR("Can not open file %s", filename.c_str()); 143 | return false; 144 | } 145 | nlohmann::json j; 146 | f >> j; 147 | BL_LOGGER("Load json success: %s", filename.c_str()); 148 | for (auto& item : j) { 149 | using namespace bl::palette; 150 | auto extra_data = item["extra_data"]; 151 | auto block_name = item["name"].get(); 152 | 153 | if (extra_data.contains("use_grass_color") && 154 | extra_data["use_grass_color"].get()) { 155 | grass_block_names.insert(block_name); 156 | } 157 | if (extra_data.contains("use_leaves_color") && 158 | extra_data["use_leaves_color"].get()) { 159 | leaves_block_names.insert(block_name); 160 | } 161 | if (extra_data.contains("use_water_color") && 162 | extra_data["use_water_color"].get()) { 163 | water_block_names.insert(block_name); 164 | } 165 | 166 | if (extra_data.contains("color")) { 167 | auto rgb = extra_data["color"]; 168 | color c; 169 | c.r = static_cast(rgb[0].get() * 255.0); 170 | c.g = static_cast(rgb[1].get() * 255.0); 171 | c.b = static_cast(rgb[2].get() * 255.0); 172 | c.a = static_cast(rgb[3].get() * 255.0); 173 | 174 | auto* root = new compound_tag(""); 175 | auto* name_key = new string_tag("name"); 176 | name_key->value = block_name; 177 | auto* stat_tag = new compound_tag("states"); 178 | if (item.contains("states")) { 179 | for 
(auto& [k, v] : item["states"].items()) { 180 | if (v.type() == nlohmann::json::value_t::string) { 181 | auto* t = new string_tag(k); 182 | t->value = v.get(); 183 | stat_tag->put(t); 184 | } else if (v.type() == nlohmann::json::value_t::boolean) { 185 | auto* t = new byte_tag(k); 186 | t->value = v.get(); 187 | stat_tag->put(t); 188 | } else if (v.type() == nlohmann::json::value_t::number_float) { 189 | auto* t = new float_tag(k); 190 | t->value = v.get(); 191 | stat_tag->put(t); 192 | } else if (v.type() == nlohmann::json::value_t::number_integer) { 193 | auto* t = new int_tag(k); 194 | t->value = v.get(); 195 | stat_tag->put(t); 196 | 197 | } else if (v.type() == nlohmann::json::value_t::number_unsigned) { 198 | auto* t = new int_tag(k); 199 | t->value = v.get(); 200 | stat_tag->put(t); 201 | } 202 | } 203 | } 204 | root->put(name_key); 205 | root->put(stat_tag); 206 | 207 | block_color_map[root->to_raw()] = c; 208 | delete root; 209 | } 210 | } 211 | } catch (std::exception& e) { 212 | std::cout << "Err: " << e.what() << std::endl; 213 | return false; 214 | } 215 | BL_LOGGER("Water blocks:"); 216 | for (auto& b : water_block_names) { 217 | BL_LOGGER(" - %s", b.c_str()); 218 | } 219 | BL_LOGGER("Leaves blocks:"); 220 | for (auto& b : leaves_block_names) { 221 | BL_LOGGER(" - %s", b.c_str()); 222 | } 223 | BL_LOGGER("Grass blocks:"); 224 | for (auto& b : grass_block_names) { 225 | BL_LOGGER(" - %s", b.c_str()); 226 | } 227 | 228 | return true; 229 | } 230 | 231 | void export_image(const std::vector>& b, int ppi, const std::string& name) { 232 | const int c = 3; 233 | const int h = (int)b.size() * ppi; 234 | const int w = (int)b[0].size() * ppi; 235 | 236 | std::vector data(c * w * h, 0); 237 | 238 | for (int i = 0; i < h; i++) { 239 | for (int j = 0; j < w; j++) { 240 | auto color = b[i / ppi][j / ppi]; 241 | data[3 * (j + i * w)] = color.r; 242 | data[3 * (j + i * w) + 1] = color.g; 243 | data[3 * (j + i * w) + 2] = color.b; 244 | } 245 | } 246 | 247 | stbi_write_png(name.c_str(), w, h, c, data.data(), 0); 248 | } 249 | std::unordered_map& get_block_color_table() { return block_color_map; } 250 | 251 | bl::color blend_color_with_biome(const std::string& name, bl::color color, bl::biome b) { 252 | if (water_block_names.count(name)) 253 | return blend_with_biome(biome_water_map, color, default_water_color, b); 254 | if (grass_block_names.count(name)) 255 | return blend_with_biome(biome_grass_map, color, default_grass_color, b); 256 | if (leaves_block_names.count(name)) 257 | return blend_with_biome(biome_leave_map, color, default_leave_color, b); 258 | return color; 259 | } 260 | 261 | } // namespace bl 262 | -------------------------------------------------------------------------------- /src/data_3d.cpp: -------------------------------------------------------------------------------- 1 | // 2 | // Created by xhy on 2023/3/30. 
3 | // 4 | 5 | #include "data_3d.h" 6 | 7 | #include 8 | #include 9 | 10 | #include "bedrock_key.h" 11 | #include "chunk.h" 12 | #include "utils.h" 13 | 14 | namespace bl { 15 | namespace { 16 | std::vector load_subchunk_biome(const byte_t *data, int &read, size_t len) { 17 | read = 1; 18 | 19 | uint8_t head = data[0]; 20 | if (head == 0xff) return std::vector(4096, biome::none); 21 | 22 | auto bits = static_cast(head >> 1); 23 | int index[4096]{0}; 24 | constexpr auto BLOCK_NUM = 4096; 25 | int palette_len = 1; 26 | if (bits != 0) { 27 | int bpw = 32 / bits; 28 | auto word_count = BLOCK_NUM / bpw; 29 | if (BLOCK_NUM % bpw != 0) word_count++; 30 | int position = 0; 31 | 32 | for (int wordi = 0; wordi < word_count; wordi++) { 33 | auto word = *reinterpret_cast(data + read + wordi * 4); 34 | for (int block = 0; block < bpw; block++) { 35 | int state = (word >> ((position % bpw) * bits)) & ((1 << bits) - 1); 36 | index[position] = state; 37 | position++; 38 | } 39 | } 40 | 41 | read += word_count << 2; 42 | palette_len = *reinterpret_cast(data + read); 43 | read += 4; 44 | } 45 | 46 | std::vector res(4096, bl::biome::none); 47 | std::vector biomes_palettes; 48 | 49 | for (int i = 0; i < palette_len; i++) { 50 | auto biomeId = *reinterpret_cast(data + read); 51 | read += 4; 52 | biomes_palettes.push_back(static_cast(biomeId)); 53 | } 54 | 55 | for (int i = 0; i < 4096; i++) { 56 | if (index[i] >= 0 && index[i] < static_cast(biomes_palettes.size())) { 57 | res[i] = biomes_palettes[index[i]]; 58 | } 59 | } 60 | 61 | return res; 62 | } 63 | } // namespace 64 | bool biome3d::load_from_d3d(const byte_t *data, size_t len) { 65 | int index = 0; 66 | if (len < 512) { 67 | BL_ERROR("Invalid Data3d format"); 68 | return false; 69 | } 70 | memcpy(this->height_map_.data(), data, 512); 71 | index += 512; 72 | while (index < static_cast(len)) { 73 | int read = 0; 74 | auto sub_chunk_biome = load_subchunk_biome(data + index, read, len); 75 | for (int y = 0; y < 16; y++) { 76 | auto layer = 77 | std::vector>(16, std::vector(16, bl::none)); 78 | for (int x = 0; x < 16; x++) { 79 | for (int z = 0; z < 16; z++) { 80 | layer[x][z] = sub_chunk_biome[x * 256 + z * 16 + y]; 81 | } 82 | } 83 | 84 | this->biomes_.push_back(layer); 85 | } 86 | index += read; 87 | } 88 | return true; 89 | } 90 | 91 | biome biome3d::get_biome(int cx, int y, int cz) { 92 | if (this->version_ == Old) { 93 | return this->biomes_.empty() ? bl::biome::none : this->biomes_[0][cx][cz]; 94 | } 95 | auto [my, _] = pos_.get_y_range(this->version_); 96 | y -= my; 97 | 98 | // printf("y = %d\n", y); 99 | if (y >= static_cast(this->biomes_.size()) || y < 0) { 100 | return biome::none; 101 | } 102 | return this->biomes_[y][cx][cz]; 103 | } 104 | 105 | std::vector> biome3d::get_biome_y(int y) { 106 | if (this->version_ == Old) { 107 | return this->biomes_.empty() ? std::vector>( 108 | 16, std::vector(16, bl::none)) 109 | : this->biomes_[0]; 110 | } 111 | auto [my, _] = pos_.get_y_range(this->version_); 112 | y -= my; 113 | if (y >= static_cast(this->biomes_.size())) { 114 | return {}; 115 | } 116 | return this->biomes_[y]; 117 | } 118 | 119 | biome biome3d::get_top_biome(int cx, int cz) { 120 | if (this->version_ == Old) return this->get_biome(cx, 0, cz); 121 | int y = (int)this->biomes_.size() - 1; 122 | while (y >= 0 && this->biomes_[y][cx][cz] == none) { 123 | y--; 124 | } 125 | return y < 0 ? 
biome::none : this->biomes_[y][cx][cz]; 126 | } 127 | bool biome3d::load_from_d2d(const byte_t *data, size_t len) { 128 | if (len != 768) { // height map: 512bytes biome: 256 * 4 = 1024 bytes 129 | BL_ERROR("Invalid Data2d format (%zu)", len); 130 | return false; 131 | } 132 | memcpy(this->height_map_.data(), data, 512); 133 | auto layer = std::vector>(16, std::vector(16, bl::biome::none)); 134 | for (int x = 0; x < 16; x++) { 135 | for (int z = 0; z < 16; z++) { 136 | layer[x][z] = static_cast(data[512 + x + 16 * z]); 137 | } 138 | } 139 | this->biomes_.push_back(layer); 140 | return true; 141 | } 142 | 143 | } // namespace bl 144 | -------------------------------------------------------------------------------- /src/global.cpp: -------------------------------------------------------------------------------- 1 | // 2 | // Created by xhy on 2023/3/31. 3 | // 4 | 5 | #include "global.h" 6 | 7 | #include 8 | namespace bl { 9 | void village_data::reset( 10 | const std::unordered_map>& data) { 11 | this->clear_data(); 12 | this->data_ = data; 13 | } 14 | void village_data::append_village(const village_key& key, const std::string& value) { 15 | int read = 0; 16 | auto* nbt = bl::palette::read_one_palette(value.data(), read); 17 | if (static_cast(read) == value.size() && nbt) { 18 | this->data_[key.uuid][static_cast(key.type)] = nbt; 19 | } 20 | } 21 | village_data::~village_data() { this->clear_data(); } 22 | void village_data::clear_data() { 23 | for (auto& vill : this->data_) { 24 | for (auto v : vill.second) { 25 | delete v; 26 | } 27 | } 28 | this->data_.clear(); 29 | } 30 | 31 | void general_kv_nbts::reset( 32 | const std::unordered_map& data) { 33 | this->clear_data(); 34 | this->data_ = data; 35 | } 36 | void general_kv_nbts::append_nbt(const std::string& key, const std::string& value) { 37 | int read = 0; 38 | auto* nbt = bl::palette::read_one_palette(value.data(), read); 39 | if (static_cast(read) == value.size() && nbt) { 40 | this->data_[key] = nbt; 41 | } 42 | } 43 | general_kv_nbts::~general_kv_nbts() { 44 | for (auto& kv : this->data_) { 45 | delete kv.second; 46 | } 47 | } 48 | void general_kv_nbts::clear_data() { 49 | for (auto& kv : this->data_) { 50 | delete kv.second; 51 | } 52 | this->data_.clear(); 53 | } 54 | 55 | } // namespace bl 56 | -------------------------------------------------------------------------------- /src/include/actor.h: -------------------------------------------------------------------------------- 1 | // 2 | // Created by xhy on 2023/3/31. 
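// Illustrative sketch: the comments in this header describe actor storage as a per-chunk
// digest ("dige" + chunk_pos.to_raw()) whose value is a list of 8-byte uids, each of
// which resolves to an "actorprefix" + uid entry. A minimal helper that builds such a
// key from one digest entry:
#include <string>

inline std::string actorprefix_key_from_digest(const std::string &digest_entry /* 8 raw bytes */) {
    return "actorprefix" + digest_entry;  // uid bytes are appended verbatim, not stringified
}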
3 | // 4 | 5 | #ifndef BEDROCK_LEVEL_ACRTOR_H 6 | #define BEDROCK_LEVEL_ACRTOR_H 7 | #include "bedrock_key.h" 8 | #include "palette.h" 9 | #include "utils.h" 10 | namespace bl { 11 | class actor { 12 | public: 13 | bool load(const byte_t* data, size_t len); 14 | 15 | bool load_from_nbt(bl::palette::compound_tag* nbt); 16 | 17 | [[nodiscard]] inline int64_t uid() const { return this->uid_; } 18 | [[nodiscard]] inline std::string uid_raw() const { 19 | std::string res(8, 0); 20 | memcpy(res.data(), &this->uid_, 8); 21 | return res; 22 | } 23 | 24 | void dump(); 25 | [[nodiscard]] vec3 pos() const { return this->pos_; }; 26 | [[nodiscard]] std::string identifier() const { return this->identifier_; }; 27 | [[nodiscard]] bl::palette::compound_tag* root() const { return this->root_; } 28 | actor() = default; 29 | 30 | private: 31 | bool preload(bl::palette::compound_tag* root); 32 | 33 | bool loaded_ = false; 34 | int64_t uid_{-1}; 35 | bl::palette::compound_tag* root_{nullptr}; 36 | std::string identifier_{"minecraft:unknown"}; 37 | vec3 pos_{0, 0, 0}; 38 | 39 | public: 40 | ~actor(); 41 | }; 42 | 43 | /* 实体摘要信息 44 | * key - "dige" + chunk_pos.to_raw() 45 | * value = key* 46 | * key = "actorprefix" + uid 47 | */ 48 | 49 | struct actor_digest_list { 50 | bool load(const std::string& raw) { 51 | if (raw.size() % 8 != 0) return false; 52 | const size_t actor_num = raw.size() / 8; 53 | for (auto i = 0u; i < actor_num; i++) { 54 | this->actor_digests_.emplace_back(raw.begin() + i, raw.begin() + i + 8); 55 | } 56 | return true; 57 | } 58 | std::vector actor_digests_; 59 | }; 60 | 61 | } // namespace bl 62 | 63 | #endif // BEDROCK_LEVEL_ACRTOR_H 64 | -------------------------------------------------------------------------------- /src/include/bedrock_key.h: -------------------------------------------------------------------------------- 1 | // 2 | // Created by xhy on 2023/3/30. 
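// Illustrative sketch: a plausible byte layout for chunk_key::to_raw(), assuming the
// conventional Bedrock leveldb key format referenced by the links in this header
// (little-endian x and z, an extra dimension int for non-overworld keys, the tag byte,
// then an optional sub-chunk index). The authoritative behaviour lives in
// bedrock_key.cpp; treat the field order here as an assumption.
#include <cstdint>
#include <cstring>
#include <string>

inline std::string make_chunk_key(int32_t x, int32_t z, int32_t dim, char tag,
                                  bool with_y_index = false, int8_t y_index = 0) {
    std::string out;
    auto put_i32 = [&out](int32_t v) {
        char buf[4];
        std::memcpy(buf, &v, 4);
        out.append(buf, 4);
    };
    put_i32(x);
    put_i32(z);
    if (dim != 0) put_i32(dim);  // overworld keys omit the dimension field
    out.push_back(tag);          // e.g. 0x2f ('/') for SubChunkTerrain
    if (with_y_index) out.push_back(static_cast<char>(y_index));
    return out;
}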
3 | // 4 | 5 | #ifndef BEDROCK_LEVEL_BEDROCK_KEY_H 6 | #define BEDROCK_LEVEL_BEDROCK_KEY_H 7 | 8 | #include 9 | #include 10 | #include 11 | #include 12 | 13 | namespace bl { 14 | 15 | enum ChunkVersion { 16 | Old = 0, // 1.12~1.17 17 | New = 1 // 1.8+ 18 | }; 19 | 20 | struct block_pos; 21 | struct chunk_pos { 22 | int32_t x{0}; 23 | int32_t z{0}; 24 | int32_t dim{-1}; 25 | 26 | chunk_pos(int32_t xx, int32_t zz, int32_t dimension) : x(xx), z(zz), dim(dimension) {} 27 | 28 | chunk_pos() = default; 29 | 30 | [[nodiscard]] bool valid() const { return this->dim >= 0 && this->dim <= 2; } 31 | 32 | [[nodiscard]] std::string to_string() const; 33 | 34 | bool operator==(const chunk_pos &p) const; 35 | 36 | bool operator<(const chunk_pos &rhs) const; 37 | 38 | [[nodiscard]] std::tuple get_y_range(ChunkVersion v) const; 39 | 40 | [[nodiscard]] std::tuple get_subchunk_index_range(ChunkVersion v) const; 41 | [[nodiscard]] block_pos get_min_pos(ChunkVersion v) const; 42 | [[nodiscard]] block_pos get_max_pos(ChunkVersion v) const; 43 | 44 | [[nodiscard]] bool is_slime() const; 45 | }; 46 | 47 | struct block_pos { 48 | int x{}; 49 | int y{}; 50 | int z{}; 51 | 52 | block_pos(int xx, int yy, int zz) : x(xx), y(yy), z(zz) {} 53 | 54 | [[nodiscard]] chunk_pos to_chunk_pos() const; 55 | 56 | [[nodiscard]] chunk_pos in_chunk_offset() const; 57 | }; 58 | struct vec3 { 59 | float x{}; 60 | float y{}; 61 | float z{}; 62 | 63 | vec3(float xx, float yy, float zz) : x(xx), y(yy), z(zz) {} 64 | }; 65 | 66 | struct chunk_key { 67 | [[nodiscard]] std::string to_string() const; 68 | 69 | // https://github.com/reedacartwright/rbedrock/blob/6d347a67a258dc910148cbca863f15d77db1721c/R/keys.R#L124 70 | // https://learn.microsoft.com/en-us/minecraft/creator/documents/actorstorage#non-actor-data-chunk-key-ids 71 | enum key_type { 72 | Data3D = 43, // 0x2b (+) 73 | VersionNew = 44, // 0x2c (,) 74 | Data2D = 45, // 0x2d (-), height map + biomes 75 | Data2DLegacy = 46, // 0x2e (.) 76 | SubChunkTerrain = 47, // 0x2f (/) 77 | LegacyTerrain = 48, //? 78 | BlockEntity = 49, 79 | Entity = 50, // no longer used 80 | PendingTicks = 51, 81 | BlockExtraData = 52, //? 
82 | BiomeState = 53, 83 | FinalizedState = 54, 84 | BorderBlocks = 56, // Education Edition Feature 85 | HardCodedSpawnAreas = 57, 86 | RandomTicks = 58, 87 | Checksums = 59, // 0x3b (;) 88 | GenerationSeed = 60, 89 | BlendingBiomeHeight = 62, 90 | MetaDataHash = 63, 91 | BlendingData = 64, 92 | ActorDigestVersion = 65, 93 | VersionOld = 118, // 0x76 (v) 94 | Unknown = -1 95 | }; 96 | 97 | [[nodiscard]] bool valid() const { return this->cp.valid() && this->type != Unknown; } 98 | 99 | static std::string chunk_key_to_str(chunk_key::key_type key); 100 | 101 | static chunk_key parse(const std::string &key); 102 | 103 | [[maybe_unused]] const static chunk_key INVALID_CHUNK_KEY; 104 | 105 | [[nodiscard]] std::string to_raw() const; 106 | 107 | key_type type{Unknown}; 108 | chunk_pos cp; 109 | int8_t y_index{}; 110 | }; 111 | 112 | struct actor_key { 113 | int64_t actor_uid{static_cast(0xffffffffffffffff)}; 114 | 115 | [[nodiscard]] inline bool valid() const { 116 | return this->actor_uid != static_cast(0xffffffffffffffff); 117 | } 118 | 119 | [[nodiscard]] std::string to_string() const; 120 | 121 | static actor_key parse(const std::string &key); 122 | }; 123 | 124 | struct actor_digest_key { 125 | chunk_pos cp; 126 | 127 | static actor_digest_key parse(const std::string &key); 128 | 129 | [[nodiscard]] inline bool valid() const { return this->cp.valid(); } 130 | 131 | [[nodiscard]] std::string to_string() const; 132 | 133 | [[nodiscard]] std::string to_raw() const; 134 | }; 135 | 136 | struct village_key { 137 | enum key_type { INFO = 0, DWELLERS = 1, PLAYERS = 2, POI = 3, Unknown }; 138 | 139 | static std::string village_key_type_to_str(key_type t); 140 | 141 | [[nodiscard]] bool valid() const { 142 | return this->uuid.size() == 36 && this->type != Unknown; 143 | } 144 | 145 | [[nodiscard]] std::string to_string() const; 146 | 147 | static village_key parse(const std::string &key); 148 | 149 | [[nodiscard]] std::string to_raw() const; 150 | 151 | std::string uuid; 152 | key_type type{Unknown}; 153 | }; 154 | 155 | enum HSAType : int8_t { 156 | NetherFortress = 1, 157 | SwampHut = 2, 158 | OceanMonument = 3, 159 | PillagerOutpost = 5, 160 | Unknown = 6 161 | }; 162 | struct hardcoded_spawn_area { 163 | HSAType type{Unknown}; 164 | block_pos min_pos{0, 0, 0}; 165 | block_pos max_pos{0, 0, 0}; 166 | }; 167 | } // namespace bl 168 | 169 | namespace std { 170 | 171 | template <> 172 | struct hash { 173 | std::size_t operator()(const bl::chunk_pos &k) const { 174 | size_t hash = 3241; 175 | hash = 3457689L * hash + k.x; 176 | hash = 8734625L * hash + k.z; 177 | hash = 2873465L * hash + k.dim; 178 | return hash; 179 | } 180 | }; 181 | } // namespace std 182 | 183 | #endif // BEDROCK_LEVEL_BEDROCK_KEY_H 184 | -------------------------------------------------------------------------------- /src/include/bedrock_level.h: -------------------------------------------------------------------------------- 1 | // 2 | // Created by xhy on 2023/3/30. 
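// Illustrative sketch: typical use of the bedrock_level class declared below. It opens a
// world directory (the one containing level.dat), fetches one chunk and reads a block.
// The world path is hypothetical and error handling is reduced to early returns.
#include <cstdio>
#include "bedrock_level.h"

inline int read_one_block_example() {
    bl::bedrock_level level;
    if (!level.open("./data/worlds/a")) return 1;
    bl::chunk *ch = level.get_chunk(bl::chunk_pos{0, 0, 0});  // x = 0, z = 0, overworld
    if (!ch) return 2;
    auto info = ch->get_block(0, 64, 0);  // chunk-local x/z in [0,15], absolute y
    std::printf("block at (0,64,0): %s\n", info.name.c_str());
    level.close();
    return 0;
}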
3 | // 4 | 5 | #ifndef BEDROCK_LEVEL_BEDROCK_LEVEL_H 6 | #define BEDROCK_LEVEL_BEDROCK_LEVEL_H 7 | #include 8 | #include 9 | #include 10 | #include 11 | #include 12 | 13 | #include "bedrock_key.h" 14 | #include "chunk.h" 15 | #include "global.h" 16 | #include "level_dat.h" 17 | #include "leveldb/db.h" 18 | 19 | namespace bl { 20 | 21 | class bedrock_level { 22 | public: 23 | bedrock_level(); 24 | 25 | /* 26 | * 返回存档是否已经正常打开 27 | */ 28 | [[nodiscard]] bool is_open() const { return this->is_open_; } 29 | 30 | /** 31 | * 打开存档 32 | * @param root 存档根目录,即level.dat所在的目录 33 | * @return 34 | */ 35 | bool open(const std::string &root); 36 | 37 | // 返回db的引用 38 | leveldb::DB *&db() { return this->db_; } 39 | 40 | void close(); 41 | 42 | chunk *get_chunk(const chunk_pos &cp, bool fast_load = false); 43 | general_kv_nbts &player_data() { return this->player_data_; } 44 | 45 | bl::village_data &village_data() { return this->village_data_; } 46 | 47 | bl::general_kv_nbts &map_item_data() { return this->map_item_data_; } 48 | 49 | bl::general_kv_nbts &other_item_data() { return this->other_data_; } 50 | 51 | /** 52 | * 获取缓存的区块的的数量 53 | * @return 54 | */ 55 | [[nodiscard]] inline size_t cached_chunk_size() const { 56 | return this->chunk_data_cache_.size(); 57 | }; 58 | 59 | /** 60 | * 获取level.dat文件的对象wrapper 61 | * @return 62 | */ 63 | level_dat &dat() { return this->dat_; } 64 | 65 | /** 66 | * 开启时,Level会缓存读取过的区块数据,没有容量限制(后面可以改成cache) 67 | * 关闭时,会清空内容 68 | * @param enable 69 | */ 70 | void set_cache(bool enable); 71 | 72 | actor *load_actor(const std::string &raw_uid); 73 | 74 | ~bedrock_level(); 75 | 76 | void foreach_global_keys( 77 | const std::function &f); 78 | 79 | void load_global_data(); 80 | // write 81 | bool remove_chunk(const chunk_pos &cp); 82 | 83 | [[nodiscard]] std::string root_path() const { return this->root_name_; } 84 | static const std::string LEVEL_DATA; 85 | static const std::string LEVEL_DB; 86 | 87 | private: 88 | /** 89 | * 从数据库中读取一个区块的所有数据 90 | * @param cp 区块坐标 91 | * @return 92 | */ 93 | chunk *read_chunk_from_db(const bl::chunk_pos &cp, bool fast_load); 94 | 95 | void clear_cache(); 96 | 97 | private: 98 | bool is_open_{false}; 99 | 100 | level_dat dat_; 101 | 102 | leveldb::DB *db_{nullptr}; 103 | 104 | std::string root_name_; 105 | 106 | std::map chunk_data_cache_; 107 | 108 | private: 109 | bool read_db(); 110 | 111 | bool enable_cache_{false}; 112 | leveldb::Options options_{}; 113 | // global data 114 | bl::village_data village_data_; 115 | bl::general_kv_nbts player_data_; 116 | bl::general_kv_nbts map_item_data_; 117 | bl::general_kv_nbts other_data_; 118 | }; 119 | } // namespace bl 120 | 121 | #endif // BEDROCK_LEVEL_BEDROCK_LEVEL_H 122 | -------------------------------------------------------------------------------- /src/include/bit_tools.h: -------------------------------------------------------------------------------- 1 | // 2 | // Created by xhy on 2023/3/29. 
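// Illustrative sketch: bl::bits::mask(low, high) builds an 8-bit mask with bits
// low..high (inclusive) set. The expected values below mirror the commented-out cases
// in tests/bit_tools_test.cpp.
#include <cassert>
#include "bit_tools.h"

inline void mask_examples() {
    assert(bl::bits::mask(0, 7) == 0b11111111);
    assert(bl::bits::mask(1, 5) == 0b00111110);
    assert(bl::bits::mask(3, 5) == 0b00111000);
}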
3 | // 4 | 5 | #ifndef BEDROCK_LEVEL_BIT_TOOLS_H 6 | #define BEDROCK_LEVEL_BIT_TOOLS_H 7 | 8 | #include 9 | #include 10 | #include 11 | 12 | #include "utils.h" 13 | 14 | namespace bl::bits { 15 | 16 | inline uint8_t mask(uint8_t low, uint8_t high) { return (0xff >> (7u - high) & (0xff << low)); } 17 | 18 | // 19 | // template 20 | // std::vector rearrange_bytes(size_t bit_len, const uint8_t *data, size_t len) { 21 | // Assert(bit_len > 0 || bit_len < 8, "Bit len should within [1,7]"); 22 | // Assert((len << 3) % bit_len == 0, "Invalid len of data"); 23 | // Assert(bit_len <= sizeof(T) << 3, "Container is too small"); 24 | // std::vector res; 25 | // 26 | // size_t index{0}; 27 | // uint8_t offset{0}; 28 | // const uint8_t R_OFF = 8 - bit_len; 29 | // while (true) { 30 | // // 开读 31 | // uint8_t v = data[index]; 32 | // auto next = bit_len + offset; 33 | // if (next < 8) { // 内部偏移即可 34 | // res.push_back((uint8_t) (v << offset) >> R_OFF); 35 | // offset = next; 36 | // } else if (next == 8) { 37 | // res.push_back((uint8_t) (v << offset) >> R_OFF); 38 | // offset = 0; 39 | // index++; 40 | // if (index >= len) break; 41 | // } else { 42 | // auto remain = next - 8; 43 | // res.push_back(((mask(0, 7u - offset) & v) << remain) + 44 | // (data[index + 1] >> (8 - remain))); 45 | // offset = remain; 46 | // index++; 47 | // } 48 | // } 49 | // return res; 50 | // } 51 | // 52 | // //https://gist.github.com/Tomcc/a96af509e275b1af483b25c543cfbf37 53 | // /** 54 | // * total 4096 block 55 | // * enum class Type : uint8_t { 56 | // Paletted1 = 1, // 32 blocks per word --> 128 world (512bytes) 57 | // Paletted2 = 2, // 16 blocks per word --> 256 world (1024bytes) 58 | // Paletted3 = 3, // 10 blocks and 2 bits of padding per word 409 word + 6block(?3bytes?) 59 | // Paletted4 = 4, // 8 blocks per word --> 512 world (2048bytes) 60 | // Paletted5 = 5, // 6 blocks and 2 bits of padding per word 682word + 4block(?3bytes?) 61 | // Paletted6 = 6, // 5 blocks and 2 bits of padding per word 819word + 1block(?1bytes?) 62 | // Paletted8 = 8, // 4 blocks per word --> 1024 world (4096bytes) 63 | // Paletted16 = 16, // 2 blocks per word --> 2048 word (8192bytes) 64 | // } 65 | // */ 66 | // 67 | 68 | std::vector rearrange_words(size_t bits_len, const byte_t *data, size_t len); 69 | 70 | } // namespace bl::bits 71 | 72 | #endif // BEDROCK_LEVEL_BIT_TOOLS_H 73 | -------------------------------------------------------------------------------- /src/include/chunk.h: -------------------------------------------------------------------------------- 1 | // 2 | // Created by xhy on 2023/3/30. 
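// Illustrative sketch: the y -> (sub-chunk index, in-sub-chunk offset) mapping that
// chunk::map_y_to_subchunk (declared below) performs, written as a floor division by 16.
// The sample values match tests/chunk_test.cpp.
#include <cassert>

inline void y_mapping_example() {
    auto map_y = [](int y, int &index, int &offset) {
        index = y >= 0 ? y / 16 : (y - 15) / 16;  // floor(y / 16)
        offset = y - index * 16;                  // always in [0, 15]
    };
    int i = 0, o = 0;
    map_y(-64, i, o); assert(i == -4 && o == 0);
    map_y(-1, i, o);  assert(i == -1 && o == 15);
    map_y(16, i, o);  assert(i == 1 && o == 0);
}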
3 | // 4 | 5 | #ifndef BEDROCK_LEVEL_CHUNK_H 6 | #define BEDROCK_LEVEL_CHUNK_H 7 | 8 | // cached chunks 9 | 10 | #include 11 | #include 12 | 13 | #include "actor.h" 14 | #include "bedrock_key.h" 15 | #include "color.h" 16 | #include "data_3d.h" 17 | #include "sub_chunk.h" 18 | namespace bl { 19 | 20 | class bedrock_level; 21 | 22 | class chunk { 23 | public: 24 | friend class bedrock_level; 25 | static bool valid_in_chunk_pos(int cx, int y, int cz, int dim); 26 | static void map_y_to_subchunk(int y, int &index, int &offset); 27 | 28 | public: 29 | [[nodiscard]] inline bool fast_load() const { return this->fast_load_mode_; } 30 | 31 | block_info get_block(int cx, int y, int cz); 32 | 33 | block_info get_block_fast(int cx, int y, int cz); 34 | 35 | // block_info get_top_block(int cx, int cz); 36 | 37 | palette::compound_tag *get_block_raw(int cx, int y, int cz); 38 | 39 | // palette::compound_tag *get_top_block_raw(int cx, int cz); 40 | 41 | bl::color get_block_color(int cx, int y, int cz); 42 | 43 | // bl::color get_top_block_color(int cx, int cz); 44 | 45 | biome get_biome(int cx, int y, int cz); 46 | 47 | std::vector> get_biome_y(int y); 48 | 49 | biome get_top_biome(int cx, int cz); 50 | 51 | [[nodiscard]] bl::chunk_pos get_pos() const; 52 | 53 | int get_height(int cx, int cz); 54 | 55 | explicit chunk(const chunk_pos &pos) : loaded_(false), pos_(pos) {}; 56 | 57 | chunk() = delete; 58 | 59 | [[nodiscard]] inline bool loaded() const { return this->loaded_; } 60 | std::vector &block_entities() { return this->block_entities_; } 61 | std::vector &pending_ticks() { return this->pending_ticks_; } 62 | 63 | std::vector entities() & { return this->entities_; } 64 | 65 | std::vector HSAs() { return this->HSAs_; } 66 | 67 | [[nodiscard]] ChunkVersion get_version() const { return this->version; } 68 | ~chunk(); 69 | 70 | private: 71 | bool load_data(bedrock_level &level, bool fast_load); 72 | 73 | private: 74 | bool load_subchunks(bedrock_level &level); 75 | 76 | bool load_biomes(bedrock_level &level); 77 | 78 | void load_entities(bedrock_level &level); 79 | 80 | bool load_pending_ticks(bedrock_level &level); 81 | 82 | bool load_block_entities(bedrock_level &level); 83 | 84 | void load_hsa(bedrock_level &level); 85 | 86 | bool loaded_{false}; 87 | const chunk_pos pos_; 88 | // sub_chunks 89 | std::map sub_chunks_; 90 | // biome and height map 91 | biome3d d3d_{}; 92 | // actor digest 93 | // bl::actor_digest_list actor_digest_list_; 94 | // block entities 95 | std::vector entities_; 96 | std::vector block_entities_; 97 | std::vector pending_ticks_; 98 | 99 | std::vector HSAs_; 100 | ChunkVersion version{New}; 101 | bool fast_load_mode_{false}; 102 | }; 103 | } // namespace bl 104 | 105 | #endif // BEDROCK_LEVEL_CHUNK_H 106 | -------------------------------------------------------------------------------- /src/include/color.h: -------------------------------------------------------------------------------- 1 | // 2 | // Created by xhy on 2023/6/18. 
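// Illustrative sketch: packing the color struct declared below into a single 0xRRGGBBAA
// integer with each channel in its own 8-bit field (r << 24, g << 16, b << 8, a). This
// is the conventional packing and is meant as a reference when reading color::hex().
#include <cstdint>

inline uint32_t pack_rgba(uint8_t r, uint8_t g, uint8_t b, uint8_t a) {
    return (static_cast<uint32_t>(r) << 24) | (static_cast<uint32_t>(g) << 16) |
           (static_cast<uint32_t>(b) << 8) | static_cast<uint32_t>(a);
}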
3 | // 4 | 5 | #ifndef BEDROCK_LEVEL_COLOR_H 6 | #define BEDROCK_LEVEL_COLOR_H 7 | #include 8 | #include 9 | 10 | #include "data_3d.h" 11 | namespace bl { 12 | struct color { 13 | uint8_t r{0}; 14 | uint8_t g{0}; 15 | uint8_t b{0}; 16 | uint8_t a{255}; 17 | [[nodiscard]] inline int32_t hex() const { 18 | return (static_cast(r) << 24) | (static_cast(g) << 16) | 19 | (static_cast(b) << 6) | static_cast(a); 20 | } 21 | }; 22 | 23 | [[maybe_unused]] color get_biome_color(biome b); 24 | 25 | std::string get_biome_name(biome b); 26 | 27 | color get_block_color_from_SNBT(const std::string& name); 28 | 29 | // [[maybe_unused]] bl::color get_water_color(bl::color gray, bl::biome b); 30 | 31 | // bl::color get_leave_color(bl::color gray, bl::biome b); 32 | // bl::color get_grass_color(bl::color gray, bl::biome b); 33 | 34 | bl::color blend_color_with_biome(const std::string& name, bl::color color, bl::biome b); 35 | 36 | [[maybe_unused]] std::unordered_map& get_block_color_table(); 37 | bool init_biome_color_palette_from_file(const std::string& filename); 38 | 39 | bool init_block_color_palette_from_file(const std::string& filename); 40 | 41 | [[maybe_unused]] void export_image(const std::vector>& c, int ppi, 42 | const std::string& name); 43 | 44 | } // namespace bl 45 | 46 | #endif // BEDROCK_LEVEL_COLOR_H 47 | -------------------------------------------------------------------------------- /src/include/data_3d.h: -------------------------------------------------------------------------------- 1 | // 2 | // Created by xhy on 2023/3/30. 3 | // 4 | 5 | #ifndef BEDROCK_LEVEL_DATA_3D_H 6 | #define BEDROCK_LEVEL_DATA_3D_H 7 | 8 | #include 9 | #include 10 | #include 11 | 12 | #include "bedrock_key.h" 13 | #include "utils.h" 14 | namespace bl { 15 | 16 | // clang-format off 17 | 18 | enum biome : uint8_t { 19 | ocean = 0, 20 | plains = 1, 21 | desert = 2, 22 | extreme_hills = 3, 23 | forest = 4, 24 | taiga = 5, 25 | swampland = 6, 26 | river = 7, 27 | hell = 8, 28 | the_end = 9, 29 | legacy_frozen_ocean = 10, 30 | frozen_river = 11, 31 | ice_plains = 12, 32 | ice_mountains = 13, 33 | mushroom_island = 14, 34 | mushroom_island_shore = 15, 35 | beach = 16, 36 | desert_hills = 17, 37 | forest_hills = 18, 38 | taiga_hills = 19, 39 | extreme_hills_edge = 20, 40 | jungle = 21, 41 | jungle_hills = 22, 42 | jungle_edge = 23, 43 | deep_ocean = 24, 44 | stone_beach = 25, 45 | cold_beach = 26, 46 | birch_forest = 27, 47 | birch_forest_hills = 28, 48 | roofed_forest = 29, 49 | cold_taiga = 30, 50 | cold_taiga_hills = 31, 51 | mega_taiga = 32, 52 | mega_taiga_hills = 33, 53 | extreme_hills_plus_trees = 34, 54 | savanna = 35, 55 | savanna_plateau = 36, 56 | mesa = 37, 57 | mesa_plateau_stone = 38, 58 | mesa_plateau = 39, 59 | warm_ocean = 40, 60 | deep_warm_ocean = 41, 61 | lukewarm_ocean = 42, 62 | deep_lukewarm_ocean = 43, 63 | cold_ocean = 44, 64 | deep_cold_ocean = 45, 65 | frozen_ocean = 46, 66 | deep_frozen_ocean = 47, 67 | bamboo_jungle = 48, 68 | bamboo_jungle_hills = 49, 69 | sunflower_plains = 129, 70 | desert_mutated = 130, 71 | extreme_hills_mutated = 131, 72 | flower_forest = 132, 73 | taiga_mutated = 133, 74 | swampland_mutated = 134, 75 | ice_plains_spikes = 140, 76 | jungle_mutated = 149, 77 | jungle_edge_mutated = 151, 78 | birch_forest_mutated = 155, 79 | birch_forest_hills_mutated = 156, 80 | roofed_forest_mutated = 157, 81 | cold_taiga_mutated = 158, 82 | redwood_taiga_mutated = 160, 83 | redwood_taiga_hills_mutated = 161, 84 | extreme_hills_plus_trees_mutated = 162, 85 | savanna_mutated = 163, 86 
| savanna_plateau_mutated = 164, 87 | mesa_bryce = 165, 88 | mesa_plateau_stone_mutated = 166, 89 | mesa_plateau_mutated = 167, 90 | soulsand_valley = 178, 91 | crimson_forest = 179, 92 | warped_forest = 180, 93 | basalt_deltas = 181, 94 | lofty_peaks = 182, 95 | snow_capped_peaks = 183, 96 | snowy_slopes = 184, 97 | mountain_grove = 185, 98 | mountain_meadow = 186, 99 | lush_caves = 187, 100 | dripstone_caves = 188, 101 | stony_peaks = 189, 102 | deep_dark = 190, 103 | mangrove_swamp = 191, 104 | cherry_groves = 192, 105 | none = 255, 106 | }; 107 | // clang-format on 108 | 109 | class biome3d { 110 | public: 111 | bool load_from_d3d(const byte_t *data, size_t len); 112 | 113 | bool load_from_d2d(const byte_t *data, size_t len); 114 | 115 | inline int height(int x, int z) { 116 | auto [my, _] = this->pos_.get_y_range(this->version_); 117 | return this->height_map_[x + z * 16] + my; 118 | } 119 | 120 | [[nodiscard]] inline std::array height_map() const { 121 | return this->height_map_; 122 | } 123 | 124 | biome get_biome(int cx, int y, int cz); 125 | 126 | std::vector> get_biome_y(int y); 127 | 128 | biome get_top_biome(int cx, int cz); 129 | 130 | void set_chunk_pos(const bl::chunk_pos &cp) { this->pos_ = cp; } 131 | void set_version(ChunkVersion version) { this->version_ = version; } 132 | 133 | private: 134 | std::array height_map_; 135 | std::vector>> biomes_; 136 | bl::chunk_pos pos_; 137 | ChunkVersion version_; 138 | }; 139 | } // namespace bl 140 | 141 | #endif // BEDROCK_LEVEL_DATA_3D_H 142 | -------------------------------------------------------------------------------- /src/include/global.h: -------------------------------------------------------------------------------- 1 | // 2 | // Created by xhy on 2023/3/31. 3 | // 4 | 5 | #ifndef BEDROCK_LEVEL_GLOBAL_H 6 | #define BEDROCK_LEVEL_GLOBAL_H 7 | #include "bedrock_key.h" 8 | #include "memory" 9 | #include "palette.h" 10 | namespace bl { 11 | 12 | class village_data { 13 | public: 14 | void reset( 15 | const std::unordered_map>& data); 16 | void append_village(const bl::village_key& key, const std::string& value); 17 | 18 | inline std::unordered_map>& data() { 19 | return this->data_; 20 | } 21 | 22 | void clear_data(); 23 | ~village_data(); 24 | 25 | private: 26 | std::unordered_map> data_; 27 | }; 28 | 29 | class general_kv_nbts { 30 | public: 31 | void reset(const std::unordered_map& data); 32 | 33 | void append_nbt(const std::string& key, const std::string& value); 34 | inline std::unordered_map& data() { 35 | return this->data_; 36 | }; 37 | inline const std::unordered_map& data() const { 38 | return this->data_; 39 | }; 40 | ~general_kv_nbts(); 41 | 42 | void clear_data(); 43 | 44 | private: 45 | std::unordered_map data_; 46 | }; 47 | 48 | } // namespace bl 49 | 50 | class global_data {}; 51 | 52 | #endif // BEDROCK_LEVEL_GLOBAL_H 53 | -------------------------------------------------------------------------------- /src/include/level_dat.h: -------------------------------------------------------------------------------- 1 | // 2 | // Created by xhy on 2023/6/21. 
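// Illustrative sketch: level.dat, as consumed by the loader below, is an 8-byte header
// followed by one uncompressed little-endian NBT compound. Splitting the header into a
// 4-byte storage version and a 4-byte payload length is an assumption; this class keeps
// the 8 bytes opaque and simply re-emits them in to_raw().
#include <cstdint>
#include <cstring>
#include <vector>

struct level_dat_header {
    int32_t storage_version{0};  // assumed little-endian
    int32_t payload_length{0};   // assumed byte count of the NBT payload that follows
};

inline bool parse_level_dat_header(const std::vector<char> &data, level_dat_header &out) {
    if (data.size() <= 8) return false;
    std::memcpy(&out.storage_version, data.data(), 4);
    std::memcpy(&out.payload_length, data.data() + 4, 4);
    return true;
}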
3 | // 4 | 5 | #ifndef BEDROCK_LEVEL_LEVEL_DAT_H 6 | #define BEDROCK_LEVEL_LEVEL_DAT_H 7 | #include 8 | 9 | #include "bedrock_key.h" 10 | #include "palette.h" 11 | namespace bl { 12 | class level_dat { 13 | public: 14 | bool load_from_file(const std::string& path); 15 | 16 | bool load_from_raw_data(const std::vector& data); 17 | 18 | void set_nbt(bl::palette::compound_tag* tag); 19 | 20 | [[nodiscard]] inline bool loaded() const { return this->loaded_; } 21 | [[nodiscard]] inline block_pos spawn_position() const { return this->spawn_position_; } 22 | [[nodiscard]] inline uint64_t storage_version() const { return this->storage_version_; } 23 | [[nodiscard]] inline std::string level_name() const { return this->level_name_; } 24 | 25 | [[nodiscard]] bl::palette::compound_tag* root() { return this->root_; } 26 | 27 | [[nodiscard]] std::string to_raw() const; 28 | 29 | ~level_dat(); 30 | private: 31 | bool preload_data(); 32 | 33 | private: 34 | bool loaded_{false}; 35 | block_pos spawn_position_{0, 0, 0}; 36 | std::string level_name_; 37 | int storage_version_{10}; 38 | bl::palette::compound_tag* root_{nullptr}; 39 | std::string header_; 40 | }; 41 | } // namespace bl 42 | 43 | #endif // BEDROCK_LEVEL_LEVEL_DAT_H 44 | -------------------------------------------------------------------------------- /src/include/player.h: -------------------------------------------------------------------------------- 1 | // 2 | // Created by xhy on 2023/3/31. 3 | // 4 | 5 | #ifndef BEDROCK_LEVEL_PLAYER_H 6 | #define BEDROCK_LEVEL_PLAYER_H 7 | 8 | 9 | class player { 10 | 11 | }; 12 | 13 | 14 | #endif //BEDROCK_LEVEL_PLAYER_H 15 | -------------------------------------------------------------------------------- /src/include/scoreboard.h: -------------------------------------------------------------------------------- 1 | // 2 | // Created by xhy on 2023/3/31. 3 | // 4 | 5 | #ifndef BEDROCK_LEVEL_SCOREBOARD_H 6 | #define BEDROCK_LEVEL_SCOREBOARD_H 7 | 8 | 9 | class scoreboard { 10 | 11 | }; 12 | 13 | 14 | #endif //BEDROCK_LEVEL_SCOREBOARD_H 15 | -------------------------------------------------------------------------------- /src/include/sub_chunk.h: -------------------------------------------------------------------------------- 1 | // 2 | // Created by xhy on 2023/3/29. 
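// Illustrative sketch: the leading bytes of a serialized sub-chunk as the parser in
// src/sub_chunk.cpp reads them: a format version (8 for roughly 1.2-1.17, 9 for 1.18+),
// the number of block-storage layers, and, for version 9 only, the signed sub-chunk
// Y index.
#include <cstddef>
#include <cstdint>

struct sub_chunk_header {
    uint8_t version{0};     // 8 or 9
    uint8_t layers_num{0};  // usually 1; 2 when a second (e.g. waterlogged) layer exists
    int8_t y_index{0};      // present only when version == 9
};

inline bool read_sub_chunk_header(const char *data, size_t len, sub_chunk_header &h, int &read) {
    if (!data || len < 2) return false;
    h.version = static_cast<uint8_t>(data[0]);
    if (h.version != 8 && h.version != 9) return false;
    h.layers_num = static_cast<uint8_t>(data[1]);
    read = 2;
    if (h.version == 9) {
        if (len < 3) return false;
        h.y_index = static_cast<int8_t>(data[2]);
        read = 3;
    }
    return true;
}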
3 | // 4 | 5 | #ifndef BEDROCK_LEVEL_SUB_CHUNK_H 6 | #define BEDROCK_LEVEL_SUB_CHUNK_H 7 | 8 | #include 9 | #include 10 | #include 11 | #include 12 | #include 13 | 14 | #include "color.h" 15 | #include "palette.h" 16 | namespace bl { 17 | struct block_info { 18 | std::string name{"minecraft:unknown"}; 19 | bl::color color{173, 8, 172, 255}; 20 | }; 21 | 22 | class sub_chunk { 23 | public: 24 | struct layer { 25 | layer() = default; 26 | uint8_t bits{}; 27 | uint8_t type{}; 28 | uint32_t palette_len{}; 29 | std::vector blocks{}; 30 | std::vector palettes; 31 | 32 | ~layer(); 33 | }; 34 | 35 | block_info get_block(int rx, int ry, int rz); 36 | 37 | block_info get_block_fast(int rx, int ry, int rz); 38 | 39 | palette::compound_tag *get_block_raw(int rx, int ry, int rz); 40 | 41 | sub_chunk() = default; 42 | 43 | void set_version(uint8_t version) { this->version_ = version; } 44 | 45 | void set_y_index(int8_t y_index) { this->y_index_ = y_index; } 46 | 47 | void set_layers_num(uint8_t layers_num) { this->layers_num_ = layers_num; } 48 | 49 | bool load(const byte_t *data, size_t len); 50 | 51 | // for develop 52 | void dump_to_file(FILE *fp) const; 53 | 54 | [[nodiscard]] inline int8_t y_index() const { return this->y_index_; } 55 | [[nodiscard]] inline uint8_t version() const { return this->version_; }; 56 | ~sub_chunk(); 57 | 58 | private: 59 | void push_back_layer(layer *layer) { this->layers_.push_back(layer); } 60 | 61 | uint8_t version_{0xff}; 62 | int8_t y_index_{0}; 63 | uint8_t layers_num_{0xff}; 64 | std::vector layers_; 65 | }; 66 | 67 | } // namespace bl 68 | 69 | #endif // BEDROCK_LEVEL_SUB_CHUNK_H 70 | -------------------------------------------------------------------------------- /src/include/utils.h: -------------------------------------------------------------------------------- 1 | // 2 | // Created by xhy on 2023/3/29. 3 | // 4 | 5 | #ifndef BEDROCK_LEVEL_UTILS_H 6 | #define BEDROCK_LEVEL_UTILS_H 7 | 8 | #include 9 | #include 10 | #include 11 | #include 12 | 13 | typedef std::chrono::high_resolution_clock timer_clock; 14 | typedef int64_t microsecond_t; 15 | 16 | #define PROF_TIMER(label, Codes) \ 17 | auto start_##label = std::chrono::high_resolution_clock::now(); \ 18 | { Codes } \ 19 | auto e_##label = std::chrono::high_resolution_clock ::now() - start_##label; \ 20 | auto time_##label = std::chrono::duration_cast(e_##label).count(); 21 | 22 | #define DEBUG 23 | 24 | // #ifdef WIN32 25 | // #define FN (__builtin_strrchr(__FILE__, '\\') ? __builtin_strrchr(__FILE__, '\\') + 1 : __FILE__) 26 | // #else 27 | // #define FN (__builtin_strrchr(__FILE__, '/') ? __builtin_strrchr(__FILE__, '/') + 1 : __FILE__) 28 | #define FN "" 29 | // #endif 30 | #define BL_LOGGER(...) log(FN, __FUNCTION__, __LINE__, __VA_ARGS__) 31 | #define BL_ERROR(...) error_msg(FN, __FUNCTION__, __LINE__, __VA_ARGS__) 32 | 33 | void log(const char *file_name, const char *function_name, size_t line, const char *fmt, ...); 34 | 35 | void error_msg(const char *file_name, const char *function_name, size_t line, const char *fmt, ...); 36 | 37 | #ifdef DEBUG 38 | #define Assert(Expr, ...) 
M_Assert(#Expr, Expr, __FILE__, __LINE__, __VA_ARGS__) 39 | #else 40 | #define Assert(Expr, Msg) ; 41 | #endif 42 | 43 | typedef char byte_t; 44 | static_assert(sizeof(byte_t) == 1); 45 | 46 | void M_Assert(const char *expr_str, bool expr, const char *file, int line, const char *fmt, ...); 47 | 48 | // disable data copy 49 | struct NonCopyable { 50 | NonCopyable &operator=(const NonCopyable &) = delete; 51 | 52 | NonCopyable(const NonCopyable &) = delete; 53 | 54 | NonCopyable() = default; 55 | }; 56 | 57 | namespace bl::utils { 58 | std::vector read_file(const std::string &file_name); 59 | 60 | void write_file(const std::string &file_name, const byte_t *data, size_t len); 61 | 62 | std::string UTF8ToGBEx(const char *utf8); 63 | } // namespace bl::utils 64 | 65 | #endif // BEDROCK_LEVEL_UTILS_H 66 | -------------------------------------------------------------------------------- /src/level_dat.cpp: -------------------------------------------------------------------------------- 1 | // 2 | // Created by xhy on 2023/6/21. 3 | // 4 | 5 | #include "level_dat.h" 6 | 7 | #include 8 | 9 | #include "utils.h" 10 | 11 | namespace bl { 12 | 13 | bool level_dat::load_from_file(const std::string &path) { 14 | using namespace bl::palette; 15 | namespace fs = std::filesystem; 16 | if (!fs::exists(path)) { 17 | return false; 18 | } 19 | 20 | auto data = utils::read_file(path); 21 | return this->load_from_raw_data(data); 22 | } 23 | bool level_dat::preload_data() { 24 | using namespace bl::palette; 25 | auto name_tag = root_->get("LevelName"); 26 | if (name_tag && name_tag->type() == tag_type::String) { 27 | this->level_name_ = dynamic_cast(name_tag)->value; 28 | } 29 | 30 | auto x_tag = root_->get("spawnZ"); 31 | auto y_tag = root_->get("SpawnY"); 32 | auto z_tag = root_->get("SpawnZ"); 33 | if (x_tag && x_tag->type() == tag_type::Int) { 34 | this->spawn_position_.x = dynamic_cast(x_tag)->value; 35 | } 36 | if (y_tag && y_tag->type() == tag_type::Int) { 37 | this->spawn_position_.y = dynamic_cast(y_tag)->value; 38 | } 39 | 40 | if (z_tag && z_tag->type() == tag_type::Int) { 41 | this->spawn_position_.z = dynamic_cast(z_tag)->value; 42 | } 43 | 44 | return true; 45 | } 46 | 47 | bool level_dat::load_from_raw_data(const std::vector &data) { 48 | using namespace bl::palette; 49 | if (data.size() <= 8) return false; 50 | int read = 0; 51 | this->header_ = std::string(data.data(), 8); 52 | this->root_ = read_one_palette(data.data() + 8, read); 53 | if (!root_ || read != static_cast(data.size()) - 8) { 54 | BL_ERROR("can not read level.dat"); 55 | return false; 56 | } 57 | return this->preload_data(); 58 | } 59 | void level_dat::set_nbt(bl::palette::compound_tag *root) { 60 | if (!root) return; 61 | delete this->root_; 62 | this->root_ = root; 63 | this->preload_data(); 64 | } 65 | std::string level_dat::to_raw() const { return this->header_ + this->root_->to_raw(); } 66 | level_dat::~level_dat() { delete this->root_; } 67 | } // namespace bl 68 | -------------------------------------------------------------------------------- /src/palette.cpp: -------------------------------------------------------------------------------- 1 | // 2 | // Created by xhy on 2023/3/29. 
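// Illustrative sketch: the string encoding that read_string() below expects, namely a
// little-endian uint16 length prefix followed by the raw bytes with no terminator (the
// Bedrock uncompressed little-endian NBT convention). The encoding side, for symmetry:
#include <cstdint>
#include <cstring>
#include <string>

inline std::string encode_nbt_string(const std::string &s) {
    auto len = static_cast<uint16_t>(s.size());
    std::string out(2, '\0');
    std::memcpy(&out[0], &len, 2);  // little-endian on the platforms this library targets
    return out + s;
}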
3 | // 4 | 5 | #include "palette.h" 6 | 7 | #include 8 | #include 9 | 10 | #include "utils.h" 11 | 12 | namespace bl::palette { 13 | 14 | /** 15 | * 从数据流中读取一个nbt,可以是任何类型,返回指针和读取的长度 16 | * @param data 17 | * @return 18 | */ 19 | std::tuple read_nbt(const byte_t *data); 20 | std::tuple read_compound_value(const byte_t *data, 21 | const std::string &key); 22 | 23 | int read_string(const byte_t *data, std::string &val) { 24 | uint16_t len = *reinterpret_cast(data); 25 | if (len != 0) { 26 | val = std::string(data + 2, data + len + 2); 27 | } 28 | return len + 2; 29 | } 30 | 31 | int read_tag_type(const byte_t *data, tag_type &type) { 32 | type = static_cast(data[0]); 33 | return 1; 34 | } 35 | 36 | std::tuple read_byte_array_value(const byte_t *data, 37 | const std::string &key) { 38 | auto *tag = new byte_array_tag(key); 39 | int32_t len = {0}; 40 | memcpy(&len, data, 4); 41 | tag->value = std::vector(len, 0); 42 | memcpy(tag->value.data(), data + 4, len); 43 | return {tag, len * 1 + 4}; 44 | } 45 | 46 | std::tuple read_int_array_value(const byte_t *data, 47 | const std::string &key) { 48 | auto *tag = new int_array_tag(key); 49 | int32_t len{0}; 50 | memcpy(&len, data, 4); 51 | tag->value = std::vector(len, 0); 52 | memcpy(tag->value.data(), data + 4, len * 4); 53 | return {tag, len * 4 + 4}; 54 | } 55 | 56 | std::tuple read_long_array_value(const byte_t *data, 57 | const std::string &key) { 58 | auto *tag = new long_array_tag(key); 59 | int32_t len{0}; 60 | memcpy(&len, data, 4); 61 | BL_LOGGER("Len is %d", len); 62 | tag->value = std::vector(len, 0); 63 | memcpy(tag->value.data(), data + 4, len * 8); 64 | return {tag, len * 8 + 4}; 65 | } 66 | 67 | std::tuple read_list_tag_value(const byte_t *data, const std::string &key) { 68 | size_t read = 0; 69 | auto *tag = new list_tag(key); 70 | tag_type child_type; 71 | read += read_tag_type(data + read, child_type); 72 | int32_t list_size{0}; 73 | memcpy(&list_size, data + read, 4); 74 | read += 4; 75 | for (int i = 0; i < list_size; i++) { 76 | if (child_type == Int) { 77 | auto *child = new int_tag(""); 78 | memcpy(&child->value, data + read, 4); 79 | tag->value.push_back(child); 80 | read += 4; 81 | } else if (child_type == Short) { 82 | auto *child = new short_tag(""); 83 | memcpy(&child->value, data + read, 2); 84 | tag->value.push_back(child); 85 | read += 2; 86 | } else if (child_type == Long) { 87 | auto *child = new long_tag(""); 88 | memcpy(&child->value, data + read, 8); 89 | tag->value.push_back(child); 90 | read += 8; 91 | } else if (child_type == Float) { 92 | auto *child = new float_tag(""); 93 | memcpy(&child->value, data + read, 4); 94 | tag->value.push_back(child); 95 | read += 4; 96 | } else if (child_type == Double) { 97 | auto *child = new double_tag(""); 98 | memcpy(&child->value, data + read, 8); 99 | tag->value.push_back(child); 100 | read += 8; 101 | } else if (child_type == String) { 102 | auto *child = new string_tag(""); 103 | read += read_string(data + read, child->value); 104 | tag->value.push_back(child); 105 | } else if (child_type == ByteArray) { 106 | auto [t, sz] = read_byte_array_value(data + read, ""); 107 | read += sz; 108 | tag->value.push_back(t); 109 | } else if (child_type == IntArray) { 110 | auto [t, sz] = read_int_array_value(data + read, ""); 111 | read += sz; 112 | tag->value.push_back(t); 113 | } else if (child_type == LongArray) { 114 | auto [t, sz] = read_long_array_value(data + read, ""); 115 | read += sz; 116 | tag->value.push_back(t); 117 | } else if (child_type == Compound) { 118 | auto [t, sz] 
= read_compound_value(data + read, ""); 119 | read += sz; 120 | tag->value.push_back(t); 121 | } else if (child_type == List) { 122 | auto [t, sz] = read_list_tag_value(data + read, ""); 123 | read += sz; 124 | tag->value.push_back(t); 125 | } else { 126 | throw std::runtime_error("unsupported list child tag type " + 127 | std::to_string((int)child_type)); 128 | } 129 | } 130 | return {tag, read}; 131 | } 132 | 133 | std::tuple read_compound_value(const byte_t *data, 134 | const std::string &key) { 135 | auto *tag = new compound_tag(key); 136 | size_t total = 0; 137 | while (true) { 138 | auto [child, read] = read_nbt(data + total); 139 | total += read; 140 | if (child) { 141 | tag->value[child->key()] = child; 142 | } else { 143 | break; 144 | } 145 | } 146 | return {tag, total}; 147 | } 148 | 149 | /* 150 | * 不保证内存够用 151 | * 152 | */ 153 | std::tuple read_nbt(const byte_t *data) { 154 | int read = 0; 155 | tag_type type; 156 | read += read_tag_type(data, type); 157 | if (type == End) { 158 | return {nullptr, 1}; 159 | } 160 | std::string key; 161 | read += read_string(data + read, key); 162 | if (type == Compound) { 163 | auto [res, len] = read_compound_value(data + read, key); 164 | return {res, read + len}; 165 | } else if (type == Int) { 166 | auto *tag = new int_tag(key); 167 | memcpy(&tag->value, data + read, 4); 168 | return {tag, 4 + read}; 169 | } else if (type == Short) { 170 | auto *tag = new short_tag(key); 171 | memcpy(&tag->value, data + read, 2); 172 | return {tag, 2 + read}; 173 | } else if (type == Long) { 174 | auto *tag = new long_tag(key); 175 | memcpy(&tag->value, data + read, 8); 176 | return {tag, 8 + read}; 177 | } else if (type == Float) { 178 | auto *tag = new float_tag(key); 179 | memcpy(&tag->value, data + read, 4); 180 | return {tag, 4 + read}; 181 | } else if (type == Double) { 182 | auto *tag = new double_tag(key); 183 | memcpy(&tag->value, data + read, 8); 184 | return {tag, 8 + read}; 185 | } else if (type == Byte) { 186 | auto *tag = new byte_tag(key); 187 | memcpy(&tag->value, data + read, 1); 188 | return {tag, 1 + read}; 189 | } else if (type == String) { 190 | auto *tag = new string_tag(key); 191 | auto len = read_string(data + read, tag->value); 192 | return {tag, len + read}; 193 | } else if (type == ByteArray) { 194 | auto [res, len] = read_byte_array_value(data + read, key); 195 | return {res, len + read}; 196 | } else if (type == IntArray) { 197 | auto [res, len] = read_int_array_value(data + read, key); 198 | return {res, len + read}; 199 | } else if (type == LongArray) { 200 | BL_LOGGER("key size(%d)", key.size()); 201 | auto [res, len] = read_long_array_value(data + read, key); 202 | return {res, len + read}; 203 | } else if (type == List) { 204 | auto [res, len] = read_list_tag_value(data + read, key); 205 | return {res, len + read}; 206 | } else { 207 | throw std::runtime_error("unsupported tag type " + std::to_string((int)type)); 208 | } 209 | } 210 | 211 | [[maybe_unused]] compound_tag *read_one_palette(const byte_t *data, int &read) { 212 | read = 0; 213 | auto [r, x] = read_nbt(data); 214 | read = static_cast(x); 215 | if (!r || r->type() != tag_type::Compound) { 216 | BL_ERROR("Invalid palette format"); 217 | delete r; 218 | return nullptr; 219 | } else { 220 | return dynamic_cast(r); 221 | } 222 | } 223 | 224 | std::string tag_type_to_str(tag_type type) { 225 | switch (type) { 226 | case End: 227 | return "End"; 228 | case Byte: 229 | return "Byte"; 230 | case Int: 231 | return "Int"; 232 | case String: 233 | return "String"; 234 | 
case Compound: 235 | return "Compound"; 236 | case List: 237 | return "List"; 238 | case Long: 239 | return "Long"; 240 | break; 241 | case Float: 242 | return "Float"; 243 | case Double: 244 | return "Double"; 245 | case Short: 246 | return "Short"; 247 | case ByteArray: 248 | return "ByteArray"; 249 | case IntArray: 250 | return "IntArray"; 251 | case LongArray: 252 | return "LongArray"; 253 | } 254 | return "UNKNOWN"; 255 | } 256 | 257 | std::vector read_palette_to_end(const byte_t *data, size_t len) { 258 | size_t ptr = 0; 259 | std::vector res; 260 | while (ptr < len) { 261 | int read; 262 | res.push_back(read_one_palette(data + ptr, read)); 263 | ptr += read; 264 | } 265 | if (ptr != len) { 266 | BL_ERROR("Remain bytes found (%d).", (int)len - (int)ptr); 267 | } 268 | return res; 269 | } 270 | 271 | list_tag::~list_tag() { 272 | for (auto tag : this->value) { 273 | delete tag; 274 | } 275 | } 276 | } // namespace bl::palette 277 | -------------------------------------------------------------------------------- /src/player.cpp: -------------------------------------------------------------------------------- 1 | // 2 | // Created by xhy on 2023/3/31. 3 | // 4 | 5 | #include "player.h" 6 | -------------------------------------------------------------------------------- /src/scoreboard.cpp: -------------------------------------------------------------------------------- 1 | // 2 | // Created by xhy on 2023/3/31. 3 | // 4 | 5 | #include "scoreboard.h" 6 | -------------------------------------------------------------------------------- /src/sub_chunk.cpp: -------------------------------------------------------------------------------- 1 | // 2 | // Created by xhy on 2023/3/29. 3 | // 4 | 5 | #include "sub_chunk.h" 6 | 7 | #include 8 | 9 | #include "bit_tools.h" 10 | #include "utils.h" 11 | 12 | // #include "nbt.hpp" 13 | #include 14 | #include 15 | #include 16 | #include 17 | 18 | #include "color.h" 19 | #include "palette.h" 20 | 21 | namespace bl { 22 | 23 | namespace { 24 | 25 | // sub chunk layout 26 | // https://user-images.githubusercontent.com/13713600/148380033-6223ac76-54b7-472c-a355-5923b87cb7c5.png 27 | 28 | bool read_header(sub_chunk *sub_chunk, const byte_t *stream, int &read) { 29 | if (!sub_chunk || !stream) return false; 30 | // assert that stream is long enough 31 | auto version = stream[0]; 32 | if (version != 8 // 1.2~1.17 33 | && version != 9) { // 1.18+ 34 | BL_LOGGER("Unsupported sub chunk version: %u", stream[0]); 35 | return false; 36 | } 37 | sub_chunk->set_version(version); 38 | sub_chunk->set_layers_num(stream[1]); 39 | read = 2; 40 | // y-index for version 9 41 | if (version == 9) { 42 | int8_t y_index = stream[2]; 43 | if (y_index != sub_chunk->y_index()) { 44 | BL_ERROR("Invalid Y index value(new(%d) != default(%d))", y_index, 45 | sub_chunk->y_index()); 46 | } 47 | sub_chunk->set_y_index(y_index); 48 | read++; 49 | } 50 | return true; 51 | } 52 | 53 | bool read_palettes(bl::sub_chunk::layer *layer, const byte_t *stream, size_t number, 54 | size_t len, int &read) { 55 | read = 0; 56 | for (auto i = 0u; i < number; i++) { 57 | int r = 0; 58 | auto *tag = bl::palette::read_one_palette(stream + read, r); 59 | if (tag) { 60 | tag->remove("version"); // remove version tag(compatibility for color table) 61 | layer->palettes.push_back(tag); 62 | } else { 63 | BL_ERROR("Can not read block palette"); 64 | return false; 65 | } 66 | read += r; 67 | } 68 | return true; 69 | } 70 | 71 | bool read_one_layer(bl::sub_chunk::layer *layer, const byte_t *stream, size_t len, 72 | 
int &read) { 73 | read = 0; 74 | constexpr auto BLOCK_NUM = 16 * 16 * 16; 75 | if (!layer || !stream) return false; 76 | auto layer_header = stream[0]; 77 | read++; 78 | layer->type = layer_header & 0x1; 79 | layer->bits = layer_header >> 1u; 80 | if (layer->bits != 0) { 81 | int block_per_word = 32 / layer->bits; 82 | auto wordCount = BLOCK_NUM / block_per_word; 83 | if (BLOCK_NUM % block_per_word != 0) wordCount++; 84 | layer->blocks.resize(BLOCK_NUM); 85 | int position = 0; 86 | for (int wordi = 0; wordi < wordCount; wordi++) { 87 | auto word = *reinterpret_cast(stream + read + wordi * 4); 88 | for (int block = 0; block < block_per_word; block++) { 89 | int state = (word >> ((position % block_per_word) * layer->bits)) & 90 | ((1 << layer->bits) - 1); 91 | if (position < static_cast(layer->blocks.size())) { 92 | layer->blocks[position] = state; 93 | } 94 | position++; 95 | } 96 | } 97 | 98 | read += wordCount << 2; 99 | int palette_len = *reinterpret_cast(stream + read); 100 | layer->palette_len = palette_len; 101 | read += 4; 102 | } else { // uniform 103 | layer->blocks = std::vector(4096, 0); 104 | layer->palette_len = 1; 105 | } 106 | // palette header 107 | int palette_read = 0; 108 | read_palettes(layer, stream + read, layer->palette_len, len - read, palette_read); 109 | read += palette_read; 110 | return true; 111 | } 112 | } // namespace 113 | 114 | bool sub_chunk::load(const byte_t *data, size_t len) { 115 | size_t idx = 0; // 全局索引 116 | int read{0}; 117 | if (!read_header(this, data, read)) return false; 118 | idx += read; 119 | for (auto i = 0; i < (int)this->layers_num_; i++) { 120 | this->layers_.push_back(new layer()); 121 | if (!read_one_layer(this->layers_.back(), data + idx, len - idx, read)) { 122 | BL_ERROR("can not read layer %d", i); 123 | return false; 124 | } 125 | idx += read; 126 | } 127 | return true; 128 | } 129 | 130 | void sub_chunk::dump_to_file(FILE *fp) const {} 131 | 132 | block_info sub_chunk::get_block(int rx, int ry, int rz) { 133 | if (rx < 0 || rx > 15 || ry < 0 || ry > 15 || rz < 0 || rz > 15) { 134 | BL_ERROR("Invalid in chunk position %d %d %d", rx, ry, rz); 135 | return {}; 136 | } 137 | 138 | auto idx = ry + rz * 16 + rx * 256; 139 | auto block = this->layers_[0]->blocks[idx]; 140 | 141 | if (block >= this->layers_[0]->palettes.size() || block < 0) { 142 | BL_ERROR("Invalid block index with value %d", block); 143 | return {}; 144 | } 145 | 146 | auto &palette = this->layers_[0]->palettes[block]; 147 | 148 | auto id = palette->value.find("name"); 149 | if (id == palette->value.end()) { 150 | return {}; 151 | } 152 | 153 | return {dynamic_cast(id->second)->value, 154 | bl::get_block_color_from_SNBT(palette->to_raw())}; 155 | } 156 | 157 | block_info sub_chunk::get_block_fast(int rx, int ry, int rz) { 158 | if (rx < 0 || rx > 15 || ry < 0 || ry > 15 || rz < 0 || rz > 15) { 159 | BL_ERROR("Invalid in chunk position %d %d %d", rx, ry, rz); 160 | return {}; 161 | } 162 | 163 | auto idx = ry + rz * 16 + rx * 256; 164 | auto block = this->layers_[0]->blocks[idx]; 165 | 166 | if (block >= this->layers_[0]->palettes.size() || block < 0) { 167 | BL_ERROR("Invalid block index with value %d", block); 168 | return {}; 169 | } 170 | 171 | auto &palette = this->layers_[0]->palettes[block]; 172 | auto id = palette->value.find("name"); 173 | if (id == palette->value.end()) { 174 | return {}; 175 | } 176 | 177 | return {dynamic_cast(id->second)->value, bl::color{}}; 178 | } 179 | 180 | palette::compound_tag *sub_chunk::get_block_raw(int rx, int ry, int rz) { 181 | 
if (rx < 0 || rx > 15 || ry < 0 || ry > 15 || rz < 0 || rz > 15) { 182 | BL_ERROR("Invalid in chunk position %d %d %d", rx, ry, rz); 183 | return nullptr; 184 | } 185 | 186 | auto idx = ry + rz * 16 + rx * 256; 187 | auto block = this->layers_[0]->blocks[idx]; 188 | 189 | if (block >= this->layers_[0]->palettes.size() || block < 0) { 190 | BL_ERROR("Invalid block index with value %d", block); 191 | return nullptr; 192 | } 193 | 194 | return this->layers_[0]->palettes[block]; 195 | } 196 | sub_chunk::~sub_chunk() { 197 | for (auto &layer : this->layers_) { 198 | delete layer; 199 | } 200 | } 201 | 202 | sub_chunk::layer::~layer() { 203 | for (auto &p : this->palettes) delete p; 204 | } 205 | } // namespace bl 206 | -------------------------------------------------------------------------------- /src/utils.cpp: -------------------------------------------------------------------------------- 1 | // 2 | // Created by xhy on 2023/3/29. 3 | // 4 | 5 | #include "utils.h" 6 | 7 | #include 8 | #include 9 | #include 10 | #include 11 | #include 12 | 13 | #define KNRM "\x1B[0m" 14 | #define KRED "\x1B[31m" 15 | #define KGRN "\x1B[32m" 16 | #define KYEL "\x1B[33m" 17 | #define KBLU "\x1B[34m" 18 | #define KMAG "\x1B[35m" 19 | #define KCYN "\x1B[36m" 20 | #define KWHT "\x1B[37m" 21 | 22 | void log(const char *file_name, const char *function_name, size_t line, const char *fmt, ...) { 23 | #ifdef DEBUG 24 | va_list args; 25 | va_start(args, fmt); 26 | fprintf(stdout, "[INFO] [%s:%zu @ %s]:", file_name, line, function_name); 27 | vfprintf(stdout, fmt, args); 28 | fprintf(stdout, "\n"); 29 | fflush(stdout); 30 | #endif 31 | } 32 | 33 | void error_msg(const char *file_name, const char *function_name, size_t line, const char *fmt, 34 | ...) { 35 | va_list args; 36 | va_start(args, fmt); 37 | fprintf(stdout, "[ERROR] [%s:%zu @ %s]:", file_name, line, function_name); 38 | vfprintf(stdout, fmt, args); 39 | fprintf(stdout, "\n"); 40 | fflush(stdout); 41 | } 42 | 43 | void M_Assert(const char *expr_str, bool expr, const char *file, int line, const char *msg, ...) 
{ 44 | if (!expr) { 45 | fprintf(stderr, "Assert failed:\t"); 46 | va_list args; 47 | va_start(args, msg); 48 | vfprintf(stderr, msg, args); 49 | fprintf(stderr, "\nExpected: %s\n", expr_str); 50 | fprintf(stderr, "At Source: %s:%d\n", file, line); 51 | abort(); 52 | } 53 | } 54 | #include 55 | namespace bl::utils { 56 | 57 | std::vector read_file(const std::string &file_name) { 58 | std::ifstream input(std::filesystem::u8path(file_name), std::ios::binary); 59 | if (!input.is_open()) { 60 | BL_ERROR("Can not open file %s", file_name.c_str()); 61 | return {}; 62 | } 63 | std::vector bytes((std::istreambuf_iterator(input)), 64 | (std::istreambuf_iterator())); 65 | input.close(); 66 | return bytes; 67 | } 68 | 69 | void write_file(const std::string &file_name, const byte_t *data, size_t len) { 70 | std::ofstream output(file_name, std::ios::binary); 71 | if (!output.is_open()) { 72 | BL_ERROR("Can not open file %s", file_name.c_str()); 73 | return; 74 | } 75 | output.write(reinterpret_cast(data), static_cast(len)); 76 | output.close(); 77 | } 78 | // https : // www.jianshu.com/p/baf75216f883 79 | 80 | #ifdef _WIN32 81 | #include 82 | std::string UTF8ToGBEx(const char *utf8) { 83 | if (!utf8 || strlen(utf8) < 1) return ""; 84 | std::stringstream ss; 85 | int len = MultiByteToWideChar(CP_UTF8, 0, utf8, -1, nullptr, 0); 86 | wchar_t *wstr = new wchar_t[len + 1]; 87 | memset(wstr, 0, len + 1); 88 | MultiByteToWideChar(CP_UTF8, 0, utf8, -1, wstr, len); 89 | len = WideCharToMultiByte(CP_ACP, 0, wstr, -1, nullptr, 0, nullptr, nullptr); 90 | char *str = new char[len + 1]; 91 | memset(str, 0, len + 1); 92 | WideCharToMultiByte(CP_ACP, 0, wstr, -1, str, len, nullptr, nullptr); 93 | ss << str; 94 | delete[] wstr; 95 | delete[] str; 96 | return ss.str(); 97 | } 98 | #else 99 | std::string UTF8ToGBEx(const char *utf8) { return std::string(utf8); } 100 | #endif 101 | 102 | } // namespace bl::utils 103 | -------------------------------------------------------------------------------- /tests/actor_test.cpp: -------------------------------------------------------------------------------- 1 | // 2 | // Created by xhy on 2023/3/31. 3 | // 4 | 5 | #include "actor.h" 6 | 7 | #include 8 | 9 | #include "utils.h" 10 | 11 | TEST(Actor, BaicRead) { 12 | auto data = bl::utils::read_file( 13 | R"(C:\Users\xhy\dev\bedrock-level\data\dumps\actors\144115188092633088.palette)"); 14 | bl::actor actor; 15 | actor.load(data.data(), data.size()); 16 | actor.dump(); 17 | } 18 | -------------------------------------------------------------------------------- /tests/bedrock_level_test.cpp: -------------------------------------------------------------------------------- 1 | // 2 | // Created by xhy on 2023/4/1. 
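// Illustrative sketch: a test-style traversal of every leveldb key using
// bedrock_level::foreach_global_keys. The callback signature (raw key string, raw value
// string) is assumed from how the other key/value helpers in this library are written;
// the world path is hypothetical.
#include <cstdio>
#include <string>
#include "bedrock_level.h"

inline void count_keys_example() {
    bl::bedrock_level level;
    if (!level.open("./sample")) return;
    int n = 0;
    level.foreach_global_keys([&n](const std::string &key, const std::string &value) {
        (void)key;
        (void)value;
        ++n;
    });
    std::printf("total leveldb keys: %d\n", n);
    level.close();
}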
3 | // 4 | #include "bedrock_level.h" 5 | 6 | #include 7 | 8 | #include "bedrock_key.h" 9 | #include "chunk.h" 10 | #include "sub_chunk.h" 11 | #include "utils.h" 12 | 13 | const std::string root = R"(C:\Users\xhy\dev\bedrock-level\data\worlds\a)"; 14 | 15 | TEST(BedrockLevel, ZeroChunkTest) { 16 | using namespace bl; 17 | bl::bedrock_level level; 18 | EXPECT_TRUE(level.open("./sample")); 19 | // auto *ch = level.get_chunk({-1, -1, 2}); 20 | auto cp = chunk_pos{-1, -1, 2}; 21 | auto key = chunk_key{chunk_key::SubChunkTerrain, cp, 3}.to_raw(); 22 | std::string data; 23 | level.db()->Get(leveldb::ReadOptions(), key, &data); 24 | 25 | utils::write_file("a.subchunk", data.data(), data.size()); 26 | 27 | // sub_chunk c; 28 | // c.load(reinterpret_cast(data.data()), data.size()); 29 | } 30 | 31 | TEST(BedrockLevel, ReadChunk) { 32 | using namespace bl; 33 | bl::bedrock_level level; 34 | EXPECT_TRUE(level.open("./sample")); 35 | auto *ch = level.get_chunk({6, 0, 2}); 36 | EXPECT_TRUE(ch); 37 | if (ch) { 38 | for (int i = 0; i < 64; i++) { 39 | auto block = ch->get_block(0, i, 0); 40 | std::cout << block.name << std::endl; 41 | } 42 | } else { 43 | BL_LOGGER("Can not find chunk"); 44 | } 45 | } 46 | 47 | TEST(BedrockLevel, ReadBlock) { 48 | using namespace bl; 49 | bl::bedrock_level level; 50 | EXPECT_TRUE(level.open("../data/worlds/a")); 51 | for (int i = -64; i < 64; i++) { 52 | // auto b = level.get_block({0, i, 0}, 0); 53 | // printf("%d: %s\n", i, b.name.c_str()); 54 | } 55 | printf("\n"); 56 | } 57 | 58 | TEST(BedrockLevel, ReadHeight) { 59 | using namespace bl; 60 | bl::bedrock_level level; 61 | EXPECT_TRUE(level.open("../data/worlds/a")); 62 | auto *chunk = level.get_chunk({0, 0, 0}); 63 | if (!chunk) { 64 | BL_ERROR("Can not load chunk"); 65 | return; 66 | } 67 | for (int i = 0; i < 16; i++) { 68 | for (int j = 0; j < 16; j++) { 69 | printf("%02d ", chunk->get_height(i, j)); 70 | } 71 | printf("\n"); 72 | } 73 | } 74 | 75 | // TEST(BedrockLevel, getRange) { 76 | // bl::bedrock_level level; 77 | // EXPECT_TRUE(level.open("../data/worlds/a")); 78 | // auto [mi, ma] = level.get_range(0); 79 | // BL_ERROR("%s -- %s", mi.to_string().c_str(), ma.to_string().c_str()); 80 | // } 81 | TEST(BedrockLevel, CloseAndOpen) { 82 | bl::bedrock_level level; 83 | EXPECT_TRUE(level.open(root)); 84 | level.close(); 85 | EXPECT_TRUE(level.open(root)); 86 | level.close(); 87 | } 88 | -------------------------------------------------------------------------------- /tests/bit_tools_test.cpp: -------------------------------------------------------------------------------- 1 | // 2 | // Created by xhy on 2023/3/29. 
3 | // 4 | 5 | #include 6 | // 7 | 8 | // TEST(BitTool, Mask) { 9 | // EXPECT_EQ(bl::bits::mask(0, 7), 0b11111111); 10 | // EXPECT_EQ(bl::bits::mask(0, 6), 0b01111111); 11 | // EXPECT_EQ(bl::bits::mask(0, 5), 0b00111111); 12 | // EXPECT_EQ(bl::bits::mask(1, 5), 0b00111110); 13 | // EXPECT_EQ(bl::bits::mask(2, 5), 0b00111100); 14 | // EXPECT_EQ(bl::bits::mask(3, 5), 0b00111000); 15 | // } 16 | // 17 | // TEST(Rearrange, Bit1) { 18 | // byte_t data[] = {-1, -1, -1, -1}; 19 | // auto res = bl::bits::rearrange_words(1, data, 4); 20 | // EXPECT_TRUE(res.size() == 32); 21 | // for (auto i : res) { 22 | // EXPECT_TRUE(i == 1); 23 | // } 24 | // byte_t data2[] = {0, 0, 0, 0}; 25 | // auto res2 = bl::bits::rearrange_words(1, data2, 4); 26 | // EXPECT_TRUE(res2.size() == 32); 27 | // for (auto i : res2) { 28 | // EXPECT_TRUE(i == 0); 29 | // } 30 | // } 31 | // 32 | // TEST(Rearrange, Bit2) { 33 | // byte_t data[] = {-1, -1, -1, -1}; 34 | // auto res = bl::bits::rearrange_words(2, data, 4); 35 | // EXPECT_TRUE(res.size() == 16); 36 | // for (auto i : res) { 37 | // EXPECT_TRUE(i == 3); 38 | // } 39 | // byte_t data2[] = {0, 0, 0, 0}; 40 | // auto res2 = bl::bits::rearrange_words(2, data2, 4); 41 | // EXPECT_TRUE(res2.size() == 16); 42 | // for (auto i : res2) { 43 | // EXPECT_TRUE(i == 0); 44 | // } 45 | // 46 | // auto v3 = static_cast(0b10101010); 47 | // byte_t data3[] = {v3, v3, v3, v3}; 48 | // auto res3 = bl::bits::rearrange_words(2, data3, 4); 49 | // EXPECT_TRUE(res2.size() == 16); 50 | // for (auto i : res3) { 51 | // EXPECT_TRUE(i == 2); 52 | // } 53 | // } 54 | // 55 | // TEST(Rearrange, Bit4) { 56 | // byte_t data[] = {-1, -1, -1, -1}; 57 | // auto res = bl::bits::rearrange_words(4, data, 4); 58 | // EXPECT_TRUE(res.size() == 8); 59 | // for (auto i : res) { 60 | // EXPECT_TRUE(i == 15); 61 | // } 62 | // 63 | // byte_t data2[] = {0, 0, 0, 0}; 64 | // auto res2 = bl::bits::rearrange_words(4, data2, 4); 65 | // EXPECT_TRUE(res2.size() == 8); 66 | // for (auto i : res2) { 67 | // EXPECT_TRUE(i == 0); 68 | // } 69 | // 70 | // auto v3 = static_cast(0b10101010); 71 | // byte_t data3[] = {v3, v3, v3, v3}; 72 | // auto res3 = bl::bits::rearrange_words(4, data3, 4); 73 | // EXPECT_TRUE(res2.size() == 8); 74 | // for (auto i : res3) { 75 | // EXPECT_TRUE(i == 10); 76 | // } 77 | // } 78 | // 79 | // TEST(Rearrange, Bit16) { 80 | // auto v3 = static_cast(0b10101010); 81 | // byte_t data[] = {v3, v3, v3, v3}; 82 | // auto res = bl::bits::rearrange_words(16, data, 4); 83 | // EXPECT_TRUE(res.size() == 2); 84 | // for (auto i : res) { 85 | // EXPECT_TRUE(i == 0xaaaa); 86 | // } 87 | // } 88 | //// 3 5 6 89 | // TEST(Rearrange, Bit3) { 90 | // byte_t data[] = {-1, -1, -1, -1}; 91 | // auto res = bl::bits::rearrange_words(3, data, 4); 92 | // EXPECT_TRUE(res.size() == 10); 93 | // for (auto i : res) { 94 | // // BL_LOGGER("%d", i); 95 | // EXPECT_TRUE(i == 7); 96 | // } 97 | // } 98 | // 99 | // TEST(Rearrange, Bit5) { 100 | // byte_t data[] = {-1, -1, -1, -1}; 101 | // auto res = bl::bits::rearrange_words(5, data, 4); 102 | // EXPECT_TRUE(res.size() == 6); 103 | // for (auto i : res) { 104 | // // BL_LOGGER("%d", i); 105 | // EXPECT_TRUE(i == 31); 106 | // } 107 | // } 108 | // 109 | // TEST(Rearrange, Bit6) { 110 | // byte_t data[] = {-1, -1, -1, -1}; 111 | // auto res = bl::bits::rearrange_words(6, data, 4); 112 | // EXPECT_TRUE(res.size() == 5); 113 | // for (auto i : res) { 114 | // // BL_LOGGER("%d", i); 115 | // EXPECT_TRUE(i == 63); 116 | // } 117 | // } 118 | // 119 | // TEST(Rearrange, 
Custom) { 120 | // byte_t data[] = {0x00, 0x11, 0x11, 0x11}; 121 | // auto res = bl::bits::rearrange_words(6, data, 4); 122 | // EXPECT_TRUE(res.size() == 5); 123 | // for (auto i : res) { 124 | // BL_LOGGER("%d", i); 125 | // } 126 | // } 127 | -------------------------------------------------------------------------------- /tests/chunk_test.cpp: -------------------------------------------------------------------------------- 1 | // 2 | // Created by xhy on 2023/4/2. 3 | // 4 | 5 | #include "chunk.h" 6 | 7 | #include 8 | 9 | void check_map(int y, int index, int offset) { 10 | int i, o; 11 | bl::chunk::map_y_to_subchunk(y, i, o); 12 | EXPECT_TRUE(i == index && o == offset); 13 | } 14 | 15 | TEST(Chunk, SubIndexMapping) { 16 | check_map(-64, -4, 0); 17 | check_map(-63, -4, 1); 18 | check_map(-49, -4, 15); 19 | check_map(-48, -3, 0); 20 | check_map(-47, -3, 1); 21 | check_map(-48, -3, 0); 22 | check_map(-16, -1, 0); 23 | check_map(-1, -1, 15); 24 | check_map(0, 0, 0); 25 | check_map(1, 0, 1); 26 | check_map(15, 0, 15); 27 | check_map(16, 1, 0); 28 | } 29 | 30 | TEST(Chunk, ValidInCHunkPos) { 31 | EXPECT_FALSE(bl::chunk::valid_in_chunk_pos(15, 0, 15, -1)); 32 | EXPECT_FALSE(bl::chunk::valid_in_chunk_pos(15, 0, 15, 3)); 33 | EXPECT_FALSE(bl::chunk::valid_in_chunk_pos(15, 0, 16, 0)); 34 | EXPECT_FALSE(bl::chunk::valid_in_chunk_pos(15, 0, -1, 0)); 35 | EXPECT_FALSE(bl::chunk::valid_in_chunk_pos(16, 0, -1, 0)); 36 | EXPECT_FALSE(bl::chunk::valid_in_chunk_pos(-1, 0, 16, 0)); 37 | EXPECT_TRUE(bl::chunk::valid_in_chunk_pos(15, 0, 15, 0)); 38 | EXPECT_TRUE(bl::chunk::valid_in_chunk_pos(15, 0, 15, 1)); 39 | EXPECT_TRUE(bl::chunk::valid_in_chunk_pos(15, 0, 15, 2)); 40 | // overworld 41 | EXPECT_FALSE(bl::chunk::valid_in_chunk_pos(1, 320, 1, 0)); 42 | EXPECT_FALSE(bl::chunk::valid_in_chunk_pos(1, -65, 1, 0)); 43 | EXPECT_TRUE(bl::chunk::valid_in_chunk_pos(1, 319, 1, 0)); 44 | EXPECT_TRUE(bl::chunk::valid_in_chunk_pos(1, -64, 1, 0)); 45 | // nether 46 | EXPECT_FALSE(bl::chunk::valid_in_chunk_pos(1, 128, 1, 1)); 47 | EXPECT_FALSE(bl::chunk::valid_in_chunk_pos(1, -1, 1, 1)); 48 | EXPECT_TRUE(bl::chunk::valid_in_chunk_pos(1, 127, 1, 1)); 49 | EXPECT_TRUE(bl::chunk::valid_in_chunk_pos(1, 0, 1, 1)); 50 | // the end 51 | EXPECT_FALSE(bl::chunk::valid_in_chunk_pos(1, 256, 1, 2)); 52 | EXPECT_FALSE(bl::chunk::valid_in_chunk_pos(1, -1, 1, 2)); 53 | EXPECT_TRUE(bl::chunk::valid_in_chunk_pos(1, 255, 1, 2)); 54 | EXPECT_TRUE(bl::chunk::valid_in_chunk_pos(1, 0, 1, 2)); 55 | } 56 | -------------------------------------------------------------------------------- /tests/color_test.cpp: -------------------------------------------------------------------------------- 1 | // 2 | // Created by xhy on 2023/6/18. 
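// Editor's note (not part of the original sources): a worked check of the y -> sub-chunk mapping
// covered by tests/chunk_test.cpp above. The test vectors imply index = floor(y / 16) and
// offset = y - 16 * index, so y = -37 lands in sub-chunk -3 at offset 11.
#include <cassert>

#include "chunk.h"

int main() {
    int index = 0, offset = 0;
    bl::chunk::map_y_to_subchunk(-37, index, offset);
    assert(index == -3 && offset == 11);
    return 0;
}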
3 | // 4 | #include "color.h" 5 | 6 | #include "data_3d.h" 7 | #include "gtest/gtest.h" 8 | TEST(Color, readColorPalette) { 9 | bl::init_biome_color_palette_from_file( 10 | R"(C:\Users\xhy\dev\bedrock-level\data\colors\biome.json)"); 11 | } 12 | TEST(Color, readBlockPalette) { 13 | bl::init_block_color_palette_from_file( 14 | R"(C:\Users\xhy\dev\bedrock-level\data\colors\block.json)"); 15 | } 16 | 17 | TEST(Color, exportImage) { 18 | bl::init_biome_color_palette_from_file( 19 | R"(C:\Users\xhy\dev\bedrock-level\data\colors\biome.json)"); 20 | std::vector> b(40, std::vector(60, bl::biome::cherry_groves)); 21 | b[12][32] = bl::biome::ocean; 22 | std::vector> c(40, std::vector(60, bl::color())); 23 | for (auto i = 0u; i < b.size(); i++) { 24 | for (auto j = 0u; j < b[0].size(); j++) { 25 | c[i][j] = bl::get_biome_color(b[i][j]); 26 | } 27 | } 28 | bl::export_image(c, 10, "a.png"); 29 | } -------------------------------------------------------------------------------- /tests/data3d_test.cpp: -------------------------------------------------------------------------------- 1 | // 2 | // Created by xhy on 2023/3/30. 3 | // 4 | 5 | #include 6 | 7 | #include "color.h" 8 | #include "data_3d.h" 9 | #include "utils.h" 10 | 11 | TEST(Data3d, BasicRead) { 12 | auto data = bl::utils::read_file("../data/dumps/data3d/0_-1.data3d"); 13 | EXPECT_TRUE(data.size() > 512); 14 | bl::biome3d d3d{}; 15 | d3d.load_from_d3d(data.data(), data.size()); 16 | } 17 | 18 | TEST(Data3d, MemoryFree) { 19 | auto data = bl::utils::read_file("../data/dumps/data3d/0_-1.data3d"); 20 | EXPECT_TRUE(data.size() > 512); 21 | auto *d = new bl::biome3d; 22 | d->load_from_d3d(data.data(), data.size()); 23 | delete d; 24 | } 25 | 26 | TEST(Data3d, BiomeRead) { 27 | bl::init_biome_color_palette_from_file( 28 | R"(C:\Users\xhy\dev\bedrock-level\data\colors\biome.json)"); 29 | 30 | auto data = bl::utils::read_file("../data/dumps/data3d/0_0.data3d"); 31 | EXPECT_TRUE(data.size() > 512); 32 | bl::biome3d d3d{}; 33 | d3d.load_from_d3d(data.data(), data.size()); 34 | 35 | for (int y = -64; y < 320; y++) { 36 | std::vector> c(16, std::vector(16, bl::color())); 37 | for (int x = 0; x < 16; x++) { 38 | for (int z = 0; z < 16; z++) { 39 | auto b = d3d.get_biome(x, y, z); 40 | c[x][z] = bl::get_biome_color(b); 41 | } 42 | } 43 | bl::export_image(c, 10, "./png/" + std::to_string(y + 64) + ".png"); 44 | } 45 | } 46 | 47 | TEST(Data3d, TopBiomeRead) { 48 | bl::init_biome_color_palette_from_file( 49 | R"(C:\Users\xhy\dev\bedrock-level\data\colors\biome.json)"); 50 | 51 | auto data = bl::utils::read_file("../data/dumps/data3d/1_-1.data3d"); 52 | EXPECT_TRUE(data.size() > 512); 53 | bl::biome3d d3d{}; 54 | d3d.load_from_d3d(data.data(), data.size()); 55 | 56 | std::vector> c(16, std::vector(16, bl::color())); 57 | for (int x = 0; x < 16; x++) { 58 | for (int z = 0; z < 16; z++) { 59 | auto b = d3d.get_top_biome(x, z); 60 | c[x][z] = bl::get_biome_color(b); 61 | } 62 | } 63 | 64 | bl::export_image(c, 10, "surface.png"); 65 | } 66 | -------------------------------------------------------------------------------- /tests/data_dump_test.cpp: -------------------------------------------------------------------------------- 1 | // 2 | // Created by xhy on 2023/3/30. 
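// Editor's note (not part of the original sources): a hedged sketch of the iteration pattern that
// the dump tests below repeat: walk every LevelDB entry, parse the key, and branch on its type.
// Non-chunk keys can be classified the same way via actor_key, actor_digest_key and village_key.
#include <iostream>

#include "bedrock_key.h"
#include "bedrock_level.h"

int main() {
    bl::bedrock_level level;
    if (!level.open("./sample")) return 1;  // placeholder world path
    int data3d_count = 0;
    auto *it = level.db()->NewIterator(leveldb::ReadOptions());
    for (it->SeekToFirst(); it->Valid(); it->Next()) {
        auto k = bl::chunk_key::parse(it->key().ToString());
        if (k.valid() && k.type == bl::chunk_key::Data3D) ++data3d_count;
    }
    delete it;
    level.close();
    std::cout << data3d_count << " Data3D entries" << std::endl;
    return 0;
}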
3 | // 4 | #include 5 | 6 | #include 7 | #include 8 | #include 9 | 10 | #include "bedrock_key.h" 11 | #include "bedrock_level.h" 12 | #include "utils.h" 13 | 14 | const std::string TEST_WORLD_ROOT = R"(C:\Users\xhy\Desktop\t)"; 15 | const std::string DUMP_ROOT = R"(C:\Users\xhy\dev\bedrock-level\data\dumps\)"; 16 | 17 | TEST(BedrockLevel, SimpleOpen) { 18 | bl::bedrock_level level; 19 | EXPECT_TRUE(level.open(TEST_WORLD_ROOT)); 20 | // level.close(); 21 | } 22 | 23 | TEST(BedrockLevel, CheckChunkKeys) { 24 | using namespace bl; 25 | bl::bedrock_level level; 26 | EXPECT_TRUE(level.open(TEST_WORLD_ROOT)); 27 | auto *db = level.db(); 28 | auto *it = db->NewIterator(leveldb::ReadOptions()); 29 | for (it->SeekToFirst(); it->Valid(); it->Next()) { 30 | auto k = bl::chunk_key::parse(it->key().ToString()); 31 | std::cout << k.to_string() << std::endl; 32 | } 33 | delete it; 34 | level.close(); 35 | } 36 | 37 | // Tag 43 38 | TEST(BedrockLevel, ExportData3d) { 39 | using namespace bl; 40 | bl::bedrock_level level; 41 | EXPECT_TRUE(level.open(TEST_WORLD_ROOT)); 42 | auto *db = level.db(); 43 | auto *it = db->NewIterator(leveldb::ReadOptions()); 44 | for (it->SeekToFirst(); it->Valid(); it->Next()) { 45 | auto k = bl::chunk_key::parse(it->key().ToString()); 46 | if (k.type == chunk_key::Data3D) { 47 | utils::write_file(DUMP_ROOT + "data3d/" + std::to_string(k.cp.x) + "_" + 48 | std::to_string(k.cp.z) + ".data3d", 49 | it->value().data(), it->value().size()); 50 | } 51 | } 52 | delete it; 53 | } 54 | 55 | // Tag 44 56 | TEST(BedrockLevel, CheckVersion) { 57 | using namespace bl; 58 | bl::bedrock_level level; 59 | EXPECT_TRUE(level.open(TEST_WORLD_ROOT)); 60 | auto *db = level.db(); 61 | auto *it = db->NewIterator(leveldb::ReadOptions()); 62 | for (it->SeekToFirst(); it->Valid(); it->Next()) { 63 | auto k = bl::chunk_key::parse(it->key().ToString()); 64 | if (k.type == chunk_key::VersionNew) { 65 | ASSERT_EQ(it->value().size(), 1); 66 | printf("Chunk version is %d\n", (int)it->value()[0]); 67 | } 68 | } 69 | delete it; 70 | } 71 | 72 | // Tag 47 73 | TEST(BedrockLevel, ExportSubChunkTerrain) { 74 | using namespace bl; 75 | bl::bedrock_level level; 76 | 77 | EXPECT_TRUE(level.open(TEST_WORLD_ROOT)); 78 | 79 | auto *db = level.db(); 80 | auto *it = db->NewIterator(leveldb::ReadOptions()); 81 | for (it->SeekToFirst(); it->Valid(); it->Next()) { 82 | auto k = bl::chunk_key::parse(it->key().ToString()); 83 | if (k.type == chunk_key::SubChunkTerrain) { 84 | utils::write_file(DUMP_ROOT + "sub_chunks/" + std::to_string(k.cp.x) + "_" + 85 | std::to_string(k.cp.z) + "_" + std::to_string(k.y_index) + 86 | ".subchunk", 87 | it->value().data(), it->value().size()); 88 | } 89 | } 90 | delete it; 91 | level.close(); 92 | } 93 | 94 | // Tag 49 95 | TEST(BedrockLevel, ExportBlockEntity) { 96 | using namespace bl; 97 | bl::bedrock_level level; 98 | EXPECT_TRUE(level.open(TEST_WORLD_ROOT)); 99 | auto *db = level.db(); 100 | auto *it = db->NewIterator(leveldb::ReadOptions()); 101 | for (it->SeekToFirst(); it->Valid(); it->Next()) { 102 | auto k = bl::chunk_key::parse(it->key().ToString()); 103 | printf("%s\n", k.to_string().c_str()); 104 | if (k.type == chunk_key::BlockEntity) { 105 | utils::write_file(DUMP_ROOT + "bes/" + std::to_string(k.cp.z) + "_" + 106 | std::to_string(k.cp.z) + ".blockentity.palette", 107 | it->value().data(), it->value().size()); 108 | } 109 | } 110 | delete it; 111 | level.close(); 112 | } 113 | 114 | // Tag 51 115 | TEST(BedrockLevel, ExportPts) { 116 | using namespace bl; 117 | bl::bedrock_level 
level; 118 | 119 | EXPECT_TRUE(level.open(TEST_WORLD_ROOT)); 120 | 121 | auto *db = level.db(); 122 | auto *it = db->NewIterator(leveldb::ReadOptions()); 123 | for (it->SeekToFirst(); it->Valid(); it->Next()) { 124 | auto k = bl::chunk_key::parse(it->key().ToString()); 125 | if (k.type == chunk_key::PendingTicks) { 126 | utils::write_file(DUMP_ROOT + "pts/" + std::to_string(k.cp.x) + "_" + 127 | std::to_string(k.cp.z) + ".pt.palette", 128 | it->value().data(), it->value().size()); 129 | } 130 | } 131 | delete it; 132 | level.close(); 133 | } 134 | 135 | // Tag 54 136 | 137 | TEST(BedrockLevel, CheckChunkState) { 138 | using namespace bl; 139 | bl::bedrock_level level; 140 | EXPECT_TRUE(level.open(TEST_WORLD_ROOT)); 141 | auto *db = level.db(); 142 | auto *it = db->NewIterator(leveldb::ReadOptions()); 143 | for (it->SeekToFirst(); it->Valid(); it->Next()) { 144 | auto k = bl::chunk_key::parse(it->key().ToString()); 145 | if (k.type == chunk_key::FinalizedState) { 146 | ASSERT_EQ(it->value().size(), 4); 147 | printf("Chunk State is %d\n", *reinterpret_cast(it->value().data())); 148 | } 149 | } 150 | delete it; 151 | level.close(); 152 | } 153 | 154 | // Tag 58 155 | TEST(BedrockLevel, ExportRandomTick) { 156 | using namespace bl; 157 | bl::bedrock_level level; 158 | EXPECT_TRUE(level.open(TEST_WORLD_ROOT)); 159 | auto *db = level.db(); 160 | auto *it = db->NewIterator(leveldb::ReadOptions()); 161 | for (it->SeekToFirst(); it->Valid(); it->Next()) { 162 | auto k = bl::chunk_key::parse(it->key().ToString()); 163 | if (k.type == chunk_key::RandomTicks) { 164 | utils::write_file(DUMP_ROOT + "rt/" + std::to_string(k.cp.x) + "_" + 165 | std::to_string(k.cp.z) + ".rt.palette", 166 | it->value().data(), it->value().size()); 167 | } 168 | } 169 | delete it; 170 | level.close(); 171 | } 172 | 173 | TEST(BedrockLevel, ExportHSA) { 174 | using namespace bl; 175 | bl::bedrock_level level; 176 | EXPECT_TRUE(level.open(TEST_WORLD_ROOT)); 177 | auto *db = level.db(); 178 | auto *it = db->NewIterator(leveldb::ReadOptions()); 179 | for (it->SeekToFirst(); it->Valid(); it->Next()) { 180 | auto k = bl::chunk_key::parse(it->key().ToString()); 181 | if (k.type == chunk_key::HardCodedSpawnAreas) { 182 | utils::write_file(DUMP_ROOT + "hsa/" + std::to_string(k.cp.x) + "_" + 183 | std::to_string(k.cp.z) + ".hsa.data", 184 | it->value().data(), it->value().size()); 185 | } 186 | } 187 | delete it; 188 | level.close(); 189 | } 190 | 191 | TEST(BedrockLevel, DumpActorDigits) { 192 | using namespace bl; 193 | bl::bedrock_level level; 194 | EXPECT_TRUE(level.open(TEST_WORLD_ROOT)); 195 | auto *db = level.db(); 196 | auto *it = db->NewIterator(leveldb::ReadOptions()); 197 | for (it->SeekToFirst(); it->Valid(); it->Next()) { 198 | auto k = bl::chunk_key::parse(it->key().ToString()); 199 | if (k.type == chunk_key::ActorDigestVersion && it->value().size() > 1) { 200 | utils::write_file(DUMP_ROOT + "actor_digits/" + std::to_string(k.cp.x) + "_" + 201 | std::to_string(k.cp.z) + ".actor_digits", 202 | it->value().data(), it->value().size()); 203 | } 204 | } 205 | delete it; 206 | level.close(); 207 | } 208 | 209 | TEST(BedrockLevel, SaveInvalid) { 210 | using namespace bl; 211 | bl::bedrock_level level; 212 | 213 | EXPECT_TRUE(level.open(TEST_WORLD_ROOT)); 214 | auto *db = level.db(); 215 | size_t idx = 0; 216 | auto *it = db->NewIterator(leveldb::ReadOptions()); 217 | 218 | for (it->SeekToFirst(); it->Valid(); it->Next()) { 219 | auto ck = bl::chunk_key::parse(it->key().ToString()); 220 | if (ck.valid()) { 221 | // std::cout << 
"Chunk key: " << ck.to_string() << std::endl; 222 | continue; 223 | } 224 | 225 | auto actor_key = bl::actor_key::parse(it->key().ToString()); 226 | if (actor_key.valid()) { 227 | // std::cout << "Actor key: " << actor_key.to_string() << std::endl; 228 | continue; 229 | } 230 | 231 | auto digest_key = bl::actor_digest_key::parse(it->key().ToString()); 232 | if (digest_key.valid()) { 233 | // std::cout << "Digest key: " << digest_key.to_string() << std::endl; 234 | continue; 235 | } 236 | 237 | auto village_key = bl::village_key::parse(it->key().ToString()); 238 | if (village_key.valid()) { 239 | // std::cout << "Village Key: " << village_key.to_string() << std::endl; 240 | continue; 241 | } 242 | 243 | utils::write_file(DUMP_ROOT + "invalid/" + std::to_string(idx) + ".key", it->key().data(), 244 | it->key().size()); 245 | utils::write_file(DUMP_ROOT + "invalid/" + std::to_string(idx) + ".nbt", it->value().data(), 246 | it->value().size()); 247 | ++idx; 248 | } 249 | delete it; 250 | level.close(); 251 | } 252 | 253 | TEST(BedrockLevel, DumpActors) { 254 | using namespace bl; 255 | bl::bedrock_level level; 256 | EXPECT_TRUE(level.open(TEST_WORLD_ROOT)); 257 | auto *db = level.db(); 258 | auto *it = db->NewIterator(leveldb::ReadOptions()); 259 | for (it->SeekToFirst(); it->Valid(); it->Next()) { 260 | auto key = bl::actor_key::parse(it->key().ToString()); 261 | if (key.valid()) { 262 | auto path = DUMP_ROOT + "/actors/" + std::to_string(key.actor_uid) + ".palette"; 263 | utils::write_file(path, it->value().data(), it->value().size()); 264 | } 265 | } 266 | delete it; 267 | level.close(); 268 | } 269 | -------------------------------------------------------------------------------- /tests/key_test.cpp: -------------------------------------------------------------------------------- 1 | // 2 | // Created by xhy on 2023/3/31. 3 | // 4 | 5 | #include 6 | 7 | #include "bedrock_key.h" 8 | 9 | TEST(ChunkPos, VailidCheck) { 10 | using namespace bl; 11 | bl::chunk_pos c1{1, 1, -1}; 12 | bl::chunk_pos c2{1, 1, 0}; 13 | bl::chunk_pos c3{1, 1, 1}; 14 | bl::chunk_pos c4{1, 1, 2}; 15 | bl::chunk_pos c5{1, 1, 3}; 16 | EXPECT_TRUE(!c1.valid()); 17 | EXPECT_TRUE(c2.valid()); 18 | EXPECT_TRUE(c3.valid()); 19 | EXPECT_TRUE(c4.valid()); 20 | EXPECT_TRUE(!c5.valid()); 21 | } 22 | 23 | TEST(ChunkKey, Convert) { 24 | using namespace bl; 25 | chunk_pos p{1, 1, 0}; 26 | chunk_key key{bl::chunk_key::RandomTicks, p, 0}; 27 | auto data = key.to_raw(); 28 | auto cov = chunk_key::parse(data); 29 | EXPECT_TRUE(cov.cp == key.cp); 30 | EXPECT_TRUE(cov.type == key.type); 31 | 32 | key.type = bl::chunk_key::SubChunkTerrain; 33 | key.y_index = 13; 34 | cov = chunk_key::parse(key.to_raw()); 35 | 36 | EXPECT_TRUE(cov.cp == key.cp); 37 | EXPECT_TRUE(cov.type == key.type); 38 | EXPECT_TRUE(cov.y_index == key.y_index); 39 | } 40 | 41 | TEST(VillageKey, Convert) { 42 | const std::string raw_id = "VILLAGE_241c7732-221a-4266-9fe9-cdd40d9bdeb0_INFO"; 43 | bl::village_key key = bl::village_key::parse(raw_id); 44 | EXPECT_TRUE(key.valid()); 45 | EXPECT_TRUE(key.uuid == "241c7732-221a-4266-9fe9-cdd40d9bdeb0"); 46 | EXPECT_TRUE(key.type == bl::village_key::INFO); 47 | EXPECT_TRUE(key.to_raw() == raw_id); 48 | } 49 | -------------------------------------------------------------------------------- /tests/level_dat_test.cpp: -------------------------------------------------------------------------------- 1 | // 2 | // Created by xhy on 2023/6/21. 
3 | // 4 | #include "level_dat.h" 5 | 6 | #include "gtest/gtest.h" 7 | #include "utils.h" 8 | TEST(LevelDat, Load) { 9 | const std::string path = R"(C:\Users\xhy\dev\bedrock-level\data\worlds\a\level.dat)"; 10 | bl::level_dat dat; 11 | EXPECT_TRUE(dat.load_from_file(path)); 12 | BL_LOGGER("Spawn position: %d %d %d", dat.spawn_position().x, dat.spawn_position().y, 13 | dat.spawn_position().z); 14 | BL_LOGGER("Level name: %s", dat.level_name().c_str()); 15 | BL_LOGGER("Storage Version: %d", dat.storage_version()); 16 | } 17 | 18 | TEST(LevelDat, MemoryFree) { 19 | const std::string path = R"(C:\Users\xhy\dev\bedrock-level\data\worlds\a\level.dat)"; 20 | auto *dat = new bl::level_dat; 21 | EXPECT_TRUE(dat->load_from_file(path)); 22 | delete dat; 23 | } 24 | -------------------------------------------------------------------------------- /tests/palettes_test.cpp: -------------------------------------------------------------------------------- 1 | // 2 | // Created by xhy on 2023/3/31. 3 | // 4 | 5 | #include 6 | 7 | #include 8 | 9 | #include "palette.h" 10 | #include "utils.h" 11 | 12 | TEST(Palette, TagCopy) { 13 | using namespace bl::palette; 14 | auto data = bl::utils::read_file( 15 | R"(C:\Users\xhy\dev\bedrock-level\data\dumps\actors\144115188092633088.palette)"); 16 | int r = 0; 17 | auto* nbt = bl::palette::read_one_palette(data.data(), r); 18 | 19 | auto nbt2 = *nbt; 20 | bl::palette::compound_tag nbt3(*nbt); 21 | auto* nbt4 = nbt->copy(); 22 | 23 | std::stringstream f1; 24 | nbt->write(f1, 0); 25 | delete nbt; 26 | 27 | std::stringstream f2, f3, f4; 28 | nbt2.write(f2, 0); 29 | nbt3.write(f3, 0); 30 | nbt4->write(f4, 0); 31 | 32 | EXPECT_TRUE(f1.str() == f2.str() && f2.str() == f3.str() && f3.str() == f4.str()); 33 | } 34 | 35 | TEST(Palette, MemoryFree) { 36 | auto data = bl::utils::read_file( 37 | R"(C:\Users\xhy\dev\bedrock-level\data\dumps\bes\-1_-1.blockentity.palette)"); 38 | EXPECT_TRUE(!data.empty()); 39 | auto palettes = bl::palette::read_palette_to_end(data.data(), data.size()); 40 | BL_LOGGER("Palette size is %d", palettes.size()); 41 | for (auto& p : palettes) { 42 | EXPECT_TRUE(p); 43 | delete p; 44 | } 45 | } 46 | 47 | TEST(Palete, ToRaw) { 48 | using namespace bl::palette; 49 | auto data = bl::utils::read_file( 50 | R"(C:\Users\xhy\dev\bedrock-level\data\dumps\actors\144115188092633088.palette)"); 51 | int r = 0; 52 | auto* nbt = bl::palette::read_one_palette(data.data(), r); 53 | auto raw = nbt->to_raw(); 54 | BL_LOGGER("raw size is %zu nbt data size is %zu", raw.size(), data.size()); 55 | bl::utils::write_file("demo.nbt", raw.data(), raw.size()); 56 | } 57 | 58 | TEST(Palete, ToRaw2) { 59 | using namespace bl::palette; 60 | auto data = bl::utils::read_file(R"(demo.nbt)"); 61 | int r = 0; 62 | auto* nbt = bl::palette::read_one_palette(data.data(), r); 63 | nbt->write(std::cout, 0); 64 | } 65 | 66 | TEST(Palette, ReadOne) { 67 | using namespace bl::palette; 68 | auto data = bl::utils::read_file( 69 | R"(C:\Users\xhy\dev\bedrock-level\data\dumps\actors\144115188092633088.palette)"); 70 | int r = 0; 71 | auto* nbt = bl::palette::read_one_palette(data.data(), r); 72 | nbt->write(std::cout, 0); 73 | } 74 | -------------------------------------------------------------------------------- /tests/stb_image_test.cpp: -------------------------------------------------------------------------------- 1 | // 2 | // Created by xhy on 2023/6/18. 
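// Editor's note (not part of the original sources): a hedged sketch of the NBT palette API used by
// tests/palettes_test.cpp above; read_palette_to_end returns raw pointers that the caller must delete.
#include <iostream>

#include "palette.h"
#include "utils.h"

int main() {
    auto data = bl::utils::read_file("demo.nbt");  // any dumped palette/NBT blob
    if (data.empty()) return 1;
    auto palettes = bl::palette::read_palette_to_end(data.data(), data.size());
    for (auto *tag : palettes) {
        tag->write(std::cout, 0);  // pretty-print the compound tag
        delete tag;                // ownership stays with the caller
    }
    return 0;
}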
3 | // 4 | 5 | #include 6 | #include "stb/stb_image_write.h" 7 | #include "utils.h" 8 | 9 | TEST(StbImage, WritePNG) { 10 | const int c = 3; 11 | const int w = 60; 12 | const int h = 160; 13 | std::vector data(c * w * h, 0); 14 | for (int y = 0; y < w; y++) { 15 | for (int x = 0; x < h; x++) { 16 | data[3 * (y * h + x)] = 0; 17 | data[3 * (y * h + x) + 1] = 128; 18 | data[3 * (y * h + x) + 2] = 128; 19 | } 20 | } 21 | 22 | stbi_write_png("a.png", w, h, c, data.data(), 0); 23 | } -------------------------------------------------------------------------------- /tests/sub_chunk_test.cpp: -------------------------------------------------------------------------------- 1 | // 2 | // Created by xhy on 2023/3/29. 3 | // 4 | 5 | #include "sub_chunk.h" 6 | 7 | #include 8 | 9 | #include "utils.h" 10 | 11 | TEST(SubChunk, BasicRead) { 12 | auto data = bl::utils::read_file( 13 | R"(C:\Users\xhy\dev\bedrock-level\data\dumps\sub_chunks\0_-1_0.subchunk)"); 14 | bl::sub_chunk sub_chunk; 15 | 16 | sub_chunk.load(data.data(), data.size()); 17 | sub_chunk.dump_to_file(stdout); 18 | } 19 | 20 | TEST(SubChunk, LayerFree) { 21 | auto *l = new bl::sub_chunk::layer(); 22 | auto data = bl::utils::read_file( 23 | R"(C:\Users\xhy\dev\bedrock-level\data\dumps\bes\-1_-1.blockentity.palette)"); 24 | EXPECT_TRUE(!data.empty()); 25 | auto palettes = bl::palette::read_palette_to_end(data.data(), data.size()); 26 | BL_LOGGER("Palette size is %d", palettes.size()); 27 | for (auto &p : palettes) { 28 | l->palettes.push_back(p); 29 | } 30 | } 31 | 32 | TEST(SubChunk, FreeMemory) { 33 | auto data = bl::utils::read_file( 34 | R"(C:\Users\xhy\dev\bedrock-level\data\dumps\sub_chunks\0_-1_0.subchunk)"); 35 | auto *sub = new bl::sub_chunk(); 36 | EXPECT_TRUE(sub); 37 | EXPECT_TRUE(sub->load(data.data(), data.size())); 38 | 39 | struct A { 40 | std::map subs; 41 | ~A() { 42 | for (auto &x : subs) delete x.second; 43 | } 44 | }; 45 | auto *a = new A(); 46 | a->subs[1] = sub; 47 | delete a; 48 | } 49 | 50 | TEST(SubChunk, GetBlock) { 51 | auto data = bl::utils::read_file( 52 | R"(C:\Users\xhy\dev\bedrock-level\data\dumps\sub_chunks\0_-1_0.subchunk)"); 53 | auto *sub = new bl::sub_chunk(); 54 | EXPECT_TRUE(sub); 55 | EXPECT_TRUE(sub->load(data.data(), data.size())); 56 | for (int i = 0; i < 16; i++) { 57 | auto raw = sub->get_block_raw(0, 0, i); 58 | raw->write(std::cout, 0); 59 | } 60 | delete sub; 61 | } 62 | -------------------------------------------------------------------------------- /tests/utils_test.cpp: -------------------------------------------------------------------------------- 1 | // 2 | // Created by xhy on 2023/3/29. 3 | // 4 | 5 | #include "utils.h" 6 | 7 | #include 8 | 9 | TEST(Utils, Logger) { 10 | int a = 1; 11 | BL_ERROR("This is a error message with a = %d", a); 12 | BL_LOGGER("This is a logger message with a = %d", a); 13 | } -------------------------------------------------------------------------------- /third/leveldb/cache.h: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2011 The LevelDB Authors. All rights reserved. 2 | // Use of this source code is governed by a BSD-style license that can be 3 | // found in the LICENSE file. See the AUTHORS file for names of contributors. 4 | // 5 | // A Cache is an interface that maps keys to values. It has internal 6 | // synchronization and may be safely accessed concurrently from 7 | // multiple threads. It may automatically evict entries to make room 8 | // for new entries. 
Values have a specified charge against the cache 9 | // capacity. For example, a cache where the values are variable 10 | // length strings, may use the length of the string as the charge for 11 | // the string. 12 | // 13 | // A builtin cache implementation with a least-recently-used eviction 14 | // policy is provided. Clients may use their own implementations if 15 | // they want something more sophisticated (like scan-resistance, a 16 | // custom eviction policy, variable cache sizing, etc.) 17 | 18 | #ifndef STORAGE_LEVELDB_INCLUDE_CACHE_H_ 19 | #define STORAGE_LEVELDB_INCLUDE_CACHE_H_ 20 | 21 | #include 22 | 23 | #include "leveldb/export.h" 24 | #include "leveldb/slice.h" 25 | 26 | namespace leveldb { 27 | 28 | class LEVELDB_EXPORT Cache; 29 | 30 | // Create a new cache with a fixed size capacity. This implementation 31 | // of Cache uses a least-recently-used eviction policy. 32 | LEVELDB_EXPORT Cache* NewLRUCache(size_t capacity); 33 | 34 | class LEVELDB_EXPORT Cache { 35 | public: 36 | Cache() = default; 37 | 38 | Cache(const Cache&) = delete; 39 | Cache& operator=(const Cache&) = delete; 40 | 41 | // Destroys all existing entries by calling the "deleter" 42 | // function that was passed to the constructor. 43 | virtual ~Cache(); 44 | 45 | // Opaque handle to an entry stored in the cache. 46 | struct Handle {}; 47 | 48 | // Insert a mapping from key->value into the cache and assign it 49 | // the specified charge against the total cache capacity. 50 | // 51 | // Returns a handle that corresponds to the mapping. The caller 52 | // must call this->Release(handle) when the returned mapping is no 53 | // longer needed. 54 | // 55 | // When the inserted entry is no longer needed, the key and 56 | // value will be passed to "deleter". 57 | virtual Handle* Insert(const Slice& key, void* value, size_t charge, 58 | void (*deleter)(const Slice& key, void* value)) = 0; 59 | 60 | // If the cache has no mapping for "key", returns nullptr. 61 | // 62 | // Else return a handle that corresponds to the mapping. The caller 63 | // must call this->Release(handle) when the returned mapping is no 64 | // longer needed. 65 | virtual Handle* Lookup(const Slice& key) = 0; 66 | 67 | // Release a mapping returned by a previous Lookup(). 68 | // REQUIRES: handle must not have been released yet. 69 | // REQUIRES: handle must have been returned by a method on *this. 70 | virtual void Release(Handle* handle) = 0; 71 | 72 | // Return the value encapsulated in a handle returned by a 73 | // successful Lookup(). 74 | // REQUIRES: handle must not have been released yet. 75 | // REQUIRES: handle must have been returned by a method on *this. 76 | virtual void* Value(Handle* handle) = 0; 77 | 78 | // If the cache contains entry for key, erase it. Note that the 79 | // underlying entry will be kept around until all existing handles 80 | // to it have been released. 81 | virtual void Erase(const Slice& key) = 0; 82 | 83 | // Return a new numeric id. May be used by multiple clients who are 84 | // sharing the same cache to partition the key space. Typically the 85 | // client will allocate a new id at startup and prepend the id to 86 | // its cache keys. 87 | virtual uint64_t NewId() = 0; 88 | 89 | // Remove all cache entries that are not actively in use. Memory-constrained 90 | // applications may wish to call this method to reduce memory usage. 91 | // Default implementation of Prune() does nothing. Subclasses are strongly 92 | // encouraged to override the default implementation. 
A future release of 93 | // leveldb may change Prune() to a pure abstract method. 94 | virtual void Prune() {} 95 | 96 | // Return an estimate of the combined charges of all elements stored in the 97 | // cache. 98 | virtual size_t TotalCharge() const = 0; 99 | 100 | private: 101 | void LRU_Remove(Handle* e); 102 | void LRU_Append(Handle* e); 103 | void Unref(Handle* e); 104 | 105 | struct Rep; 106 | Rep* rep_; 107 | }; 108 | 109 | } // namespace leveldb 110 | 111 | #endif // STORAGE_LEVELDB_INCLUDE_CACHE_H_ 112 | -------------------------------------------------------------------------------- /third/leveldb/comparator.h: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2011 The LevelDB Authors. All rights reserved. 2 | // Use of this source code is governed by a BSD-style license that can be 3 | // found in the LICENSE file. See the AUTHORS file for names of contributors. 4 | 5 | #ifndef STORAGE_LEVELDB_INCLUDE_COMPARATOR_H_ 6 | #define STORAGE_LEVELDB_INCLUDE_COMPARATOR_H_ 7 | 8 | #include 9 | 10 | #include "leveldb/export.h" 11 | 12 | namespace leveldb { 13 | 14 | class Slice; 15 | 16 | // A Comparator object provides a total order across slices that are 17 | // used as keys in an sstable or a database. A Comparator implementation 18 | // must be thread-safe since leveldb may invoke its methods concurrently 19 | // from multiple threads. 20 | class LEVELDB_EXPORT Comparator { 21 | public: 22 | virtual ~Comparator(); 23 | 24 | // Three-way comparison. Returns value: 25 | // < 0 iff "a" < "b", 26 | // == 0 iff "a" == "b", 27 | // > 0 iff "a" > "b" 28 | virtual int Compare(const Slice& a, const Slice& b) const = 0; 29 | 30 | // The name of the comparator. Used to check for comparator 31 | // mismatches (i.e., a DB created with one comparator is 32 | // accessed using a different comparator. 33 | // 34 | // The client of this package should switch to a new name whenever 35 | // the comparator implementation changes in a way that will cause 36 | // the relative ordering of any two keys to change. 37 | // 38 | // Names starting with "leveldb." are reserved and should not be used 39 | // by any clients of this package. 40 | virtual const char* Name() const = 0; 41 | 42 | // Advanced functions: these are used to reduce the space requirements 43 | // for internal data structures like index blocks. 44 | 45 | // If *start < limit, changes *start to a short string in [start,limit). 46 | // Simple comparator implementations may return with *start unchanged, 47 | // i.e., an implementation of this method that does nothing is correct. 48 | virtual void FindShortestSeparator(std::string* start, 49 | const Slice& limit) const = 0; 50 | 51 | // Changes *key to a short string >= *key. 52 | // Simple comparator implementations may return with *key unchanged, 53 | // i.e., an implementation of this method that does nothing is correct. 54 | virtual void FindShortSuccessor(std::string* key) const = 0; 55 | }; 56 | 57 | // Return a builtin comparator that uses lexicographic byte-wise 58 | // ordering. The result remains the property of this module and 59 | // must not be deleted. 
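// Editor's note (not part of the original sources): a hedged sketch of the Cache interface declared
// in leveldb/cache.h above, using the built-in LRU implementation.
#include <cassert>
#include <string>

#include "leveldb/cache.h"

int main() {
    leveldb::Cache *cache = leveldb::NewLRUCache(64 * 1024);  // capacity acts as a total charge budget
    auto deleter = [](const leveldb::Slice &, void *value) {
        delete static_cast<std::string *>(value);  // invoked when the entry is evicted or erased
    };
    leveldb::Cache::Handle *handle = cache->Insert("key", new std::string("value"), 1, deleter);
    cache->Release(handle);  // releasing the handle keeps the entry cached
    handle = cache->Lookup("key");
    if (handle != nullptr) {
        assert(*static_cast<std::string *>(cache->Value(handle)) == "value");
        cache->Release(handle);
    }
    delete cache;  // destroys remaining entries through the deleter
    return 0;
}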
60 | LEVELDB_EXPORT const Comparator* BytewiseComparator(); 61 | 62 | } // namespace leveldb 63 | 64 | #endif // STORAGE_LEVELDB_INCLUDE_COMPARATOR_H_ 65 | -------------------------------------------------------------------------------- /third/leveldb/compressor.h: -------------------------------------------------------------------------------- 1 | #ifndef LEVELDB_MCPE_INCLUDE_COMPRESSOR_H_ 2 | #define LEVELDB_MCPE_INCLUDE_COMPRESSOR_H_ 3 | 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | 10 | #include "leveldb/export.h" 11 | 12 | namespace leveldb { 13 | class Slice; 14 | 15 | class LEVELDB_EXPORT Compressor 16 | { 17 | public: 18 | 19 | uint64_t inputBytes = 0, compressedBytes = 0; 20 | 21 | //an ID that has to be unique across the whole system 22 | const char uniqueCompressionID; 23 | 24 | virtual ~Compressor() = default; 25 | 26 | Compressor(char uniqueCompressionID) : 27 | uniqueCompressionID(uniqueCompressionID) { 28 | 29 | } 30 | 31 | double getAverageCompression() const 32 | { 33 | return inputBytes ? ((double)compressedBytes / (double)inputBytes) : 0; 34 | } 35 | 36 | void resetAverageCompressionStats() { 37 | inputBytes = compressedBytes = 0; 38 | } 39 | 40 | void compress(const char* input, size_t length, ::std::string& output) { 41 | 42 | compressImpl(input, length, output); 43 | 44 | inputBytes += length; 45 | compressedBytes += output.length(); 46 | } 47 | 48 | void compress(const std::string& in, std::string& out) { 49 | compress(in.data(), in.length(), out); 50 | } 51 | 52 | virtual void compressImpl(const char* input, size_t length, ::std::string& output) const = 0; 53 | 54 | virtual bool decompress(const char* input, size_t length, ::std::string &output) const = 0; 55 | 56 | bool decompress(const std::string& input, ::std::string& output) const { 57 | return decompress(input.data(), input.length(), output); 58 | } 59 | 60 | protected: 61 | private: 62 | }; 63 | } 64 | 65 | #endif 66 | -------------------------------------------------------------------------------- /third/leveldb/db.h: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2011 The LevelDB Authors. All rights reserved. 2 | // Use of this source code is governed by a BSD-style license that can be 3 | // found in the LICENSE file. See the AUTHORS file for names of contributors. 4 | 5 | #ifndef STORAGE_LEVELDB_INCLUDE_DB_H_ 6 | #define STORAGE_LEVELDB_INCLUDE_DB_H_ 7 | 8 | #include 9 | #include 10 | 11 | #include "leveldb/export.h" 12 | #include "leveldb/iterator.h" 13 | #include "leveldb/options.h" 14 | 15 | namespace leveldb { 16 | 17 | // Update CMakeLists.txt if you change these 18 | static const int kMajorVersion = 1; 19 | static const int kMinorVersion = 23; 20 | 21 | struct Options; 22 | struct ReadOptions; 23 | struct WriteOptions; 24 | class WriteBatch; 25 | 26 | // Abstract handle to particular state of a DB. 27 | // A Snapshot is an immutable object and can therefore be safely 28 | // accessed from multiple threads without any external synchronization. 29 | class LEVELDB_EXPORT Snapshot { 30 | protected: 31 | virtual ~Snapshot(); 32 | }; 33 | 34 | // A range of keys 35 | struct LEVELDB_EXPORT Range { 36 | Range() = default; 37 | Range(const Slice& s, const Slice& l) : start(s), limit(l) {} 38 | 39 | Slice start; // Included in the range 40 | Slice limit; // Not included in the range 41 | }; 42 | 43 | // A DB is a persistent ordered map from keys to values. 
44 | // A DB is safe for concurrent access from multiple threads without 45 | // any external synchronization. 46 | class LEVELDB_EXPORT DB { 47 | public: 48 | // Open the database with the specified "name". 49 | // Stores a pointer to a heap-allocated database in *dbptr and returns 50 | // OK on success. 51 | // Stores nullptr in *dbptr and returns a non-OK status on error. 52 | // Caller should delete *dbptr when it is no longer needed. 53 | static Status Open(const Options& options, const std::string& name, 54 | DB** dbptr); 55 | 56 | DB() = default; 57 | 58 | DB(const DB&) = delete; 59 | DB& operator=(const DB&) = delete; 60 | 61 | virtual ~DB(); 62 | 63 | // Set the database entry for "key" to "value". Returns OK on success, 64 | // and a non-OK status on error. 65 | // Note: consider setting options.sync = true. 66 | virtual Status Put(const WriteOptions& options, const Slice& key, 67 | const Slice& value) = 0; 68 | 69 | // Remove the database entry (if any) for "key". Returns OK on 70 | // success, and a non-OK status on error. It is not an error if "key" 71 | // did not exist in the database. 72 | // Note: consider setting options.sync = true. 73 | virtual Status Delete(const WriteOptions& options, const Slice& key) = 0; 74 | 75 | // Apply the specified updates to the database. 76 | // Returns OK on success, non-OK on failure. 77 | // Note: consider setting options.sync = true. 78 | virtual Status Write(const WriteOptions& options, WriteBatch* updates) = 0; 79 | 80 | // If the database contains an entry for "key" store the 81 | // corresponding value in *value and return OK. 82 | // 83 | // If there is no entry for "key" leave *value unchanged and return 84 | // a status for which Status::IsNotFound() returns true. 85 | // 86 | // May return some other Status on an error. 87 | virtual Status Get(const ReadOptions& options, const Slice& key, 88 | std::string* value) = 0; 89 | 90 | // Return a heap-allocated iterator over the contents of the database. 91 | // The result of NewIterator() is initially invalid (caller must 92 | // call one of the Seek methods on the iterator before using it). 93 | // 94 | // Caller should delete the iterator when it is no longer needed. 95 | // The returned iterator should be deleted before this db is deleted. 96 | virtual Iterator* NewIterator(const ReadOptions& options) = 0; 97 | 98 | // Return a handle to the current DB state. Iterators created with 99 | // this handle will all observe a stable snapshot of the current DB 100 | // state. The caller must call ReleaseSnapshot(result) when the 101 | // snapshot is no longer needed. 102 | virtual const Snapshot* GetSnapshot() = 0; 103 | 104 | // Release a previously acquired snapshot. The caller must not 105 | // use "snapshot" after this call. 106 | virtual void ReleaseSnapshot(const Snapshot* snapshot) = 0; 107 | 108 | // DB implementations can export properties about their state 109 | // via this method. If "property" is a valid property understood by this 110 | // DB implementation, fills "*value" with its current value and returns 111 | // true. Otherwise returns false. 112 | // 113 | // 114 | // Valid property names include: 115 | // 116 | // "leveldb.num-files-at-level" - return the number of files at level , 117 | // where is an ASCII representation of a level number (e.g. "0"). 118 | // "leveldb.stats" - returns a multi-line string that describes statistics 119 | // about the internal operation of the DB. 
120 | // "leveldb.sstables" - returns a multi-line string that describes all 121 | // of the sstables that make up the db contents. 122 | // "leveldb.approximate-memory-usage" - returns the approximate number of 123 | // bytes of memory in use by the DB. 124 | virtual bool GetProperty(const Slice& property, std::string* value) = 0; 125 | 126 | // For each i in [0,n-1], store in "sizes[i]", the approximate 127 | // file system space used by keys in "[range[i].start .. range[i].limit)". 128 | // 129 | // Note that the returned sizes measure file system space usage, so 130 | // if the user data compresses by a factor of ten, the returned 131 | // sizes will be one-tenth the size of the corresponding user data size. 132 | // 133 | // The results may not include the sizes of recently written data. 134 | virtual void GetApproximateSizes(const Range* range, int n, 135 | uint64_t* sizes) = 0; 136 | 137 | // Compact the underlying storage for the key range [*begin,*end]. 138 | // In particular, deleted and overwritten versions are discarded, 139 | // and the data is rearranged to reduce the cost of operations 140 | // needed to access the data. This operation should typically only 141 | // be invoked by users who understand the underlying implementation. 142 | // 143 | // begin==nullptr is treated as a key before all keys in the database. 144 | // end==nullptr is treated as a key after all keys in the database. 145 | // Therefore the following call will compact the entire database: 146 | // db->CompactRange(nullptr, nullptr); 147 | virtual void CompactRange(const Slice* begin, const Slice* end) = 0; 148 | }; 149 | 150 | // Destroy the contents of the specified database. 151 | // Be very careful using this method. 152 | // 153 | // Note: For backwards compatibility, if DestroyDB is unable to list the 154 | // database files, Status::OK() will still be returned masking this failure. 155 | LEVELDB_EXPORT Status DestroyDB(const std::string& name, 156 | const Options& options); 157 | 158 | // If a DB cannot be opened, you may attempt to call this method to 159 | // resurrect as much of the contents of the database as possible. 160 | // Some data may be lost, so be careful when calling this function 161 | // on a database that contains important information. 162 | LEVELDB_EXPORT Status RepairDB(const std::string& dbname, 163 | const Options& options); 164 | 165 | } // namespace leveldb 166 | 167 | #endif // STORAGE_LEVELDB_INCLUDE_DB_H_ 168 | -------------------------------------------------------------------------------- /third/leveldb/decompress_allocator.h: -------------------------------------------------------------------------------- 1 | #ifndef LEVELDB_DECOMPRESS_ALLOCATOR_H_ 2 | #define LEVELDB_DECOMPRESS_ALLOCATOR_H_ 3 | 4 | #include 5 | #include 6 | #include 7 | 8 | #include "leveldb/export.h" 9 | 10 | namespace leveldb { 11 | class LEVELDB_EXPORT DecompressAllocator { 12 | public: 13 | virtual ~DecompressAllocator() = default; 14 | 15 | virtual std::string get(); 16 | virtual void release(std::string&& string); 17 | 18 | virtual void prune(); 19 | 20 | protected: 21 | std::mutex mutex; 22 | std::vector stack; 23 | }; 24 | } 25 | 26 | #endif -------------------------------------------------------------------------------- /third/leveldb/dumpfile.h: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2014 The LevelDB Authors. All rights reserved. 
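// Editor's note (not part of the original sources): a hedged sketch of the core DB calls declared in
// leveldb/db.h above. Opening a real Bedrock world additionally requires registering the zlib
// compressors from zlib_compressor.h in Options::compressors; that step is omitted here.
#include <cassert>
#include <iostream>
#include <string>

#include "leveldb/db.h"

int main() {
    leveldb::Options options;
    options.create_if_missing = true;
    leveldb::DB *db = nullptr;
    leveldb::Status s = leveldb::DB::Open(options, "./testdb", &db);
    if (!s.ok()) {
        std::cerr << s.ToString() << std::endl;
        return 1;
    }
    db->Put(leveldb::WriteOptions(), "hello", "world");
    std::string value;
    s = db->Get(leveldb::ReadOptions(), "hello", &value);
    assert(s.ok() && value == "world");

    auto *it = db->NewIterator(leveldb::ReadOptions());
    for (it->SeekToFirst(); it->Valid(); it->Next()) {
        std::cout << it->key().ToString() << " -> " << it->value().size() << " bytes" << std::endl;
    }
    delete it;  // iterators must be deleted before the DB itself
    delete db;
    return 0;
}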
2 | // Use of this source code is governed by a BSD-style license that can be 3 | // found in the LICENSE file. See the AUTHORS file for names of contributors. 4 | 5 | #ifndef STORAGE_LEVELDB_INCLUDE_DUMPFILE_H_ 6 | #define STORAGE_LEVELDB_INCLUDE_DUMPFILE_H_ 7 | 8 | #include 9 | 10 | #include "leveldb/env.h" 11 | #include "leveldb/export.h" 12 | #include "leveldb/status.h" 13 | 14 | namespace leveldb { 15 | 16 | // Dump the contents of the file named by fname in text format to 17 | // *dst. Makes a sequence of dst->Append() calls; each call is passed 18 | // the newline-terminated text corresponding to a single item found 19 | // in the file. 20 | // 21 | // Returns a non-OK result if fname does not name a leveldb storage 22 | // file, or if the file cannot be read. 23 | LEVELDB_EXPORT Status DumpFile(Env* env, const std::string& fname, 24 | WritableFile* dst); 25 | 26 | } // namespace leveldb 27 | 28 | #endif // STORAGE_LEVELDB_INCLUDE_DUMPFILE_H_ 29 | -------------------------------------------------------------------------------- /third/leveldb/export.h: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2017 The LevelDB Authors. All rights reserved. 2 | // Use of this source code is governed by a BSD-style license that can be 3 | // found in the LICENSE file. See the AUTHORS file for names of contributors. 4 | 5 | #ifndef STORAGE_LEVELDB_INCLUDE_EXPORT_H_ 6 | #define STORAGE_LEVELDB_INCLUDE_EXPORT_H_ 7 | 8 | #if !defined(LEVELDB_EXPORT) 9 | 10 | #if defined(LEVELDB_SHARED_LIBRARY) 11 | #if defined(_WIN32) 12 | 13 | #if defined(LEVELDB_COMPILE_LIBRARY) 14 | #define LEVELDB_EXPORT __declspec(dllexport) 15 | #else 16 | #define LEVELDB_EXPORT __declspec(dllimport) 17 | #endif // defined(LEVELDB_COMPILE_LIBRARY) 18 | 19 | #else // defined(_WIN32) 20 | #if defined(LEVELDB_COMPILE_LIBRARY) 21 | #define LEVELDB_EXPORT __attribute__((visibility("default"))) 22 | #else 23 | #define LEVELDB_EXPORT 24 | #endif 25 | #endif // defined(_WIN32) 26 | 27 | #else // defined(LEVELDB_SHARED_LIBRARY) 28 | #define LEVELDB_EXPORT 29 | #endif 30 | 31 | #endif // !defined(LEVELDB_EXPORT) 32 | 33 | #endif // STORAGE_LEVELDB_INCLUDE_EXPORT_H_ 34 | -------------------------------------------------------------------------------- /third/leveldb/filter_policy.h: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2012 The LevelDB Authors. All rights reserved. 2 | // Use of this source code is governed by a BSD-style license that can be 3 | // found in the LICENSE file. See the AUTHORS file for names of contributors. 4 | // 5 | // A database can be configured with a custom FilterPolicy object. 6 | // This object is responsible for creating a small filter from a set 7 | // of keys. These filters are stored in leveldb and are consulted 8 | // automatically by leveldb to decide whether or not to read some 9 | // information from disk. In many cases, a filter can cut down the 10 | // number of disk seeks form a handful to a single disk seek per 11 | // DB::Get() call. 12 | // 13 | // Most people will want to use the builtin bloom filter support (see 14 | // NewBloomFilterPolicy() below). 
15 | 16 | #ifndef STORAGE_LEVELDB_INCLUDE_FILTER_POLICY_H_ 17 | #define STORAGE_LEVELDB_INCLUDE_FILTER_POLICY_H_ 18 | 19 | #include 20 | 21 | #include "leveldb/export.h" 22 | 23 | namespace leveldb { 24 | 25 | class Slice; 26 | 27 | class LEVELDB_EXPORT FilterPolicy { 28 | public: 29 | virtual ~FilterPolicy(); 30 | 31 | // Return the name of this policy. Note that if the filter encoding 32 | // changes in an incompatible way, the name returned by this method 33 | // must be changed. Otherwise, old incompatible filters may be 34 | // passed to methods of this type. 35 | virtual const char* Name() const = 0; 36 | 37 | // keys[0,n-1] contains a list of keys (potentially with duplicates) 38 | // that are ordered according to the user supplied comparator. 39 | // Append a filter that summarizes keys[0,n-1] to *dst. 40 | // 41 | // Warning: do not change the initial contents of *dst. Instead, 42 | // append the newly constructed filter to *dst. 43 | virtual void CreateFilter(const Slice* keys, int n, 44 | std::string* dst) const = 0; 45 | 46 | // "filter" contains the data appended by a preceding call to 47 | // CreateFilter() on this class. This method must return true if 48 | // the key was in the list of keys passed to CreateFilter(). 49 | // This method may return true or false if the key was not on the 50 | // list, but it should aim to return false with a high probability. 51 | virtual bool KeyMayMatch(const Slice& key, const Slice& filter) const = 0; 52 | }; 53 | 54 | // Return a new filter policy that uses a bloom filter with approximately 55 | // the specified number of bits per key. A good value for bits_per_key 56 | // is 10, which yields a filter with ~ 1% false positive rate. 57 | // 58 | // Callers must delete the result after any database that is using the 59 | // result has been closed. 60 | // 61 | // Note: if you are using a custom comparator that ignores some parts 62 | // of the keys being compared, you must not use NewBloomFilterPolicy() 63 | // and must provide your own FilterPolicy that also ignores the 64 | // corresponding parts of the keys. For example, if the comparator 65 | // ignores trailing spaces, it would be incorrect to use a 66 | // FilterPolicy (like NewBloomFilterPolicy) that does not ignore 67 | // trailing spaces in keys. 68 | LEVELDB_EXPORT const FilterPolicy* NewBloomFilterPolicy(int bits_per_key); 69 | 70 | } // namespace leveldb 71 | 72 | #endif // STORAGE_LEVELDB_INCLUDE_FILTER_POLICY_H_ 73 | -------------------------------------------------------------------------------- /third/leveldb/iterator.h: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2011 The LevelDB Authors. All rights reserved. 2 | // Use of this source code is governed by a BSD-style license that can be 3 | // found in the LICENSE file. See the AUTHORS file for names of contributors. 4 | // 5 | // An iterator yields a sequence of key/value pairs from a source. 6 | // The following class defines the interface. Multiple implementations 7 | // are provided by this library. In particular, iterators are provided 8 | // to access the contents of a Table or a DB. 9 | // 10 | // Multiple threads can invoke const methods on an Iterator without 11 | // external synchronization, but if any of the threads may call a 12 | // non-const method, all threads accessing the same Iterator must use 13 | // external synchronization. 
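// Editor's note (not part of the original sources): a hedged sketch of wiring the bloom filter from
// leveldb/filter_policy.h above into Options; the policy must outlive the DB and is deleted by the caller.
#include "leveldb/db.h"
#include "leveldb/filter_policy.h"

int main() {
    leveldb::Options options;
    options.create_if_missing = true;
    options.filter_policy = leveldb::NewBloomFilterPolicy(10);  // ~10 bits per key, roughly 1% false positives
    leveldb::DB *db = nullptr;
    leveldb::Status s = leveldb::DB::Open(options, "./testdb", &db);
    if (s.ok()) {
        // Point reads now consult the per-table filters before touching disk.
        delete db;
    }
    delete options.filter_policy;  // only after every DB that uses the policy has been closed
    return 0;
}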
14 | 15 | #ifndef STORAGE_LEVELDB_INCLUDE_ITERATOR_H_ 16 | #define STORAGE_LEVELDB_INCLUDE_ITERATOR_H_ 17 | 18 | #include "leveldb/export.h" 19 | #include "leveldb/slice.h" 20 | #include "leveldb/status.h" 21 | 22 | namespace leveldb { 23 | 24 | class LEVELDB_EXPORT Iterator { 25 | public: 26 | Iterator(); 27 | 28 | Iterator(const Iterator&) = delete; 29 | Iterator& operator=(const Iterator&) = delete; 30 | 31 | virtual ~Iterator(); 32 | 33 | // An iterator is either positioned at a key/value pair, or 34 | // not valid. This method returns true iff the iterator is valid. 35 | virtual bool Valid() const = 0; 36 | 37 | // Position at the first key in the source. The iterator is Valid() 38 | // after this call iff the source is not empty. 39 | virtual void SeekToFirst() = 0; 40 | 41 | // Position at the last key in the source. The iterator is 42 | // Valid() after this call iff the source is not empty. 43 | virtual void SeekToLast() = 0; 44 | 45 | // Position at the first key in the source that is at or past target. 46 | // The iterator is Valid() after this call iff the source contains 47 | // an entry that comes at or past target. 48 | virtual void Seek(const Slice& target) = 0; 49 | 50 | // Moves to the next entry in the source. After this call, Valid() is 51 | // true iff the iterator was not positioned at the last entry in the source. 52 | // REQUIRES: Valid() 53 | virtual void Next() = 0; 54 | 55 | // Moves to the previous entry in the source. After this call, Valid() is 56 | // true iff the iterator was not positioned at the first entry in source. 57 | // REQUIRES: Valid() 58 | virtual void Prev() = 0; 59 | 60 | // Return the key for the current entry. The underlying storage for 61 | // the returned slice is valid only until the next modification of 62 | // the iterator. 63 | // REQUIRES: Valid() 64 | virtual Slice key() const = 0; 65 | 66 | // Return the value for the current entry. The underlying storage for 67 | // the returned slice is valid only until the next modification of 68 | // the iterator. 69 | // REQUIRES: Valid() 70 | virtual Slice value() const = 0; 71 | 72 | // If an error has occurred, return it. Else return an ok status. 73 | virtual Status status() const = 0; 74 | 75 | // Clients are allowed to register function/arg1/arg2 triples that 76 | // will be invoked when this iterator is destroyed. 77 | // 78 | // Note that unlike all of the preceding methods, this method is 79 | // not abstract and therefore clients should not override it. 80 | using CleanupFunction = void (*)(void* arg1, void* arg2); 81 | void RegisterCleanup(CleanupFunction function, void* arg1, void* arg2); 82 | 83 | private: 84 | // Cleanup functions are stored in a single-linked list. 85 | // The list's head node is inlined in the iterator. 86 | struct CleanupNode { 87 | // True if the node is not used. Only head nodes might be unused. 88 | bool IsEmpty() const { return function == nullptr; } 89 | // Invokes the cleanup function. 90 | void Run() { 91 | assert(function != nullptr); 92 | (*function)(arg1, arg2); 93 | } 94 | 95 | // The head node is used if the function pointer is not null. 96 | CleanupFunction function; 97 | void* arg1; 98 | void* arg2; 99 | CleanupNode* next; 100 | }; 101 | CleanupNode cleanup_head_; 102 | }; 103 | 104 | // Return an empty iterator (yields nothing). 105 | LEVELDB_EXPORT Iterator* NewEmptyIterator(); 106 | 107 | // Return an empty iterator with the specified status. 
108 | LEVELDB_EXPORT Iterator* NewErrorIterator(const Status& status); 109 | 110 | } // namespace leveldb 111 | 112 | #endif // STORAGE_LEVELDB_INCLUDE_ITERATOR_H_ 113 | -------------------------------------------------------------------------------- /third/leveldb/options.h: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2011 The LevelDB Authors. All rights reserved. 2 | // Use of this source code is governed by a BSD-style license that can be 3 | // found in the LICENSE file. See the AUTHORS file for names of contributors. 4 | 5 | #ifndef STORAGE_LEVELDB_INCLUDE_OPTIONS_H_ 6 | #define STORAGE_LEVELDB_INCLUDE_OPTIONS_H_ 7 | 8 | #include 9 | 10 | #include "leveldb/export.h" 11 | #include "leveldb/compressor.h" 12 | 13 | namespace leveldb { 14 | 15 | class Cache; 16 | class Comparator; 17 | class Env; 18 | class FilterPolicy; 19 | class Logger; 20 | class Snapshot; 21 | class Compressor; 22 | class DecompressAllocator; 23 | 24 | // Options to control the behavior of a database (passed to DB::Open) 25 | struct LEVELDB_EXPORT Options { 26 | // Create an Options object with default values for all fields. 27 | Options(); 28 | 29 | // ------------------- 30 | // Parameters that affect behavior 31 | 32 | // Comparator used to define the order of keys in the table. 33 | // Default: a comparator that uses lexicographic byte-wise ordering 34 | // 35 | // REQUIRES: The client must ensure that the comparator supplied 36 | // here has the same name and orders keys *exactly* the same as the 37 | // comparator provided to previous open calls on the same DB. 38 | const Comparator* comparator; 39 | 40 | // If true, the database will be created if it is missing. 41 | bool create_if_missing = false; 42 | 43 | // If true, an error is raised if the database already exists. 44 | bool error_if_exists = false; 45 | 46 | // If true, the implementation will do aggressive checking of the 47 | // data it is processing and will stop early if it detects any 48 | // errors. This may have unforeseen ramifications: for example, a 49 | // corruption of one DB entry may cause a large number of entries to 50 | // become unreadable or for the entire DB to become unopenable. 51 | bool paranoid_checks = false; 52 | 53 | // Use the specified object to interact with the environment, 54 | // e.g. to read/write files, schedule background work, etc. 55 | // Default: Env::Default() 56 | Env* env; 57 | 58 | // Any internal progress/error information generated by the db will 59 | // be written to info_log if it is non-null, or to a file stored 60 | // in the same directory as the DB contents if info_log is null. 61 | Logger* info_log = nullptr; 62 | 63 | // ------------------- 64 | // Parameters that affect performance 65 | 66 | // Amount of data to build up in memory (backed by an unsorted log 67 | // on disk) before converting to a sorted on-disk file. 68 | // 69 | // Larger values increase performance, especially during bulk loads. 70 | // Up to two write buffers may be held in memory at the same time, 71 | // so you may wish to adjust this parameter to control memory usage. 72 | // Also, a larger write buffer will result in a longer recovery time 73 | // the next time the database is opened. 74 | size_t write_buffer_size = 4 * 1024 * 1024; 75 | 76 | // Number of open files that can be used by the DB. You may need to 77 | // increase this if your database has a large working set (budget 78 | // one open file per 2MB of working set). 
79 | int max_open_files = 1000; 80 | 81 | // Control over blocks (user data is stored in a set of blocks, and 82 | // a block is the unit of reading from disk). 83 | 84 | // If non-null, use the specified cache for blocks. 85 | // If null, leveldb will automatically create and use an 8MB internal cache. 86 | Cache* block_cache = nullptr; 87 | 88 | // Approximate size of user data packed per block. Note that the 89 | // block size specified here corresponds to uncompressed data. The 90 | // actual size of the unit read from disk may be smaller if 91 | // compression is enabled. This parameter can be changed dynamically. 92 | size_t block_size = 4 * 1024; 93 | 94 | // Number of keys between restart points for delta encoding of keys. 95 | // This parameter can be changed dynamically. Most clients should 96 | // leave this parameter alone. 97 | int block_restart_interval = 16; 98 | 99 | // Leveldb will write up to this amount of bytes to a file before 100 | // switching to a new one. 101 | // Most clients should leave this parameter alone. However if your 102 | // filesystem is more efficient with larger files, you could 103 | // consider increasing the value. The downside will be longer 104 | // compactions and hence longer latency/performance hiccups. 105 | // Another reason to increase this parameter might be when you are 106 | // initially populating a large database. 107 | size_t max_file_size = 2 * 1024 * 1024; 108 | 109 | // Compress blocks using the specified compression algorithm. This 110 | // parameter can be changed dynamically. 111 | // 112 | // Default: kSnappyCompression, which gives lightweight but fast 113 | // compression. 114 | // 115 | // Typical speeds of kSnappyCompression on an Intel(R) Core(TM)2 2.4GHz: 116 | // ~200-500MB/s compression 117 | // ~400-800MB/s decompression 118 | // Note that these speeds are significantly faster than most 119 | // persistent storage speeds, and therefore it is typically never 120 | // worth switching to kNoCompression. Even if the input data is 121 | // incompressible, the kSnappyCompression implementation will 122 | // efficiently detect that and will switch to uncompressed mode. 123 | Compressor* compressors[256]; 124 | 125 | // EXPERIMENTAL: If true, append to existing MANIFEST and log files 126 | // when a database is opened. This can significantly speed up open. 127 | // 128 | // Default: currently false, but may become true later. 129 | bool reuse_logs = false; 130 | 131 | // If non-null, use the specified filter policy to reduce disk reads. 132 | // Many applications will benefit from passing the result of 133 | // NewBloomFilterPolicy() here. 134 | const FilterPolicy* filter_policy = nullptr; 135 | }; 136 | 137 | // Options that control read operations 138 | struct LEVELDB_EXPORT ReadOptions { 139 | ReadOptions() = default; 140 | 141 | // If true, all data read from underlying storage will be 142 | // verified against corresponding checksums. 143 | bool verify_checksums = false; 144 | 145 | // Should the data read for this iteration be cached in memory? 146 | // Callers may wish to set this field to false for bulk scans. 147 | bool fill_cache = true; 148 | 149 | // If "snapshot" is non-null, read as of the supplied snapshot 150 | // (which must belong to the DB that is being read and which must 151 | // not have been released). If "snapshot" is null, use an implicit 152 | // snapshot of the state at the beginning of this read operation. 
153 | const Snapshot* snapshot = nullptr; 154 | DecompressAllocator* decompress_allocator = nullptr; 155 | }; 156 | 157 | // Options that control write operations 158 | struct LEVELDB_EXPORT WriteOptions { 159 | WriteOptions() = default; 160 | 161 | // If true, the write will be flushed from the operating system 162 | // buffer cache (by calling WritableFile::Sync()) before the write 163 | // is considered complete. If this flag is true, writes will be 164 | // slower. 165 | // 166 | // If this flag is false, and the machine crashes, some recent 167 | // writes may be lost. Note that if it is just the process that 168 | // crashes (i.e., the machine does not reboot), no writes will be 169 | // lost even if sync==false. 170 | // 171 | // In other words, a DB write with sync==false has similar 172 | // crash semantics as the "write()" system call. A DB write 173 | // with sync==true has similar crash semantics to a "write()" 174 | // system call followed by "fsync()". 175 | bool sync = false; 176 | }; 177 | 178 | } // namespace leveldb 179 | 180 | #endif // STORAGE_LEVELDB_INCLUDE_OPTIONS_H_ 181 | -------------------------------------------------------------------------------- /third/leveldb/slice.h: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2011 The LevelDB Authors. All rights reserved. 2 | // Use of this source code is governed by a BSD-style license that can be 3 | // found in the LICENSE file. See the AUTHORS file for names of contributors. 4 | // 5 | // Slice is a simple structure containing a pointer into some external 6 | // storage and a size. The user of a Slice must ensure that the slice 7 | // is not used after the corresponding external storage has been 8 | // deallocated. 9 | // 10 | // Multiple threads can invoke const methods on a Slice without 11 | // external synchronization, but if any of the threads may call a 12 | // non-const method, all threads accessing the same Slice must use 13 | // external synchronization. 14 | 15 | #ifndef STORAGE_LEVELDB_INCLUDE_SLICE_H_ 16 | #define STORAGE_LEVELDB_INCLUDE_SLICE_H_ 17 | 18 | #include <cassert> 19 | #include <cstddef> 20 | #include <cstring> 21 | #include <string> 22 | 23 | #include "leveldb/export.h" 24 | 25 | namespace leveldb { 26 | 27 | class LEVELDB_EXPORT Slice { 28 | public: 29 | // Create an empty slice. 30 | Slice() : data_(""), size_(0) {} 31 | 32 | // Create a slice that refers to d[0,n-1]. 33 | Slice(const char* d, size_t n) : data_(d), size_(n) {} 34 | 35 | // Create a slice that refers to the contents of "s" 36 | Slice(const std::string& s) : data_(s.data()), size_(s.size()) {} 37 | 38 | // Create a slice that refers to s[0,strlen(s)-1] 39 | Slice(const char* s) : data_(s), size_(strlen(s)) {} 40 | 41 | // Intentionally copyable. 42 | Slice(const Slice&) = default; 43 | Slice& operator=(const Slice&) = default; 44 | 45 | // Return a pointer to the beginning of the referenced data 46 | const char* data() const { return data_; } 47 | 48 | // Return the length (in bytes) of the referenced data 49 | size_t size() const { return size_; } 50 | 51 | // Return true iff the length of the referenced data is zero 52 | bool empty() const { return size_ == 0; } 53 | 54 | // Return the ith byte in the referenced data. 55 | // REQUIRES: n < size() 56 | char operator[](size_t n) const { 57 | assert(n < size()); 58 | return data_[n]; 59 | } 60 | 61 | // Change this slice to refer to an empty array 62 | void clear() { 63 | data_ = ""; 64 | size_ = 0; 65 | } 66 | 67 | // Drop the first "n" bytes from this slice.
68 | void remove_prefix(size_t n) { 69 | assert(n <= size()); 70 | data_ += n; 71 | size_ -= n; 72 | } 73 | 74 | // Return a string that contains the copy of the referenced data. 75 | std::string ToString() const { return std::string(data_, size_); } 76 | 77 | // Three-way comparison. Returns value: 78 | // < 0 iff "*this" < "b", 79 | // == 0 iff "*this" == "b", 80 | // > 0 iff "*this" > "b" 81 | int compare(const Slice& b) const; 82 | 83 | // Return true iff "x" is a prefix of "*this" 84 | bool starts_with(const Slice& x) const { 85 | return ((size_ >= x.size_) && (memcmp(data_, x.data_, x.size_) == 0)); 86 | } 87 | 88 | private: 89 | const char* data_; 90 | size_t size_; 91 | }; 92 | 93 | inline bool operator==(const Slice& x, const Slice& y) { 94 | return ((x.size() == y.size()) && 95 | (memcmp(x.data(), y.data(), x.size()) == 0)); 96 | } 97 | 98 | inline bool operator!=(const Slice& x, const Slice& y) { return !(x == y); } 99 | 100 | inline int Slice::compare(const Slice& b) const { 101 | const size_t min_len = (size_ < b.size_) ? size_ : b.size_; 102 | int r = memcmp(data_, b.data_, min_len); 103 | if (r == 0) { 104 | if (size_ < b.size_) 105 | r = -1; 106 | else if (size_ > b.size_) 107 | r = +1; 108 | } 109 | return r; 110 | } 111 | 112 | } // namespace leveldb 113 | 114 | #endif // STORAGE_LEVELDB_INCLUDE_SLICE_H_ 115 | -------------------------------------------------------------------------------- /third/leveldb/snappy_compressor.h: -------------------------------------------------------------------------------- 1 | #ifndef LEVELDB_MCPE_INCLUDE_SNAPPY_COMPRESSOR_H_ 2 | #define LEVELDB_MCPE_INCLUDE_SNAPPY_COMPRESSOR_H_ 3 | 4 | #include "leveldb/compressor.h" 5 | 6 | namespace leveldb { 7 | class LEVELDB_EXPORT SnappyCompressor : public Compressor 8 | { 9 | public: 10 | static const char SERIALIZE_ID = 1; 11 | 12 | virtual ~SnappyCompressor() = default; 13 | 14 | SnappyCompressor() : 15 | Compressor(SERIALIZE_ID) { 16 | } 17 | 18 | virtual void compressImpl(const char* input, size_t length, ::std::string& output) const override; 19 | 20 | virtual bool decompress(const char* input, size_t length, ::std::string& output) const override; 21 | }; 22 | } 23 | 24 | #endif -------------------------------------------------------------------------------- /third/leveldb/status.h: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2011 The LevelDB Authors. All rights reserved. 2 | // Use of this source code is governed by a BSD-style license that can be 3 | // found in the LICENSE file. See the AUTHORS file for names of contributors. 4 | // 5 | // A Status encapsulates the result of an operation. It may indicate success, 6 | // or it may indicate an error with an associated error message. 7 | // 8 | // Multiple threads can invoke const methods on a Status without 9 | // external synchronization, but if any of the threads may call a 10 | // non-const method, all threads accessing the same Status must use 11 | // external synchronization. 12 | 13 | #ifndef STORAGE_LEVELDB_INCLUDE_STATUS_H_ 14 | #define STORAGE_LEVELDB_INCLUDE_STATUS_H_ 15 | 16 | #include <algorithm> 17 | #include <string> 18 | 19 | #include "leveldb/export.h" 20 | #include "leveldb/slice.h" 21 | 22 | namespace leveldb { 23 | 24 | class LEVELDB_EXPORT Status { 25 | public: 26 | // Create a success status.
27 | Status() noexcept : state_(nullptr) {} 28 | ~Status() { delete[] state_; } 29 | 30 | Status(const Status& rhs); 31 | Status& operator=(const Status& rhs); 32 | 33 | Status(Status&& rhs) noexcept : state_(rhs.state_) { rhs.state_ = nullptr; } 34 | Status& operator=(Status&& rhs) noexcept; 35 | 36 | // Return a success status. 37 | static Status OK() { return Status(); } 38 | 39 | // Return error status of an appropriate type. 40 | static Status NotFound(const Slice& msg, const Slice& msg2 = Slice()) { 41 | return Status(kNotFound, msg, msg2); 42 | } 43 | static Status Corruption(const Slice& msg, const Slice& msg2 = Slice()) { 44 | return Status(kCorruption, msg, msg2); 45 | } 46 | static Status NotSupported(const Slice& msg, const Slice& msg2 = Slice()) { 47 | return Status(kNotSupported, msg, msg2); 48 | } 49 | static Status InvalidArgument(const Slice& msg, const Slice& msg2 = Slice()) { 50 | return Status(kInvalidArgument, msg, msg2); 51 | } 52 | static Status IOError(const Slice& msg, const Slice& msg2 = Slice()) { 53 | return Status(kIOError, msg, msg2); 54 | } 55 | 56 | // Returns true iff the status indicates success. 57 | bool ok() const { return (state_ == nullptr); } 58 | 59 | // Returns true iff the status indicates a NotFound error. 60 | bool IsNotFound() const { return code() == kNotFound; } 61 | 62 | // Returns true iff the status indicates a Corruption error. 63 | bool IsCorruption() const { return code() == kCorruption; } 64 | 65 | // Returns true iff the status indicates an IOError. 66 | bool IsIOError() const { return code() == kIOError; } 67 | 68 | // Returns true iff the status indicates a NotSupportedError. 69 | bool IsNotSupportedError() const { return code() == kNotSupported; } 70 | 71 | // Returns true iff the status indicates an InvalidArgument. 72 | bool IsInvalidArgument() const { return code() == kInvalidArgument; } 73 | 74 | // Return a string representation of this status suitable for printing. 75 | // Returns the string "OK" for success. 76 | std::string ToString() const; 77 | 78 | private: 79 | enum Code { 80 | kOk = 0, 81 | kNotFound = 1, 82 | kCorruption = 2, 83 | kNotSupported = 3, 84 | kInvalidArgument = 4, 85 | kIOError = 5 86 | }; 87 | 88 | Code code() const { 89 | return (state_ == nullptr) ? kOk : static_cast<Code>(state_[4]); 90 | } 91 | 92 | Status(Code code, const Slice& msg, const Slice& msg2); 93 | static const char* CopyState(const char* s); 94 | 95 | // OK status has a null state_. Otherwise, state_ is a new[] array 96 | // of the following form: 97 | // state_[0..3] == length of message 98 | // state_[4] == code 99 | // state_[5..] == message 100 | const char* state_; 101 | }; 102 | 103 | inline Status::Status(const Status& rhs) { 104 | state_ = (rhs.state_ == nullptr) ? nullptr : CopyState(rhs.state_); 105 | } 106 | inline Status& Status::operator=(const Status& rhs) { 107 | // The following condition catches both aliasing (when this == &rhs), 108 | // and the common case where both rhs and *this are ok. 109 | if (state_ != rhs.state_) { 110 | delete[] state_; 111 | state_ = (rhs.state_ == nullptr) ?
nullptr : CopyState(rhs.state_); 112 | } 113 | return *this; 114 | } 115 | inline Status& Status::operator=(Status&& rhs) noexcept { 116 | std::swap(state_, rhs.state_); 117 | return *this; 118 | } 119 | 120 | } // namespace leveldb 121 | 122 | #endif // STORAGE_LEVELDB_INCLUDE_STATUS_H_ 123 | -------------------------------------------------------------------------------- /third/leveldb/table.h: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2011 The LevelDB Authors. All rights reserved. 2 | // Use of this source code is governed by a BSD-style license that can be 3 | // found in the LICENSE file. See the AUTHORS file for names of contributors. 4 | 5 | #ifndef STORAGE_LEVELDB_INCLUDE_TABLE_H_ 6 | #define STORAGE_LEVELDB_INCLUDE_TABLE_H_ 7 | 8 | #include <cstdint> 9 | 10 | #include "leveldb/export.h" 11 | #include "leveldb/iterator.h" 12 | 13 | namespace leveldb { 14 | 15 | class Block; 16 | class BlockHandle; 17 | class Footer; 18 | struct Options; 19 | class RandomAccessFile; 20 | struct ReadOptions; 21 | class TableCache; 22 | 23 | // A Table is a sorted map from strings to strings. Tables are 24 | // immutable and persistent. A Table may be safely accessed from 25 | // multiple threads without external synchronization. 26 | class LEVELDB_EXPORT Table { 27 | public: 28 | // Attempt to open the table that is stored in bytes [0..file_size) 29 | // of "file", and read the metadata entries necessary to allow 30 | // retrieving data from the table. 31 | // 32 | // If successful, returns ok and sets "*table" to the newly opened 33 | // table. The client should delete "*table" when no longer needed. 34 | // If there was an error while initializing the table, sets "*table" 35 | // to nullptr and returns a non-ok status. Does not take ownership of 36 | // "*source", but the client must ensure that "source" remains live 37 | // for the duration of the returned table's lifetime. 38 | // 39 | // *file must remain live while this Table is in use. 40 | static Status Open(const Options& options, RandomAccessFile* file, 41 | uint64_t file_size, Table** table); 42 | 43 | Table(const Table&) = delete; 44 | Table& operator=(const Table&) = delete; 45 | 46 | ~Table(); 47 | 48 | // Returns a new iterator over the table contents. 49 | // The result of NewIterator() is initially invalid (caller must 50 | // call one of the Seek methods on the iterator before using it). 51 | Iterator* NewIterator(const ReadOptions&) const; 52 | 53 | // Given a key, return an approximate byte offset in the file where 54 | // the data for that key begins (or would begin if the key were 55 | // present in the file). The returned value is in terms of file 56 | // bytes, and so includes effects like compression of the underlying data. 57 | // E.g., the approximate offset of the last key in the table will 58 | // be close to the file length. 59 | uint64_t ApproximateOffsetOf(const Slice& key) const; 60 | 61 | private: 62 | friend class TableCache; 63 | struct Rep; 64 | 65 | static Iterator* BlockReader(void*, const ReadOptions&, const Slice&); 66 | 67 | explicit Table(Rep* rep) : rep_(rep) {} 68 | 69 | // Calls (*handle_result)(arg, ...) with the entry found after a call 70 | // to Seek(key). May not make such a call if filter policy says 71 | // that key is not present. 72 | Status InternalGet(const ReadOptions&, const Slice& key, void* arg, 73 | void (*handle_result)(void* arg, const Slice& k, 74 | const Slice& v)); 75 | 76 | void ReadMeta(const Footer& footer); 77 | void ReadFilter(const Slice& filter_handle_value); 78 | 79 | Rep* const rep_; 80 | }; 81 | 82 | } // namespace leveldb 83 | 84 | #endif // STORAGE_LEVELDB_INCLUDE_TABLE_H_ 85 | -------------------------------------------------------------------------------- /third/leveldb/table_builder.h: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2011 The LevelDB Authors. All rights reserved. 2 | // Use of this source code is governed by a BSD-style license that can be 3 | // found in the LICENSE file. See the AUTHORS file for names of contributors. 4 | // 5 | // TableBuilder provides the interface used to build a Table 6 | // (an immutable and sorted map from keys to values). 7 | // 8 | // Multiple threads can invoke const methods on a TableBuilder without 9 | // external synchronization, but if any of the threads may call a 10 | // non-const method, all threads accessing the same TableBuilder must use 11 | // external synchronization. 12 | 13 | #ifndef STORAGE_LEVELDB_INCLUDE_TABLE_BUILDER_H_ 14 | #define STORAGE_LEVELDB_INCLUDE_TABLE_BUILDER_H_ 15 | 16 | #include <cstdint> 17 | 18 | #include "leveldb/export.h" 19 | #include "leveldb/options.h" 20 | #include "leveldb/status.h" 21 | #include "leveldb/compressor.h" 22 | 23 | namespace leveldb { 24 | 25 | class BlockBuilder; 26 | class BlockHandle; 27 | class WritableFile; 28 | 29 | class LEVELDB_EXPORT TableBuilder { 30 | public: 31 | // Create a builder that will store the contents of the table it is 32 | // building in *file. Does not close the file. It is up to the 33 | // caller to close the file after calling Finish(). 34 | TableBuilder(const Options& options, WritableFile* file); 35 | 36 | TableBuilder(const TableBuilder&) = delete; 37 | TableBuilder& operator=(const TableBuilder&) = delete; 38 | 39 | // REQUIRES: Either Finish() or Abandon() has been called. 40 | ~TableBuilder(); 41 | 42 | // Change the options used by this builder. Note: only some of the 43 | // option fields can be changed after construction. If a field is 44 | // not allowed to change dynamically and its value in the structure 45 | // passed to the constructor is different from its value in the 46 | // structure passed to this method, this method will return an error 47 | // without changing any fields. 48 | Status ChangeOptions(const Options& options); 49 | 50 | // Add key,value to the table being constructed. 51 | // REQUIRES: key is after any previously added key according to comparator. 52 | // REQUIRES: Finish(), Abandon() have not been called 53 | void Add(const Slice& key, const Slice& value); 54 | 55 | // Advanced operation: flush any buffered key/value pairs to file. 56 | // Can be used to ensure that two adjacent entries never live in 57 | // the same data block. Most clients should not need to use this method. 58 | // REQUIRES: Finish(), Abandon() have not been called 59 | void Flush(); 60 | 61 | // Return non-ok iff some error has been detected. 62 | Status status() const; 63 | 64 | // Finish building the table. Stops using the file passed to the 65 | // constructor after this function returns. 66 | // REQUIRES: Finish(), Abandon() have not been called 67 | Status Finish(); 68 | 69 | // Indicate that the contents of this builder should be abandoned.
Stops 70 | // using the file passed to the constructor after this function returns. 71 | // If the caller is not going to call Finish(), it must call Abandon() 72 | // before destroying this builder. 73 | // REQUIRES: Finish(), Abandon() have not been called 74 | void Abandon(); 75 | 76 | // Number of calls to Add() so far. 77 | uint64_t NumEntries() const; 78 | 79 | // Size of the file generated so far. If invoked after a successful 80 | // Finish() call, returns the size of the final generated file. 81 | uint64_t FileSize() const; 82 | 83 | private: 84 | bool ok() const { return status().ok(); } 85 | void WriteBlock(BlockBuilder* block, BlockHandle* handle); 86 | void WriteRawBlock(const Slice& data, Compressor* compressor, BlockHandle* handle); 87 | 88 | struct Rep; 89 | Rep* rep_; 90 | }; 91 | 92 | } // namespace leveldb 93 | 94 | #endif // STORAGE_LEVELDB_INCLUDE_TABLE_BUILDER_H_ 95 | -------------------------------------------------------------------------------- /third/leveldb/write_batch.h: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2011 The LevelDB Authors. All rights reserved. 2 | // Use of this source code is governed by a BSD-style license that can be 3 | // found in the LICENSE file. See the AUTHORS file for names of contributors. 4 | // 5 | // WriteBatch holds a collection of updates to apply atomically to a DB. 6 | // 7 | // The updates are applied in the order in which they are added 8 | // to the WriteBatch. For example, the value of "key" will be "v3" 9 | // after the following batch is written: 10 | // 11 | // batch.Put("key", "v1"); 12 | // batch.Delete("key"); 13 | // batch.Put("key", "v2"); 14 | // batch.Put("key", "v3"); 15 | // 16 | // Multiple threads can invoke const methods on a WriteBatch without 17 | // external synchronization, but if any of the threads may call a 18 | // non-const method, all threads accessing the same WriteBatch must use 19 | // external synchronization. 20 | 21 | #ifndef STORAGE_LEVELDB_INCLUDE_WRITE_BATCH_H_ 22 | #define STORAGE_LEVELDB_INCLUDE_WRITE_BATCH_H_ 23 | 24 | #include <string> 25 | 26 | #include "leveldb/export.h" 27 | #include "leveldb/status.h" 28 | 29 | namespace leveldb { 30 | 31 | class Slice; 32 | 33 | class LEVELDB_EXPORT WriteBatch { 34 | public: 35 | class LEVELDB_EXPORT Handler { 36 | public: 37 | virtual ~Handler(); 38 | virtual void Put(const Slice& key, const Slice& value) = 0; 39 | virtual void Delete(const Slice& key) = 0; 40 | }; 41 | 42 | WriteBatch(); 43 | 44 | // Intentionally copyable. 45 | WriteBatch(const WriteBatch&) = default; 46 | WriteBatch& operator=(const WriteBatch&) = default; 47 | 48 | ~WriteBatch(); 49 | 50 | // Store the mapping "key->value" in the database. 51 | void Put(const Slice& key, const Slice& value); 52 | 53 | // If the database contains a mapping for "key", erase it. Else do nothing. 54 | void Delete(const Slice& key); 55 | 56 | // Clear all updates buffered in this batch. 57 | void Clear(); 58 | 59 | // The size of the database changes caused by this batch. 60 | // 61 | // This number is tied to implementation details, and may change across 62 | // releases. It is intended for LevelDB usage metrics. 63 | size_t ApproximateSize() const; 64 | 65 | // Copies the operations in "source" to this batch. 66 | // 67 | // This runs in O(source size) time. However, the constant factor is better 68 | // than calling Iterate() over the source batch with a Handler that replicates 69 | // the operations into this batch.
70 | void Append(const WriteBatch& source); 71 | 72 | // Support for iterating over the contents of a batch. 73 | Status Iterate(Handler* handler) const; 74 | 75 | private: 76 | friend class WriteBatchInternal; 77 | 78 | std::string rep_; // See comment in write_batch.cc for the format of rep_ 79 | }; 80 | 81 | } // namespace leveldb 82 | 83 | #endif // STORAGE_LEVELDB_INCLUDE_WRITE_BATCH_H_ 84 | -------------------------------------------------------------------------------- /third/leveldb/zlib_compressor.h: -------------------------------------------------------------------------------- 1 | #ifndef LEVELDB_MCPE_INCLUDE_ZLIB_COMPRESSOR_H_ 2 | #define LEVELDB_MCPE_INCLUDE_ZLIB_COMPRESSOR_H_ 3 | 4 | #include "leveldb/compressor.h" 5 | 6 | namespace leveldb { 7 | 8 | class LEVELDB_EXPORT ZlibCompressorBase : public Compressor 9 | { 10 | public: 11 | int inflate(const char* input, size_t length, ::std::string &output) const; 12 | 13 | const int compressionLevel; 14 | const bool raw; 15 | 16 | virtual ~ZlibCompressorBase() = default; 17 | 18 | ZlibCompressorBase(char uniqueCompressionID, int compressionLevel, bool raw) : 19 | Compressor(uniqueCompressionID), 20 | compressionLevel(compressionLevel), 21 | raw(raw) 22 | { 23 | assert(compressionLevel >= -1 && compressionLevel <= 9); 24 | } 25 | 26 | virtual void compressImpl(const char* input, size_t length, ::std::string& output) const override; 27 | 28 | virtual bool decompress(const char* input, size_t length, ::std::string &output) const override; 29 | 30 | private: 31 | 32 | int _window() const; 33 | 34 | }; 35 | 36 | class LEVELDB_EXPORT ZlibCompressor : public ZlibCompressorBase { 37 | public: 38 | static const int SERIALIZE_ID = 2; 39 | 40 | ZlibCompressor(int compressionLevel = -1) : 41 | ZlibCompressorBase(SERIALIZE_ID, compressionLevel, false) { 42 | 43 | } 44 | }; 45 | 46 | class LEVELDB_EXPORT ZlibCompressorRaw : public ZlibCompressorBase { 47 | public: 48 | static const int SERIALIZE_ID = 4; 49 | 50 | ZlibCompressorRaw(int compressionLevel = -1) : 51 | ZlibCompressorBase(SERIALIZE_ID, compressionLevel, true) { 52 | 53 | } 54 | }; 55 | } 56 | 57 | #endif 58 | -------------------------------------------------------------------------------- /third/leveldb/zopfli_compressor.h: -------------------------------------------------------------------------------- 1 | 2 | #pragma once 3 | 4 | #include "leveldb/compressor.h" 5 | 6 | namespace leveldb { 7 | 8 | class DLLX ZopfliCompressor : public Compressor 9 | { 10 | public: 11 | static const int SERIALIZE_ID = 2; //Same as ZLib since it is a replacement 12 | 13 | ZopfliCompressor() : Compressor(SERIALIZE_ID) {} 14 | 15 | virtual ~ZopfliCompressor() {} 16 | 17 | virtual void compressImpl(const char* input, size_t length, ::std::string& output) const override; 18 | 19 | virtual bool decompress(const char* input, size_t length, ::std::string &output) const override; 20 | 21 | private: 22 | }; 23 | } -------------------------------------------------------------------------------- /third/leveldb/zstd_compressor.h: -------------------------------------------------------------------------------- 1 | 2 | #pragma once 3 | 4 | #include "leveldb/compressor.h" 5 | 6 | namespace leveldb { 7 | 8 | class DLLX ZstdCompressor : public Compressor 9 | { 10 | public: 11 | static const int SERIALIZE_ID = 3; 12 | 13 | const int compressionLevel; 14 | 15 | virtual ~ZstdCompressor() { 16 | 17 | } 18 | 19 | ZstdCompressor(int compressionLevel = -1) : 20 | Compressor(SERIALIZE_ID), 21 | 
compressionLevel(compressionLevel) 22 | { 23 | assert(compressionLevel >= -1 && compressionLevel <= 9); 24 | } 25 | 26 | virtual void compressImpl(const char* input, size_t length, ::std::string& output) const override; 27 | 28 | virtual bool decompress(const char* input, size_t length, ::std::string &output) const override; 29 | 30 | private: 31 | 32 | }; 33 | } -------------------------------------------------------------------------------- /third/seh_exception.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | /** 3 | * @Description: a class for converting SEH exceptions to C++ exceptions 4 | * @Author: shang_cm 5 | * @Date: 2020-06-01 16:09:00 6 | * @LastEditTime: 2020-06-01 16:09:00 7 | * @LastEditors: shang_cm 8 | */ 9 | #pragma once 10 | #include <windows.h> 11 | 12 | #include <exception> 13 | 14 | class seh_exception : std::exception { 15 | typedef ULONG(WINAPI* fpRtlNtStatusToDosError)(DWORD Status); 16 | 17 | public: 18 | seh_exception(unsigned int nExceptionCode, 19 | _EXCEPTION_POINTERS* pstExcptionInfor) 20 | : m_nExceptionCode(0), 21 | m_pExcptionInfor(NULL), 22 | m_szMsgBuff(NULL), 23 | m_hNtModule(NULL), 24 | RtlNtStatusToDosError(NULL) { 25 | m_nExceptionCode = nExceptionCode; 26 | m_pExcptionInfor = pstExcptionInfor; 27 | m_hNtModule = GetModuleHandle(L"NTDLL.DLL"); 28 | if (NULL != m_hNtModule) { 29 | RtlNtStatusToDosError = (fpRtlNtStatusToDosError)GetProcAddress( 30 | m_hNtModule, "RtlNtStatusToDosError"); 31 | } 32 | } 33 | 34 | virtual ~seh_exception() { 35 | m_nExceptionCode = 0; 36 | m_pExcptionInfor = NULL; 37 | RtlNtStatusToDosError = NULL; 38 | 39 | if (NULL != m_szMsgBuff) { 40 | LocalFree(m_szMsgBuff); 41 | m_szMsgBuff = NULL; 42 | } 43 | }; 44 | 45 | const char* what() const noexcept { 46 | if (RtlNtStatusToDosError != NULL) { 47 | DWORD nConvertLen = FormatMessageA( 48 | FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM | 49 | FORMAT_MESSAGE_FROM_HMODULE, 50 | m_hNtModule, RtlNtStatusToDosError(m_nExceptionCode), 0, 51 | (char*)&m_szMsgBuff, 0, NULL); 52 | 53 | if (0 != nConvertLen) { 54 | return m_szMsgBuff; 55 | } 56 | } 57 | 58 | return "SEH_UNKNOW_ERROR"; 59 | } 60 | 61 | const PEXCEPTION_POINTERS info() const { return m_pExcptionInfor; } 62 | 63 | const unsigned int code() const { return m_nExceptionCode; } 64 | 65 | private: 66 | HMODULE m_hNtModule; 67 | 68 | unsigned int m_nExceptionCode; 69 | char* m_szMsgBuff; 70 | PEXCEPTION_POINTERS m_pExcptionInfor; 71 | fpRtlNtStatusToDosError RtlNtStatusToDosError; 72 | 73 | public: 74 | static void(__cdecl TranslateSEHtoCE)( 75 | unsigned int nExceptionCode, 76 | struct _EXCEPTION_POINTERS* pstExcptionInfor) { 77 | throw seh_exception(nExceptionCode, pstExcptionInfor); 78 | } 79 | }; 80 | --------------------------------------------------------------------------------