├── src ├── adc.h ├── FileReader.h ├── MemoryReader.h ├── Reader.cpp ├── Reader.h ├── PartitionedDisk.h ├── unichar.h ├── MemoryReader.cpp ├── SubReader.h ├── CachedReader.h ├── HFSExtentsOverflowBTree.h ├── decmpfs.h ├── HFSFork.h ├── DMGDecompressor.h ├── DMGPartition.h ├── HFSAttributeBTree.h ├── ResourceFork.h ├── GPTDisk.h ├── AppleDisk.h ├── SubReader.cpp ├── FileReader.cpp ├── MacBinary.cpp ├── main-fuse.h ├── HFSZlibReader.h ├── gpt.h ├── hfs.h ├── MacBinary.h ├── DMGDisk.h ├── HFSVolume.h ├── rsrc.h ├── apm.h ├── exceptions.h ├── CacheZone.h ├── HFSHighLevelVolume.h ├── HFSBTree.h ├── stat_xlate.h ├── CacheZone.cpp ├── unichar.cpp ├── HFSCatalogBTree.h ├── HFSExtentsOverflowBTree.cpp ├── GPTDisk.cpp ├── AppleDisk.cpp ├── ResourceFork.cpp ├── HFSVolume.cpp ├── be.h ├── adc.cpp ├── dmg.h ├── HFSFork.cpp ├── HFSAttributeBTree.cpp ├── DMGPartition.cpp ├── CachedReader.cpp ├── HFSBTreeNode.h ├── HFSZlibReader.cpp ├── hfsplus.h ├── DMGDisk.cpp ├── HFSBTree.cpp ├── main-fuse.cpp ├── DMGDecompressor.cpp ├── main-hdiutil.cpp ├── HFSHighLevelVolume.cpp └── HFSCatalogBTree.cpp ├── test ├── CacheTest.h ├── main-test-dmg.cpp ├── main-test-hfs+.cpp └── CacheTest.cpp ├── cmake_modules └── FindLibXml2.cmake ├── README.md └── CMakeLists.txt /src/adc.h: -------------------------------------------------------------------------------- 1 | #ifndef ADC_H 2 | #define ADC_H 3 | #include 4 | 5 | int adc_decompress(int in_size, uint8_t* input, int avail_size, uint8_t* output, int restartIndex, int* bytes_written); 6 | int adc_chunk_type(char _byte); 7 | int adc_chunk_size(char _byte); 8 | int adc_chunk_offset(unsigned char *chunk_start); 9 | 10 | #endif 11 | -------------------------------------------------------------------------------- /src/FileReader.h: -------------------------------------------------------------------------------- 1 | #ifndef FILEREADER_H 2 | #define FILEREADER_H 3 | #include "Reader.h" 4 | #include 5 | 6 | class FileReader : public Reader 7 | { 8 | public: 9 | FileReader(const std::string& path); 10 | ~FileReader(); 11 | 12 | int32_t read(void* buf, int32_t count, uint64_t offset) override; 13 | uint64_t length() override; 14 | private: 15 | int m_fd; 16 | }; 17 | 18 | #endif 19 | -------------------------------------------------------------------------------- /src/MemoryReader.h: -------------------------------------------------------------------------------- 1 | #ifndef MEMORYREADER_H 2 | #define MEMORYREADER_H 3 | #include "Reader.h" 4 | #include 5 | #include 6 | 7 | class MemoryReader : public Reader 8 | { 9 | public: 10 | MemoryReader(const uint8_t* start, size_t length); 11 | virtual int32_t read(void* buf, int32_t count, uint64_t offset) override; 12 | virtual uint64_t length() override; 13 | private: 14 | std::vector m_data; 15 | }; 16 | 17 | #endif 18 | -------------------------------------------------------------------------------- /src/Reader.cpp: -------------------------------------------------------------------------------- 1 | #include "Reader.h" 2 | #include "CacheZone.h" 3 | 4 | void Reader::adviseOptimalBlock(uint64_t offset, uint64_t& blockStart, uint64_t& blockEnd) 5 | { 6 | // Default implementation returns a block aligned to a 4096-byte boundary 7 | blockStart = offset & ~uint64_t(CacheZone::BLOCK_SIZE - 1); 8 | blockEnd = blockStart + CacheZone::BLOCK_SIZE; 9 | 10 | const uint64_t len = length(); 11 | if (blockEnd > len) 12 | blockEnd = len; 13 | } 14 | -------------------------------------------------------------------------------- /src/Reader.h: 
-------------------------------------------------------------------------------- 1 | #ifndef READER_H 2 | #define READER_H 3 | #include 4 | 5 | class Reader 6 | { 7 | public: 8 | virtual ~Reader() {} 9 | virtual int32_t read(void* buf, int32_t count, uint64_t offset) = 0; 10 | virtual uint64_t length() = 0; 11 | 12 | // Advises cache on the amount of data it should read in order to avoid repeatedly decompressing 13 | // the same blocks of data. 14 | virtual void adviseOptimalBlock(uint64_t offset, uint64_t& blockStart, uint64_t& blockEnd); 15 | }; 16 | 17 | #endif 18 | -------------------------------------------------------------------------------- /src/PartitionedDisk.h: -------------------------------------------------------------------------------- 1 | #ifndef PARTITIONEDDISK_H 2 | #define PARTITIONEDDISK_H 3 | #include "Reader.h" 4 | #include 5 | #include 6 | #include 7 | #include 8 | 9 | class PartitionedDisk 10 | { 11 | public: 12 | struct Partition 13 | { 14 | std::string name, type; 15 | uint64_t offset, size; // in byte 16 | }; 17 | public: 18 | virtual ~PartitionedDisk() {} 19 | 20 | virtual const std::vector& partitions() const = 0; 21 | virtual std::shared_ptr readerForPartition(int index) = 0; 22 | }; 23 | 24 | #endif 25 | 26 | -------------------------------------------------------------------------------- /src/unichar.h: -------------------------------------------------------------------------------- 1 | #ifndef UNICHAR_H 2 | #define UNICHAR_H 3 | #include 4 | #include "hfsplus.h" 5 | #include "be.h" 6 | 7 | std::string UnicharToString(uint16_t length, const unichar* string); 8 | bool EqualNoCase(const HFSString& str1, const std::string& str2); 9 | bool EqualCase(const HFSString& str1, const std::string& str2); 10 | uint16_t StringToUnichar(const std::string& in, unichar* out, size_t maxLength /* in unichars */); 11 | 12 | inline std::string UnicharToString(const HFSString& str) { return UnicharToString(be(str.length), str.string); } 13 | 14 | #endif 15 | -------------------------------------------------------------------------------- /src/MemoryReader.cpp: -------------------------------------------------------------------------------- 1 | #include "MemoryReader.h" 2 | #include 3 | 4 | MemoryReader::MemoryReader(const uint8_t* start, size_t length) 5 | { 6 | m_data = std::vector(start, start+length); 7 | } 8 | 9 | int32_t MemoryReader::read(void* buf, int32_t count, uint64_t offset) 10 | { 11 | if (offset > m_data.size()) 12 | return 0; 13 | if (count+offset > m_data.size()) 14 | count = m_data.size() - offset; 15 | 16 | memcpy(buf, &m_data[offset], count); 17 | return count; 18 | } 19 | 20 | uint64_t MemoryReader::length() 21 | { 22 | return m_data.size(); 23 | } 24 | -------------------------------------------------------------------------------- /test/CacheTest.h: -------------------------------------------------------------------------------- 1 | #ifndef CACHETEST_H 2 | #define CACHETEST_H 3 | #include "../src/MemoryReader.h" 4 | #include 5 | 6 | class MyMemoryReader : public MemoryReader 7 | { 8 | public: 9 | using MemoryReader::MemoryReader; 10 | 11 | void setOptimalBoundaries(std::initializer_list bd); 12 | virtual void adviseOptimalBlock(uint64_t offset, uint64_t& blockStart, uint64_t& blockEnd) override; 13 | virtual int32_t read(void* buf, int32_t count, uint64_t offset) override; 14 | 15 | private: 16 | std::list m_optimalBoundaries; 17 | }; 18 | 19 | #endif // CACHETEST_H 20 | 21 | -------------------------------------------------------------------------------- 
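The Reader interface shown above is the project's central abstraction: plain files, DMG partitions, HFS+ forks and the decompressing readers all expose the same positional read()/length() pair, while adviseOptimalBlock() lets a caching layer ask which byte range it should fetch in one go so that expensive sources are not decompressed repeatedly. Below is a minimal sketch of that interaction, assuming only the interface declared above; cacheOneBlock() is a hypothetical helper and not part of darling-dmg.

#include "Reader.h"
#include <cstdint>
#include <vector>

// Hypothetical helper: fetch the reader-advised block that contains 'offset'.
// A cache (cf. CacheZone) would store the returned bytes so that later small
// reads inside the same block do not trigger another decompression.
std::vector<uint8_t> cacheOneBlock(Reader& r, uint64_t offset)
{
	uint64_t blockStart = 0, blockEnd = 0;
	r.adviseOptimalBlock(offset, blockStart, blockEnd); // e.g. a whole compressed run

	std::vector<uint8_t> block(blockEnd - blockStart);  // assumes the run fits in memory
	int32_t rd = r.read(block.data(), int32_t(block.size()), blockStart);

	block.resize(rd > 0 ? rd : 0); // short read: end of data or error
	return block;
}

CachedReader (declared above) pairs a Reader with a CacheZone and a tag; the sketch only illustrates the advise-then-read handshake that such a layer would build on.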
/src/SubReader.h: -------------------------------------------------------------------------------- 1 | #ifndef SUBREADER_H 2 | #define SUBREADER_H 3 | #include "Reader.h" 4 | #include 5 | #include 6 | 7 | class SubReader : public Reader 8 | { 9 | public: 10 | SubReader(std::shared_ptr parent, uint64_t offset, uint64_t size); 11 | 12 | virtual int32_t read(void* buf, int32_t count, uint64_t offset) override; 13 | virtual uint64_t length() override; 14 | virtual void adviseOptimalBlock(uint64_t offset, uint64_t& blockStart, uint64_t& blockEnd) override; 15 | private: 16 | std::shared_ptr m_parent; 17 | uint64_t m_offset, m_size; 18 | }; 19 | 20 | #endif 21 | -------------------------------------------------------------------------------- /src/CachedReader.h: -------------------------------------------------------------------------------- 1 | #ifndef CACHEDREADER_H 2 | #define CACHEDREADER_H 3 | #include "Reader.h" 4 | #include "CacheZone.h" 5 | #include 6 | 7 | class CachedReader : public Reader 8 | { 9 | public: 10 | CachedReader(std::shared_ptr reader, CacheZone* zone, const std::string& tag); 11 | 12 | virtual int32_t read(void* buf, int32_t count, uint64_t offset) override; 13 | virtual uint64_t length() override; 14 | private: 15 | void nonCachedRead(void* buf, int32_t count, uint64_t offset); 16 | private: 17 | std::shared_ptr m_reader; 18 | CacheZone* m_zone; 19 | const std::string m_tag; 20 | }; 21 | 22 | #endif 23 | -------------------------------------------------------------------------------- /src/HFSExtentsOverflowBTree.h: -------------------------------------------------------------------------------- 1 | #ifndef HFSEXTENTSOVERFLOWBTREE_H 2 | #define HFSEXTENTSOVERFLOWBTREE_H 3 | #include "HFSBTree.h" 4 | #include "hfsplus.h" 5 | #include "CacheZone.h" 6 | #include 7 | #include 8 | 9 | class HFSExtentsOverflowBTree : protected HFSBTree 10 | { 11 | public: 12 | HFSExtentsOverflowBTree(std::shared_ptr fork, CacheZone* zone); 13 | void findExtentsForFile(HFSCatalogNodeID cnid, bool resourceFork, uint32_t startBlock, std::vector& extraExtents); 14 | private: 15 | static int cnidComparator(const Key* indexKey, const Key* desiredKey); 16 | }; 17 | 18 | #endif 19 | 20 | -------------------------------------------------------------------------------- /src/decmpfs.h: -------------------------------------------------------------------------------- 1 | #ifndef DECMPFS_H 2 | #define DECMPFS_H 3 | #include 4 | 5 | #define DECMPFS_MAGIC 0x636d7066 /* cmpf */ 6 | #define DECMPFS_ID 0xffff 7 | 8 | #define DECMPFS_XATTR_NAME "com.apple.decmpfs" 9 | 10 | enum class DecmpfsCompressionType : uint32_t 11 | { 12 | UncompressedInline = 1, // inline = after the header in xattr 13 | CompressedInline = 3, 14 | CompressedResourceFork = 4 15 | }; 16 | 17 | #pragma pack(1) 18 | struct decmpfs_disk_header 19 | { 20 | uint32_t compression_magic; 21 | uint32_t compression_type; 22 | uint64_t uncompressed_size; 23 | unsigned char attr_bytes[0]; 24 | }; 25 | #pragma pack() 26 | 27 | #endif 28 | -------------------------------------------------------------------------------- /src/HFSFork.h: -------------------------------------------------------------------------------- 1 | #ifndef HFSFORK_H 2 | #define HFSFORK_H 3 | #include "hfsplus.h" 4 | #include "HFSVolume.h" 5 | #include 6 | #include 7 | 8 | class HFSVolume; 9 | 10 | class HFSFork : public Reader 11 | { 12 | public: 13 | HFSFork(HFSVolume* vol, const HFSPlusForkData& fork, HFSCatalogNodeID cnid = kHFSNullID, bool resourceFork = false); 14 | int32_t 
read(void* buf, int32_t count, uint64_t offset) override; 15 | uint64_t length() override; 16 | private: 17 | void loadFromOverflowsFile(uint32_t blocksSoFar); 18 | private: 19 | HFSVolume* m_volume; 20 | HFSPlusForkData m_fork; 21 | std::vector m_extents; 22 | 23 | HFSCatalogNodeID m_cnid; 24 | bool m_resourceFork; 25 | }; 26 | 27 | #endif 28 | -------------------------------------------------------------------------------- /src/DMGDecompressor.h: -------------------------------------------------------------------------------- 1 | #ifndef DMGDECOMPRESSOR_H 2 | #define DMGDECOMPRESSOR_H 3 | #include 4 | #include "dmg.h" 5 | #include "Reader.h" 6 | #include 7 | 8 | class DMGDecompressor 9 | { 10 | protected: 11 | DMGDecompressor(std::shared_ptr reader); 12 | int readSome(char** ptr); 13 | void processed(int bytes); 14 | uint64_t readerLength() const { return m_reader->length(); } 15 | public: 16 | virtual ~DMGDecompressor() {} 17 | virtual int32_t decompress(void* output, int32_t count, int64_t offset) = 0; 18 | 19 | static DMGDecompressor* create(RunType runType, std::shared_ptr reader); 20 | private: 21 | std::shared_ptr m_reader; 22 | uint32_t m_pos; 23 | char m_buf[8*1024]; 24 | }; 25 | 26 | #endif 27 | -------------------------------------------------------------------------------- /src/DMGPartition.h: -------------------------------------------------------------------------------- 1 | #ifndef DMGPARTITION_H 2 | #define DMGPARTITION_H 3 | #include "Reader.h" 4 | #include "dmg.h" 5 | #include 6 | #include 7 | 8 | class DMGPartition : public Reader 9 | { 10 | public: 11 | DMGPartition(std::shared_ptr disk, BLKXTable* table); 12 | ~DMGPartition(); 13 | 14 | virtual int32_t read(void* buf, int32_t count, uint64_t offset) override; 15 | virtual uint64_t length() override; 16 | virtual void adviseOptimalBlock(uint64_t offset, uint64_t& blockStart, uint64_t& blockEnd) override; 17 | private: 18 | int32_t readRun(void* buf, int32_t runIndex, uint64_t offsetInSector, int32_t count); 19 | private: 20 | std::shared_ptr m_disk; 21 | BLKXTable* m_table; 22 | std::map m_sectors; 23 | }; 24 | 25 | #endif 26 | -------------------------------------------------------------------------------- /src/HFSAttributeBTree.h: -------------------------------------------------------------------------------- 1 | #ifndef HFSATTRIBUTEBTREE_H 2 | #define HFSATTRIBUTEBTREE_H 3 | #include "HFSBTree.h" 4 | #include "CacheZone.h" 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | 11 | class HFSAttributeBTree : protected HFSBTree 12 | { 13 | public: 14 | HFSAttributeBTree(std::shared_ptr fork, CacheZone* zone); 15 | 16 | typedef std::map> AttributeMap; 17 | 18 | AttributeMap getattr(HFSCatalogNodeID cnid); 19 | bool getattr(HFSCatalogNodeID cnid, const std::string& attrName, std::vector& data); 20 | private: 21 | static int cnidComparator(const Key* indexKey, const Key* desiredKey); 22 | static int cnidAttrComparator(const Key* indexKey, const Key* desiredKey); 23 | }; 24 | 25 | #endif 26 | -------------------------------------------------------------------------------- /src/ResourceFork.h: -------------------------------------------------------------------------------- 1 | #ifndef RESOURCE_FORK_H 2 | #define RESOURCE_FORK_H 3 | #include "Reader.h" 4 | #include "rsrc.h" 5 | #include 6 | #include 7 | #include 8 | 9 | class ResourceFork 10 | { 11 | public: 12 | ResourceFork(std::shared_ptr reader); 13 | 14 | std::shared_ptr getResource(uint32_t resourceType, uint16_t id); 15 | private: 16 | void 
loadResources(); 17 | private: 18 | std::shared_ptr m_reader; 19 | 20 | struct Resource 21 | { 22 | uint32_t type; 23 | uint16_t id; 24 | }; 25 | struct ResourceLocation 26 | { 27 | uint64_t offset; 28 | uint32_t length; 29 | }; 30 | friend bool operator<(const ResourceFork::Resource& t, const ResourceFork::Resource& that); 31 | 32 | std::map m_resources; 33 | }; 34 | 35 | #endif 36 | 37 | -------------------------------------------------------------------------------- /src/GPTDisk.h: -------------------------------------------------------------------------------- 1 | #ifndef GPTDISK_H 2 | #define GPTDISK_H 3 | #include "Reader.h" 4 | #include "gpt.h" 5 | #include "PartitionedDisk.h" 6 | #include 7 | 8 | class GPTDisk : public PartitionedDisk 9 | { 10 | public: 11 | GPTDisk(std::shared_ptr readerWholeDisk); 12 | GPTDisk(std::shared_ptr protectiveMBR, std::shared_ptr partitionTable); 13 | 14 | virtual const std::vector& partitions() const override { return m_partitions; } 15 | virtual std::shared_ptr readerForPartition(int index) override; 16 | 17 | static bool isGPTDisk(std::shared_ptr reader); 18 | private: 19 | void loadPartitions(std::shared_ptr table); 20 | static std::string makeGUID(const GPT_GUID& guid); 21 | private: 22 | std::shared_ptr m_reader; 23 | std::vector m_partitions; 24 | }; 25 | 26 | #endif 27 | -------------------------------------------------------------------------------- /src/AppleDisk.h: -------------------------------------------------------------------------------- 1 | #ifndef APPLEDISK_H 2 | #define APPLEDISK_H 3 | #include "apm.h" 4 | #include "Reader.h" 5 | #include "PartitionedDisk.h" 6 | #include 7 | #include 8 | 9 | class AppleDisk : public PartitionedDisk 10 | { 11 | public: 12 | AppleDisk(std::shared_ptr reader); 13 | 14 | virtual const std::vector& partitions() const override { return m_partitions; } 15 | virtual std::shared_ptr readerForPartition(int index) override; 16 | 17 | static bool isAppleDisk(std::shared_ptr reader); 18 | private: 19 | AppleDisk(std::shared_ptr readerBlock0, std::shared_ptr readerPM); 20 | void load(std::shared_ptr readerPM); 21 | friend class DMGDisk; 22 | private: 23 | std::shared_ptr m_reader; 24 | Block0 m_block0; 25 | std::vector m_partitions; 26 | }; 27 | 28 | #endif 29 | -------------------------------------------------------------------------------- /test/main-test-dmg.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include "FileReader.h" 3 | #include "DMGDisk.h" 4 | 5 | void readPartition(DMGDisk* dmg, int index); 6 | 7 | int main(int argc, char** argv) 8 | { 9 | Reader* fileReader = new FileReader(argv[1]); 10 | DMGDisk* dmg = new DMGDisk(fileReader); 11 | 12 | auto partitions = dmg->partitions(); 13 | for (int i = 0; i < partitions.size(); i++) 14 | { 15 | auto& part = partitions[i]; 16 | 17 | std::cout << part.name << ' ' << part.type << ", " << part.size << " bytes\n"; 18 | if (part.type == "Apple_HFS") 19 | { 20 | readPartition(dmg, i); 21 | } 22 | } 23 | 24 | delete dmg; 25 | delete fileReader; 26 | } 27 | 28 | void readPartition(DMGDisk* dmg, int index) 29 | { 30 | char buf[4096]; 31 | Reader* reader = dmg->readerForPartition(index); 32 | 33 | reader->read(buf, sizeof(buf), 0); 34 | 35 | delete reader; 36 | } 37 | 38 | -------------------------------------------------------------------------------- /src/SubReader.cpp: -------------------------------------------------------------------------------- 1 | #include "SubReader.h" 2 | 3 | 
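// SubReader exposes a fixed [offset, offset + size) window of a parent Reader:
// read() clamps the request to the window and shifts it by the window's start,
// and adviseOptimalBlock() translates the parent's advice back into window-
// relative offsets. AppleDisk, GPTDisk and MacBinary use it to carve partitions
// and forks out of a larger image.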
SubReader::SubReader(std::shared_ptr parent, uint64_t offset, uint64_t size) 4 | : m_parent(parent), m_offset(offset), m_size(size) 5 | { 6 | } 7 | 8 | int32_t SubReader::read(void* buf, int32_t count, uint64_t offset) 9 | { 10 | if (offset > m_size) 11 | return 0; 12 | if (offset+count > m_size) 13 | count = m_size - offset; 14 | 15 | return m_parent->read(buf, count, offset + m_offset); 16 | } 17 | 18 | uint64_t SubReader::length() 19 | { 20 | return m_size; 21 | } 22 | 23 | void SubReader::adviseOptimalBlock(uint64_t offset, uint64_t& blockStart, uint64_t& blockEnd) 24 | { 25 | m_parent->adviseOptimalBlock(m_offset+offset, blockStart, blockEnd); 26 | 27 | if (blockStart < m_offset) 28 | blockStart = m_offset; 29 | blockStart -= m_offset; 30 | 31 | blockEnd -= m_offset; 32 | if (blockEnd > m_size) 33 | blockEnd = m_size; 34 | } 35 | -------------------------------------------------------------------------------- /src/FileReader.cpp: -------------------------------------------------------------------------------- 1 | #include "FileReader.h" 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include "exceptions.h" 9 | 10 | FileReader::FileReader(const std::string& path) 11 | : m_fd(-1) 12 | { 13 | m_fd = ::open(path.c_str(), O_RDONLY); 14 | 15 | if (m_fd == -1) 16 | { 17 | #ifdef DEBUG 18 | std::cerr << "Cannot open " << path << ": " << strerror(errno) << std::endl; 19 | #endif 20 | throw file_not_found_error(path); 21 | } 22 | } 23 | 24 | FileReader::~FileReader() 25 | { 26 | if (m_fd != -1) 27 | ::close(m_fd); 28 | } 29 | 30 | int32_t FileReader::read(void* buf, int32_t count, uint64_t offset) 31 | { 32 | if (m_fd == -1) 33 | return -1; 34 | 35 | static_assert(sizeof(off_t) == 8, "off_t is too small"); 36 | 37 | return ::pread(m_fd, buf, count, offset); 38 | } 39 | 40 | uint64_t FileReader::length() 41 | { 42 | return ::lseek(m_fd, 0, SEEK_END); 43 | } 44 | -------------------------------------------------------------------------------- /src/MacBinary.cpp: -------------------------------------------------------------------------------- 1 | #include "MacBinary.h" 2 | #include "SubReader.h" 3 | #include "be.h" 4 | #include "exceptions.h" 5 | #include 6 | 7 | MacBinary::MacBinary(std::shared_ptr reader) 8 | : m_reader(reader) 9 | { 10 | if (m_reader->read(&m_header, sizeof(m_header), 0) != sizeof(m_header)) 11 | throw io_error("Error reading MacBinary header"); 12 | } 13 | 14 | Reader* MacBinary::getDataFork() 15 | { 16 | uint32_t extraLen = 0; 17 | 18 | if (be(m_header.signature) == 'mBIN') 19 | extraLen = be(m_header.sec_header_len); 20 | 21 | return new SubReader(m_reader, sizeof(m_header) + extraLen, be(m_header.data_len)); 22 | } 23 | 24 | Reader* MacBinary::getResourceFork() 25 | { 26 | uint32_t extraLen = 0; 27 | 28 | if (be(m_header.signature) == 'mBIN') 29 | extraLen = be(m_header.sec_header_len); 30 | 31 | extraLen += be(m_header.data_len); 32 | extraLen = (extraLen+127) / 128 * 128; 33 | 34 | return new SubReader(m_reader, sizeof(m_header) + extraLen, be(m_header.resource_len)); 35 | } 36 | 37 | -------------------------------------------------------------------------------- /src/main-fuse.h: -------------------------------------------------------------------------------- 1 | #ifndef MAIN_FUSE_H 2 | #define MAIN_FUSE_H 3 | #define FUSE_USE_VERSION 26 4 | 5 | #include 6 | 7 | static void showHelp(const char* argv0); 8 | static void openDisk(const char* path); 9 | 10 | int hfs_getattr(const char* path, struct stat* stat); 11 | int hfs_readlink(const 
char* path, char* buf, size_t size); 12 | int hfs_open(const char* path, struct fuse_file_info* info); 13 | int hfs_read(const char* path, char* buf, size_t bytes, off_t offset, struct fuse_file_info* info); 14 | int hfs_release(const char* path, struct fuse_file_info* info); 15 | int hfs_readdir(const char* path, void* buf, fuse_fill_dir_t filler, off_t offset, struct fuse_file_info* info); 16 | #if defined(__APPLE__) && !defined(DARLING) 17 | int hfs_getxattr(const char* path, const char* name, char* value, size_t vlen, uint32_t position); 18 | #else 19 | int hfs_getxattr(const char* path, const char* name, char* value, size_t vlen); 20 | #endif 21 | int hfs_listxattr(const char* path, char* buffer, size_t size); 22 | 23 | #endif 24 | 25 | -------------------------------------------------------------------------------- /src/HFSZlibReader.h: -------------------------------------------------------------------------------- 1 | #ifndef ZLIBREADER_H 2 | #define ZLIBREADER_H 3 | #include "Reader.h" 4 | #include 5 | #include 6 | #include 7 | #include 8 | 9 | class HFSZlibReader : public Reader 10 | { 11 | public: 12 | HFSZlibReader(std::shared_ptr parent, uint64_t uncompressedSize, bool singleRun = false); 13 | virtual ~HFSZlibReader(); 14 | 15 | virtual int32_t read(void* buf, int32_t count, uint64_t offset) override; 16 | virtual uint64_t length() override; 17 | virtual void adviseOptimalBlock(uint64_t offset, uint64_t& blockStart, uint64_t& blockEnd) override; 18 | private: 19 | int32_t readRun(int runIndex, void* buf, int32_t count, uint64_t offset); 20 | void zlibInit(); 21 | void zlibExit(); 22 | private: 23 | std::shared_ptr m_reader; 24 | bool m_ownParentReader; 25 | uint64_t m_uncompressedSize; 26 | z_stream m_strm; 27 | int m_lastRun = -1; 28 | uint64_t m_lastEnd = 0, m_inputPos = 0; 29 | bool m_lastUncompressed = false; 30 | std::vector> m_offsets; 31 | }; 32 | 33 | #endif 34 | -------------------------------------------------------------------------------- /src/gpt.h: -------------------------------------------------------------------------------- 1 | #ifndef GPT_H 2 | #define GPT_H 3 | #include 4 | #include "be.h" 5 | 6 | #pragma pack(1) 7 | 8 | #define MPT_GPT_FAKE_TYPE 0xEE 9 | 10 | struct MBRPartition 11 | { 12 | uint8_t status; 13 | uint8_t chsFirst[3]; 14 | uint8_t type; 15 | uint8_t chsLast[3]; 16 | uint32_t lbaFirst; 17 | uint32_t numSectors; 18 | }; 19 | 20 | #define MBR_SIGNATURE be(0x55AA) 21 | 22 | struct ProtectiveMBR 23 | { 24 | uint8_t unused[446]; 25 | MBRPartition partitions[4]; 26 | uint16_t signature; 27 | }; 28 | 29 | #define GPT_SIGNATURE "EFI PART" 30 | 31 | struct GPTHeader 32 | { 33 | char signature[8]; 34 | // TODO 35 | }; 36 | 37 | struct GPT_GUID 38 | { 39 | uint32_t data1; 40 | uint16_t data2, data3; 41 | uint8_t data4[8]; 42 | }; 43 | 44 | struct GPTPartition 45 | { 46 | GPT_GUID typeGUID; 47 | GPT_GUID partitionGUID; 48 | uint64_t firstLBA, lastLBA; 49 | uint64_t flags; 50 | uint16_t name[36]; 51 | }; 52 | 53 | #define GUID_EMPTY "00000000-0000-0000-0000-000000000000" 54 | #define GUID_HFSPLUS "48465300-0000-11AA-AA11-00306543ECAC" 55 | 56 | #pragma pack() 57 | 58 | #endif 59 | -------------------------------------------------------------------------------- /src/hfs.h: -------------------------------------------------------------------------------- 1 | #ifndef HFS_H 2 | #define HFS_H 3 | #include 4 | 5 | #define HFS_SIGNATURE 0x4244 6 | 7 | #pragma pack(1) 8 | 9 | struct HFSExtentDescriptor 10 | { 11 | uint16_t startBlock; 12 | uint16_t blockCount; 13 | 
}; 14 | typedef HFSExtentDescriptor HFSExtentRecord[3]; 15 | 16 | struct HFSMasterDirectoryBlock 17 | { 18 | uint16_t drSigWord; 19 | uint32_t drCrDate; 20 | uint32_t drLsMod; 21 | uint16_t drAtrb; 22 | uint16_t drNmFls; 23 | uint16_t drVBMSt; 24 | uint16_t drAllocPtr; 25 | uint16_t drNmAlBlks; 26 | uint32_t drAlBlkSiz; 27 | uint32_t drClpSiz; 28 | uint16_t drAlBlSt; 29 | uint32_t drNxtCNID; 30 | uint16_t drFreeBks; 31 | uint8_t drVN[28]; 32 | uint32_t drVolBkUp; 33 | uint16_t drVSeqNum; 34 | uint32_t drWrCnt; 35 | uint32_t drXTClpSiz; 36 | uint32_t drCTClpSiz; 37 | uint16_t drNmRtDirs; 38 | uint32_t drFilCnt; 39 | uint32_t drDirCnt; 40 | uint32_t drFndrInfo[8]; 41 | uint16_t drEmbedSigWord; 42 | HFSExtentDescriptor drEmbedExtent; 43 | uint32_t drXTFlSize; 44 | HFSExtentRecord drXTExtRec; 45 | uint32_t drCTFlSize; 46 | HFSExtentRecord drCTExtRec; 47 | }; 48 | 49 | #pragma pack() 50 | 51 | #endif 52 | 53 | -------------------------------------------------------------------------------- /src/MacBinary.h: -------------------------------------------------------------------------------- 1 | #ifndef MACBINARY_H 2 | #define MACBINARY_H 3 | #include 4 | #include "Reader.h" 5 | #include 6 | 7 | #pragma pack(1) 8 | struct MacBinaryHeader 9 | { 10 | uint8_t old_ver; 11 | uint8_t filename_len; 12 | char filename[63]; 13 | uint32_t file_type, file_creator; 14 | uint8_t orig_finder_flags; 15 | uint8_t zero; 16 | uint16_t vertical_pos, horizontal_pos; 17 | uint16_t win_folder_id; 18 | uint8_t _protected, zero2; 19 | uint32_t data_len, resource_len; 20 | uint32_t created, last_modified; 21 | uint16_t getinfo_comment_len; 22 | uint8_t finder_flags; 23 | uint32_t signature; // 'mBIN' 24 | uint8_t script, ext_finder_flags, unused[8]; 25 | uint32_t total_len; // unused 26 | uint16_t sec_header_len; 27 | uint8_t version_write, version_read; 28 | uint16_t crc; 29 | char padding[2]; // pad to 128 bytes 30 | }; 31 | #pragma pack() 32 | 33 | class MacBinary 34 | { 35 | public: 36 | MacBinary(std::shared_ptr reader); 37 | 38 | Reader* getDataFork(); 39 | Reader* getResourceFork(); 40 | 41 | static bool isMacBinary(Reader* reader); 42 | private: 43 | std::shared_ptr m_reader; 44 | MacBinaryHeader m_header; 45 | }; 46 | 47 | 48 | 49 | #endif 50 | -------------------------------------------------------------------------------- /cmake_modules/FindLibXml2.cmake: -------------------------------------------------------------------------------- 1 | # - Try to find LibXml2 2 | # Once done this will define 3 | # LIBXML2_FOUND - System has LibXml2 4 | # LIBXML2_INCLUDE_DIRS - The LibXml2 include directories 5 | # LIBXML2_LIBRARIES - The libraries needed to use LibXml2 6 | # LIBXML2_DEFINITIONS - Compiler switches required for using LibXml2 7 | 8 | find_package(PkgConfig) 9 | pkg_check_modules(PC_LIBXML QUIET libxml-2.0) 10 | set(LIBXML2_DEFINITIONS ${PC_LIBXML_CFLAGS_OTHER}) 11 | 12 | find_path(LIBXML2_INCLUDE_DIR libxml/xpath.h 13 | HINTS ${PC_LIBXML_INCLUDEDIR} ${PC_LIBXML_INCLUDE_DIRS} 14 | PATH_SUFFIXES libxml2 ) 15 | 16 | find_library(LIBXML2_LIBRARY NAMES xml2 libxml2 17 | HINTS ${PC_LIBXML_LIBDIR} ${PC_LIBXML_LIBRARY_DIRS} ) 18 | 19 | set(LIBXML2_LIBRARIES ${LIBXML2_LIBRARY} ) 20 | set(LIBXML2_INCLUDE_DIRS ${LIBXML2_INCLUDE_DIR} ) 21 | 22 | include(FindPackageHandleStandardArgs) 23 | # handle the QUIETLY and REQUIRED arguments and set LIBXML2_FOUND to TRUE 24 | # if all listed variables are TRUE 25 | find_package_handle_standard_args(LibXml2 DEFAULT_MSG 26 | LIBXML2_LIBRARY LIBXML2_INCLUDE_DIR) 27 | 28 | 
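# Hide the per-library cache entries from interactive CMake UIs; consumers
# should use LIBXML2_LIBRARIES and LIBXML2_INCLUDE_DIRS instead.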
mark_as_advanced(LIBXML2_INCLUDE_DIR LIBXML2_LIBRARY ) 29 | 30 | -------------------------------------------------------------------------------- /src/DMGDisk.h: -------------------------------------------------------------------------------- 1 | #ifndef DMGDISK_H 2 | #define DMGDISK_H 3 | #include "PartitionedDisk.h" 4 | #include "Reader.h" 5 | #include "dmg.h" 6 | #include "CacheZone.h" 7 | #include 8 | #include 9 | 10 | class DMGDisk : public PartitionedDisk 11 | { 12 | public: 13 | DMGDisk(std::shared_ptrreader); 14 | ~DMGDisk(); 15 | 16 | virtual const std::vector& partitions() const override { return m_partitions; } 17 | virtual std::shared_ptr readerForPartition(int index) override; 18 | 19 | static bool isDMG(std::shared_ptr reader); 20 | private: 21 | void loadKoly(const UDIFResourceFile& koly); 22 | bool loadPartitionElements(xmlXPathContextPtr xpathContext, xmlNodeSetPtr nodes); 23 | static bool parseNameAndType(const std::string& nameAndType, std::string& name, std::string& type); 24 | static bool base64Decode(const std::string& input, std::vector& output); 25 | BLKXTable* loadBLKXTableForPartition(int index); 26 | std::shared_ptr readerForKolyBlock(int index); 27 | private: 28 | std::shared_ptr m_reader; 29 | std::vector m_partitions; 30 | UDIFResourceFile m_udif; 31 | xmlDocPtr m_kolyXML; 32 | CacheZone m_zone; 33 | }; 34 | 35 | #endif 36 | 37 | -------------------------------------------------------------------------------- /src/HFSVolume.h: -------------------------------------------------------------------------------- 1 | #ifndef HFSVOLUME_H 2 | #define HFSVOLUME_H 3 | #include "hfsplus.h" 4 | #include "hfs.h" 5 | #include "Reader.h" 6 | #include "CacheZone.h" 7 | #include 8 | #include 9 | 10 | class HFSCatalogBTree; 11 | class HFSFork; 12 | class HFSExtentsOverflowBTree; 13 | class HFSAttributeBTree; 14 | 15 | class HFSVolume 16 | { 17 | public: 18 | HFSVolume(std::shared_ptr reader); 19 | ~HFSVolume(); 20 | 21 | void usage(uint64_t& totalBytes, uint64_t& freeBytes) const; 22 | HFSCatalogBTree* rootCatalogTree(); 23 | 24 | bool isHFSX() const; 25 | inline HFSAttributeBTree* attributes() { return m_attributes; } 26 | inline uint64_t volumeSize() const { return m_reader->length(); } 27 | 28 | static bool isHFSPlus(std::shared_ptr reader); 29 | 30 | inline CacheZone* getFileZone() { return &m_fileZone; } 31 | inline CacheZone* getBtreeZone() { return &m_btreeZone; } 32 | private: 33 | void processEmbeddedHFSPlus(HFSMasterDirectoryBlock* block); 34 | private: 35 | std::shared_ptr m_reader; 36 | std::shared_ptr m_embeddedReader; 37 | HFSExtentsOverflowBTree* m_overflowExtents; 38 | HFSAttributeBTree* m_attributes; 39 | HFSPlusVolumeHeader m_header; 40 | CacheZone m_fileZone, m_btreeZone; 41 | 42 | friend class HFSBTree; 43 | friend class HFSFork; 44 | }; 45 | 46 | #endif 47 | -------------------------------------------------------------------------------- /src/rsrc.h: -------------------------------------------------------------------------------- 1 | #ifndef RSRC_H 2 | #define RSRC_H 3 | #include 4 | 5 | // Resource fork structure declarations 6 | // Big Endian 7 | 8 | #pragma pack(1) 9 | 10 | 11 | struct HFSResourceForkHeader 12 | { 13 | uint32_t dataOffset; 14 | uint32_t mapOffset; // offset to HFSResourceMapHeader 15 | uint32_t dataLength; 16 | uint32_t mapLength; 17 | }; 18 | 19 | struct HFSResourceHeader 20 | { 21 | uint32_t length; 22 | uint8_t data[]; 23 | }; 24 | 25 | struct HFSResourceMapHeader 26 | { 27 | uint32_t dataOffset; 28 | uint32_t mapOffset; 29 | uint32_t 
dataLength; 30 | uint32_t mapLength; 31 | uint32_t reserved2; 32 | uint16_t reserved3; 33 | uint16_t attributes; 34 | uint16_t listOffset; // offset to HFSResourceList from the start of HFSResourceMapHeader 35 | }; 36 | 37 | struct HFSResourceListItem 38 | { 39 | uint32_t type; // fourcc 40 | uint16_t count; // contains count - 1 41 | uint16_t offset; // offset to HFSResourcePointer from this list item 42 | }; 43 | 44 | struct HFSResourceList 45 | { 46 | uint16_t count; // contains count - 1 47 | HFSResourceListItem items[]; 48 | }; 49 | 50 | struct HFSResourcePointer 51 | { 52 | uint16_t resourceId; // 0xffff for cmpfs 53 | uint16_t offsetName; 54 | uint32_t dataOffset; // offset to HFSResourceHeader from added to HFSResourceForkHeader::dataOffset 55 | uint16_t reserved; 56 | }; 57 | 58 | #pragma pack() 59 | 60 | #endif 61 | -------------------------------------------------------------------------------- /src/apm.h: -------------------------------------------------------------------------------- 1 | #ifndef APM_H 2 | #define APM_H 3 | #include 4 | 5 | #pragma pack(1) 6 | 7 | static const uint16_t BLOCK0_SIGNATURE = 0x4552; 8 | static const uint16_t DPME_SIGNATURE = 0x504D; 9 | 10 | struct DDMap 11 | { 12 | uint32_t ddBlock; 13 | uint16_t ddSize; 14 | uint16_t ddType; 15 | }; 16 | 17 | struct DPME 18 | { 19 | uint16_t dpme_signature; 20 | uint16_t dpme_reserved_1; 21 | uint32_t dpme_map_entries; 22 | uint32_t dpme_pblock_start; 23 | uint32_t dpme_pblocks; 24 | char dpme_name[32]; 25 | char dpme_type[32]; 26 | uint32_t dpme_lblock_start; 27 | uint32_t dpme_lblocks; 28 | uint32_t dpme_flags; 29 | uint32_t dpme_boot_block; 30 | uint32_t dpme_boot_bytes; 31 | uint32_t dpme_load_addr; 32 | uint32_t dpme_load_addr_2; 33 | uint32_t dpme_goto_addr; 34 | uint32_t dpme_goto_addr_2; 35 | uint32_t dpme_checksum; 36 | uint8_t dpme_process_id[16]; 37 | uint32_t dpme_reserved_2[32]; 38 | uint32_t dpme_reserved_3[62]; 39 | }; 40 | 41 | struct Block0 42 | { 43 | uint16_t sbSig; 44 | uint16_t sbBlkSize; 45 | uint32_t sbBlkCount; 46 | uint16_t sbDevType; 47 | uint16_t sbDevId; 48 | uint32_t sbDrvrData; 49 | uint16_t sbDrvrCount; 50 | DDMap sbDrvrMap[8]; 51 | uint8_t sbReserved[430]; 52 | }; 53 | 54 | #pragma pack() 55 | 56 | #endif 57 | -------------------------------------------------------------------------------- /src/exceptions.h: -------------------------------------------------------------------------------- 1 | #ifndef EXCEPTIONS_H 2 | #define EXCEPTIONS_H 3 | #include 4 | 5 | // Cannot resolve given file/directory path. 6 | class file_not_found_error : public std::runtime_error 7 | { 8 | public: 9 | using std::runtime_error::runtime_error; 10 | }; 11 | 12 | // Used for fatal errors. Indicates a bug or severe data corruption. 13 | class io_error : public std::runtime_error 14 | { 15 | public: 16 | using std::runtime_error::runtime_error; 17 | }; 18 | 19 | // Used to indicate non-existent xattr 20 | class no_data_error : public std::exception 21 | { 22 | public: 23 | virtual const char* what() const noexcept override { return "No data available"; } 24 | }; 25 | 26 | // Used when something unexpected or unknown is encountered. May also indicate data corruption. 
27 | class function_not_implemented_error : public std::runtime_error 28 | { 29 | public: 30 | using std::runtime_error::runtime_error; 31 | }; 32 | 33 | 34 | // Used to indicate non-existent xattr 35 | class attribute_not_found_error : public std::exception 36 | { 37 | public: 38 | virtual const char* what() const noexcept override { return "Attribute not found (93)"; } 39 | }; 40 | 41 | // Used to indicate non-existent xattr on a directory 42 | class operation_not_permitted_error : public std::exception 43 | { 44 | public: 45 | virtual const char* what() const noexcept override { return "Operation not permitted (1)"; } 46 | }; 47 | 48 | #endif 49 | -------------------------------------------------------------------------------- /src/CacheZone.h: -------------------------------------------------------------------------------- 1 | #ifndef CACHEZONE_H 2 | #define CACHEZONE_H 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include 11 | 12 | namespace std { 13 | template struct hash> 14 | { 15 | size_t operator()(const std::pair& t) const 16 | { 17 | return std::hash()(t.first) ^ std::hash()(t.second); 18 | } 19 | }; 20 | } 21 | 22 | class CacheZone 23 | { 24 | public: 25 | CacheZone(size_t maxBlocks); 26 | 27 | enum { BLOCK_SIZE = 4096 }; 28 | 29 | void store(const std::string& vfile, uint64_t blockId, const uint8_t* data, size_t bytes); 30 | size_t get(const std::string& vfile, uint64_t blockId, uint8_t* data, size_t offset, size_t maxBytes); 31 | 32 | void setMaxBlocks(size_t max); 33 | inline size_t maxBlocks() const { return m_maxBlocks; } 34 | 35 | inline float hitRate() const { return float(m_hits) / float(m_queries); } 36 | inline size_t size() const { return m_cache.size(); } 37 | private: 38 | void evictCache(); 39 | private: 40 | typedef std::pair CacheKey; 41 | 42 | struct CacheEntry 43 | { 44 | std::list::iterator itAge; 45 | std::array data; 46 | }; 47 | 48 | typedef std::unordered_map Cache; 49 | 50 | Cache m_cache; 51 | std::list m_cacheAge; 52 | size_t m_maxBlocks; 53 | uint64_t m_queries = 0, m_hits = 0; 54 | }; 55 | 56 | 57 | 58 | #endif 59 | -------------------------------------------------------------------------------- /src/HFSHighLevelVolume.h: -------------------------------------------------------------------------------- 1 | #ifndef HFSHIGHLEVELVOLUME_H 2 | #define HFSHIGHLEVELVOLUME_H 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include "HFSVolume.h" 8 | #include "HFSCatalogBTree.h" 9 | 10 | struct decmpfs_disk_header; 11 | 12 | // HFSHighLevelVolume wraps HFSVolume to provide a more high-level approach to the filesystem. 13 | // It transparently decompresses compressed files (decmpfs) and translates HFS+ file information 14 | // to well-known Unix-like types. 
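// A hedged usage sketch (the partition reader and the paths are hypothetical):
//
//   auto volume = std::make_shared<HFSVolume>(partitionReader);
//   HFSHighLevelVolume hl(volume);
//
//   for (const auto& entry : hl.listDirectory("/Applications"))
//       std::cout << entry.first << '\n';            // entry name
//
//   auto plist = hl.openFile("/Applications/Foo.app/Contents/Info.plist");
//   // 'plist' is a Reader; if the file is decmpfs-compressed, the returned
//   // reader already yields decompressed data.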
15 | class HFSHighLevelVolume 16 | { 17 | public: 18 | HFSHighLevelVolume(std::shared_ptr volume); 19 | 20 | inline bool isHFSX() const { return m_volume->isHFSX(); } 21 | inline uint64_t volumeSize() const { return m_volume->volumeSize(); } 22 | 23 | // See exceptions.h for the list of possible exceptions 24 | std::map listDirectory(const std::string& path); 25 | std::shared_ptr openFile(const std::string& path); 26 | struct stat stat(const std::string& path); 27 | std::vector listXattr(const std::string& path); 28 | std::vector getXattr(const std::string& path, const std::string& xattrName); 29 | private: 30 | void hfs_nativeToStat(const HFSPlusCatalogFileOrFolder& ff, struct stat* stat, bool resourceFork = false); 31 | void hfs_nativeToStat_decmpfs(const HFSPlusCatalogFileOrFolder& ff, struct stat* stat, bool resourceFork = false); 32 | decmpfs_disk_header* get_decmpfs(HFSCatalogNodeID cnid, std::vector& holder); 33 | private: 34 | std::shared_ptr m_volume; 35 | std::unique_ptr m_tree; 36 | }; 37 | 38 | #endif 39 | -------------------------------------------------------------------------------- /src/HFSBTree.h: -------------------------------------------------------------------------------- 1 | #ifndef HFSBTREE_H 2 | #define HFSBTREE_H 3 | #include "HFSVolume.h" 4 | #include "HFSFork.h" 5 | #include "hfsplus.h" 6 | #include 7 | #include 8 | #include 9 | #include "HFSBTreeNode.h" 10 | #include "CachedReader.h" 11 | #include "CacheZone.h" 12 | 13 | class HFSBTree 14 | { 15 | public: 16 | HFSBTree(std::shared_ptr fork, CacheZone* zone, const char* cacheTag); 17 | 18 | struct Key 19 | { 20 | uint16_t length; 21 | char data[]; 22 | } __attribute__((packed)); 23 | enum CompareResult 24 | { 25 | Smaller = -1, Equal = 0, Greater = 1 26 | }; 27 | 28 | // Returns true if the desiredKey >= indexKey 29 | typedef int (*KeyComparator)(const Key* indexKey, const Key* desiredKey); 30 | 31 | // Used when searching for an exact key (e.g. a specific file in a folder) 32 | std::shared_ptr findLeafNode(const Key* indexKey, KeyComparator comp, bool wildcard = false); 33 | 34 | // Sued when searching for an inexact key (e.g. when listing a folder) 35 | // Return value includes the leaf node where the comparator returns true for the first time when approaching from the right, 36 | // and all following nodes for which the comparator returns true as well. 37 | std::vector> findLeafNodes(const Key* indexKey, KeyComparator comp); 38 | 39 | protected: 40 | std::shared_ptr traverseTree(int nodeIndex, const Key* indexKey, KeyComparator comp, bool wildcard); 41 | void walkTree(int nodeIndex); 42 | protected: 43 | std::shared_ptr m_fork; 44 | std::shared_ptr m_reader; 45 | //char* m_tree; 46 | BTHeaderRec m_header; 47 | }; 48 | 49 | #endif 50 | -------------------------------------------------------------------------------- /src/stat_xlate.h: -------------------------------------------------------------------------------- 1 | #ifndef _STAT_XLATE_H 2 | #define _STAT_XLATE_H 3 | 4 | // THIS FILE IS ONLY USED UNDER DARLING. 5 | // 6 | // Explanation: 7 | // Under Darling, this FUSE module is built as a Mach-O 8 | // application against macOS header files. However, 9 | // it still links against host system's libfuse.so. 10 | // This causes problems with struct stat, the layout 11 | // of which differs between the two systems. 
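// The linux_stat layout below mirrors the x86-64 Linux struct stat, and
// bsd_stat_to_linux_stat() copies the macOS fields into it one by one so the
// host libfuse receives the layout it expects.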
12 | 13 | struct linux_stat 14 | { 15 | unsigned long st_dev; 16 | unsigned long st_ino; 17 | unsigned long st_nlink; 18 | 19 | unsigned int st_mode; 20 | unsigned int st_uid; 21 | unsigned int st_gid; 22 | unsigned int __pad0; 23 | unsigned long st_rdev; 24 | unsigned long st_size; 25 | unsigned long st_blksize; 26 | unsigned long st_blocks; 27 | 28 | unsigned long st_atime_sec; 29 | unsigned long st_atime_nsec; 30 | unsigned long st_mtime_sec; 31 | unsigned long st_mtime_nsec; 32 | unsigned long st_ctime_sec; 33 | unsigned long st_ctime_nsec; 34 | long unused[3]; 35 | }; 36 | 37 | static inline void bsd_stat_to_linux_stat(const struct stat* in, struct linux_stat* out) 38 | { 39 | out->st_dev = in->st_dev; 40 | out->st_ino = in->st_ino; 41 | out->st_nlink = in->st_nlink; 42 | 43 | out->st_mode = in->st_mode; 44 | out->st_uid = in->st_uid; 45 | out->st_gid = in->st_gid; 46 | out->st_rdev = in->st_rdev; 47 | out->st_size = in->st_size; 48 | out->st_blksize = in->st_blksize; 49 | out->st_blocks = in->st_blocks; 50 | 51 | out->st_atime_sec = in->st_atime; 52 | out->st_mtime_sec = in->st_mtime; 53 | out->st_ctime_sec = in->st_ctime; 54 | out->st_atime_nsec = in->st_atimespec.tv_nsec; 55 | out->st_mtime_nsec = in->st_mtimespec.tv_nsec; 56 | out->st_ctime_nsec = in->st_ctimespec.tv_nsec; 57 | } 58 | 59 | #endif 60 | 61 | -------------------------------------------------------------------------------- /src/CacheZone.cpp: -------------------------------------------------------------------------------- 1 | #include "CacheZone.h" 2 | #include 3 | #include 4 | #include 5 | 6 | CacheZone::CacheZone(size_t maxBlocks) 7 | : m_maxBlocks(maxBlocks) 8 | { 9 | } 10 | 11 | void CacheZone::setMaxBlocks(size_t max) 12 | { 13 | m_maxBlocks = max; 14 | evictCache(); 15 | } 16 | 17 | void CacheZone::store(const std::string& vfile, uint64_t blockId, const uint8_t* data, size_t bytes) 18 | { 19 | CacheKey key = CacheKey(blockId, vfile); 20 | CacheEntry entry; 21 | std::unordered_map::iterator it; 22 | 23 | #ifdef DEBUG 24 | std::cout << "CacheZone::store(): blockId=" << blockId << ", bytes=" << bytes << std::endl; 25 | #endif 26 | 27 | std::copy(data, data+bytes, entry.data.begin()); 28 | 29 | it = m_cache.insert(m_cache.begin(), { key, entry }); 30 | m_cacheAge.push_back(key); 31 | it->second.itAge = --m_cacheAge.end(); 32 | 33 | if (m_cache.size() > m_maxBlocks) 34 | evictCache(); 35 | } 36 | 37 | size_t CacheZone::get(const std::string& vfile, uint64_t blockId, uint8_t* data, size_t offset, size_t maxBytes) 38 | { 39 | CacheKey key = CacheKey(blockId, vfile); 40 | auto it = m_cache.find(key); 41 | 42 | #ifdef DEBUG 43 | std::cout << "CacheZone::get(): blockId=" << blockId << ", offset=" << offset << ", maxBytes=" << maxBytes << std::endl; 44 | #endif 45 | 46 | m_queries++; 47 | 48 | if (it == m_cache.end()) 49 | return 0; 50 | 51 | maxBytes = std::min(it->second.data.size() - offset, maxBytes); 52 | memcpy(data, &it->second.data[offset], maxBytes); 53 | 54 | m_cacheAge.erase(it->second.itAge); 55 | m_cacheAge.push_back(key); 56 | it->second.itAge = --m_cacheAge.end(); 57 | m_hits++; 58 | 59 | return maxBytes; 60 | } 61 | 62 | void CacheZone::evictCache() 63 | { 64 | while (m_cache.size() > m_maxBlocks) 65 | { 66 | CacheKey& key = m_cacheAge.front(); 67 | m_cache.erase(key); 68 | m_cacheAge.erase(m_cacheAge.begin()); 69 | } 70 | } 71 | -------------------------------------------------------------------------------- /test/main-test-hfs+.cpp: 
-------------------------------------------------------------------------------- 1 | #include 2 | #include "FileReader.h" 3 | #include "AppleDisk.h" 4 | #include "HFSVolume.h" 5 | #include "HFSCatalogBTree.h" 6 | 7 | int main(int argc, char** argv) 8 | { 9 | FileReader* reader = new FileReader(argv[1]); 10 | AppleDisk* adisk = new AppleDisk(reader); 11 | Reader* partReader = nullptr; 12 | HFSVolume* volume; 13 | HFSCatalogBTree* rootTree; 14 | 15 | auto parts = adisk->partitions(); 16 | 17 | for (int i = 0; i < parts.size(); i++) 18 | { 19 | if (parts[i].type == "Apple_HFS") // TODO: Apple_HFSX 20 | { 21 | partReader = adisk->readerForPartition(i); 22 | break; 23 | } 24 | } 25 | 26 | if (!partReader) 27 | { 28 | std::cerr << "HFS partition not found\n"; 29 | return 1; 30 | } 31 | 32 | volume = new HFSVolume(partReader); 33 | 34 | const char* name = "/Squiggle.app/Contents"; 35 | uint64_t total, free; 36 | Reader* fileReader; 37 | char* buf; 38 | 39 | volume->usage(total, free); 40 | std::cout << "Disk size: " << total << " bytes\n"; 41 | std::cout << "Free size: " << free << " bytes\n"; 42 | 43 | rootTree = volume->rootCatalogTree(); 44 | 45 | std::map root; 46 | rootTree->listDirectory(name, root); 47 | 48 | std::cout << name << " contains " << root.size() << " elems\n"; 49 | for (auto it = root.begin(); it != root.end(); it++) 50 | { 51 | RecordType recType = it->second.file.recordType; 52 | std::cout << "* " << it->first << ' '; 53 | 54 | if (recType == RecordType::kHFSPlusFolderRecord) 55 | std::cout << "\n"; 56 | else 57 | std::cout << it->second.file.dataFork.logicalSize << std::endl; 58 | } 59 | 60 | rootTree->openFile("/Squiggle.app/Contents/Info.plist", &fileReader); 61 | buf = new char[874]; 62 | 63 | fileReader->read(buf, 873, 0); 64 | buf[873] = 0; 65 | 66 | std::cout << buf; 67 | 68 | delete [] buf; 69 | delete fileReader; 70 | 71 | delete rootTree; 72 | delete partReader; 73 | delete adisk; 74 | delete reader; 75 | 76 | return 0; 77 | } 78 | -------------------------------------------------------------------------------- /src/unichar.cpp: -------------------------------------------------------------------------------- 1 | #include "unichar.h" 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | using icu::UnicodeString; 9 | UConverter *g_utf16be; 10 | 11 | static void InitConverter() __attribute__((constructor)); 12 | static void ExitConverter() __attribute__((destructor)); 13 | 14 | std::string UnicharToString(uint16_t length, const unichar* string) 15 | { 16 | std::string result; 17 | UErrorCode error = U_ZERO_ERROR; 18 | 19 | UnicodeString str((char*) string, length*2, g_utf16be, error); 20 | 21 | assert(U_SUCCESS(error)); 22 | str.toUTF8String(result); 23 | 24 | return result; 25 | } 26 | 27 | bool EqualNoCase(const HFSString& str1, const std::string& str2) 28 | { 29 | UErrorCode error = U_ZERO_ERROR; 30 | UnicodeString ustr = UnicodeString::fromUTF8(str2); 31 | UnicodeString ustr2 = UnicodeString((char*)str1.string, be(str1.length)*2, g_utf16be, error); 32 | 33 | assert(U_SUCCESS(error)); 34 | 35 | return ustr.caseCompare(ustr2, 0) == 0; 36 | } 37 | 38 | bool EqualCase(const HFSString& str1, const std::string& str2) 39 | { 40 | UErrorCode error = U_ZERO_ERROR; 41 | UnicodeString ustr = UnicodeString::fromUTF8(str2); 42 | UnicodeString ustr2 = UnicodeString((char*)str1.string, be(str1.length)*2, g_utf16be, error); 43 | 44 | assert(U_SUCCESS(error)); 45 | 46 | return ustr == ustr2; 47 | } 48 | 49 | uint16_t StringToUnichar(const std::string& 
in, unichar* out, size_t maxLength) 50 | { 51 | UErrorCode error = U_ZERO_ERROR; 52 | UnicodeString str = UnicodeString::fromUTF8(in); 53 | auto bytes = str.extract((char*) out, maxLength*sizeof(unichar), g_utf16be, error); 54 | 55 | assert(U_SUCCESS(error)); 56 | 57 | return bytes / sizeof(unichar); 58 | } 59 | 60 | void InitConverter() 61 | { 62 | UErrorCode error = U_ZERO_ERROR; 63 | g_utf16be = ucnv_open("UTF-16BE", &error); 64 | 65 | assert(U_SUCCESS(error)); 66 | } 67 | 68 | void ExitConverter() 69 | { 70 | ucnv_close(g_utf16be); 71 | } 72 | -------------------------------------------------------------------------------- /src/HFSCatalogBTree.h: -------------------------------------------------------------------------------- 1 | #ifndef HFSCATALOGBTREE_H 2 | #define HFSCATALOGBTREE_H 3 | #include "HFSBTree.h" 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include "hfsplus.h" 9 | #include "HFSBTreeNode.h" 10 | #include "CacheZone.h" 11 | #include 12 | 13 | class HFSCatalogBTree : protected HFSBTree 14 | { 15 | public: 16 | // using HFSBTree::HFSBTree; 17 | HFSCatalogBTree(std::shared_ptr fork, HFSVolume* volume, CacheZone* zone); 18 | 19 | int listDirectory(const std::string& path, std::map>& contents); 20 | 21 | std::shared_ptr findHFSPlusCatalogFileOrFolderForParentIdAndName(HFSCatalogNodeID parentID, const std::string &elem); 22 | 23 | 24 | int stat(std::string path, HFSPlusCatalogFileOrFolder* s); 25 | int openFile(const std::string& path, std::shared_ptr& forkOut, bool resourceFork = false); 26 | 27 | bool isCaseSensitive() const; 28 | 29 | // Debug only 30 | void dumpTree() const; 31 | 32 | static time_t appleToUnixTime(uint32_t apple); 33 | protected: 34 | std::string readSymlink(HFSPlusCatalogFile* file); 35 | 36 | private: 37 | void appendNameAndHFSPlusCatalogFileOrFolderFromLeafForParentId(std::shared_ptr leafNodePtr, HFSCatalogNodeID cnid, std::map>& map); 38 | void appendNameAndHFSPlusCatalogFileOrFolderFromLeafForParentIdAndName(std::shared_ptr leafNodePtr, HFSCatalogNodeID cnid, const std::string& name, std::map>& map); 39 | 40 | static int caseInsensitiveComparator(const Key* indexKey, const Key* desiredKey); 41 | static int caseSensitiveComparator(const Key* indexKey, const Key* desiredKey); 42 | static int idOnlyComparator(const Key* indexKey, const Key* desiredKey); 43 | static void fixEndian(HFSPlusCatalogFileOrFolder& ff); 44 | static void replaceChars(std::string& str, char oldChar, char newChar); 45 | 46 | void dumpTree(int nodeIndex, int depth) const; 47 | private: 48 | HFSVolume* m_volume; 49 | HFSCatalogNodeID m_hardLinkDirID; 50 | }; 51 | 52 | #endif 53 | 54 | -------------------------------------------------------------------------------- /src/HFSExtentsOverflowBTree.cpp: -------------------------------------------------------------------------------- 1 | #include "HFSExtentsOverflowBTree.h" 2 | #include "be.h" 3 | #include "exceptions.h" 4 | #include 5 | 6 | HFSExtentsOverflowBTree::HFSExtentsOverflowBTree(std::shared_ptr fork, CacheZone* zone) 7 | : HFSBTree(fork, zone, "ExtentsOverflow") 8 | { 9 | } 10 | 11 | void HFSExtentsOverflowBTree::findExtentsForFile(HFSCatalogNodeID cnid, bool resourceFork, uint32_t startBlock, std::vector& extraExtents) 12 | { 13 | HFSPlusExtentKey key; 14 | std::vector> leaves; 15 | bool first = true; 16 | 17 | key.forkType = resourceFork ? 
0xff : 0; 18 | key.fileID = htobe32(cnid); 19 | 20 | leaves = findLeafNodes((Key*) &key, cnidComparator); 21 | 22 | for (std::shared_ptr leafPtr : leaves) 23 | { 24 | HFSBTreeNode& leaf = *leafPtr; 25 | for (int i = 0; i < leaf.recordCount(); i++) 26 | { 27 | HFSPlusExtentKey* recordKey = leaf.getRecordKey(i); 28 | HFSPlusExtentDescriptor* extents; 29 | 30 | if (recordKey->forkType != key.forkType || recordKey->fileID != key.fileID) 31 | continue; 32 | 33 | //std::cout << "Examining extra extents from startBlock " << be(recordKey->startBlock) << std::endl; 34 | if (be(recordKey->startBlock) < startBlock) // skip descriptors already contained in the extents file 35 | continue; 36 | 37 | if (first) 38 | { 39 | if (be(recordKey->startBlock) != startBlock) 40 | throw io_error("Unexpected startBlock value"); 41 | first = false; 42 | } 43 | 44 | extents = leaf.getRecordData(i); 45 | 46 | // up to 8 extent descriptors per record 47 | for (int x = 0; x < 8; x++) 48 | { 49 | if (!extents[x].blockCount) 50 | { 51 | //std::cout << "Extent #" << x << " has zero blockCount\n"; 52 | break; 53 | } 54 | 55 | extraExtents.push_back(HFSPlusExtentDescriptor{ be(extents[x].startBlock), be(extents[x].blockCount) }); 56 | } 57 | } 58 | } 59 | } 60 | 61 | int HFSExtentsOverflowBTree::cnidComparator(const Key* indexKey, const Key* desiredKey) 62 | { 63 | const HFSPlusExtentKey* indexExtentKey = reinterpret_cast(indexKey); 64 | const HFSPlusExtentKey* desiredExtentKey = reinterpret_cast(desiredKey); 65 | 66 | if (indexExtentKey->forkType > desiredExtentKey->forkType) 67 | return 1; 68 | else if (indexExtentKey->forkType < desiredExtentKey->forkType) 69 | return -1; 70 | else 71 | { 72 | if (be(indexExtentKey->fileID) > be(desiredExtentKey->fileID)) 73 | return 1; 74 | else if (be(indexExtentKey->fileID) < be(desiredExtentKey->fileID)) 75 | return -1; 76 | else 77 | return 0; 78 | } 79 | } 80 | 81 | -------------------------------------------------------------------------------- /src/GPTDisk.cpp: -------------------------------------------------------------------------------- 1 | #include "GPTDisk.h" 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include "gpt.h" 7 | #include "SubReader.h" 8 | #include "exceptions.h" 9 | 10 | GPTDisk::GPTDisk(std::shared_ptr reader) 11 | : m_reader(reader) 12 | { 13 | loadPartitions(nullptr); 14 | } 15 | 16 | GPTDisk::GPTDisk(std::shared_ptr protectiveMBR, std::shared_ptr partitionTable) 17 | : m_reader(nullptr) 18 | { 19 | if (!isGPTDisk(protectiveMBR)) 20 | throw io_error("Not a GPT disk!"); 21 | loadPartitions(partitionTable); 22 | } 23 | 24 | bool GPTDisk::isGPTDisk(std::shared_ptr reader) 25 | { 26 | ProtectiveMBR mbr; 27 | if (reader->read(&mbr, sizeof(mbr), 0) != sizeof(mbr)) 28 | return false; 29 | 30 | if (mbr.signature != MBR_SIGNATURE) 31 | return false; 32 | if (mbr.partitions[0].type != MPT_GPT_FAKE_TYPE) 33 | return false; 34 | 35 | return true; 36 | } 37 | 38 | std::string GPTDisk::makeGUID(const GPT_GUID& guid) 39 | { 40 | std::stringstream ss; 41 | int pos = 0; 42 | 43 | ss << std::hex << std::uppercase; 44 | ss << std::setw(8) << std::setfill('0') << guid.data1; 45 | 46 | ss << '-'; 47 | ss << std::setw(4) << std::setfill('0') << guid.data2 << '-' << guid.data3 << '-'; 48 | 49 | for (int i = 0; i < 8; i++) 50 | { 51 | ss << std::setw(2) << std::setfill('0') << uint32_t(guid.data4[i]); 52 | if (i == 1) 53 | ss << '-'; 54 | } 55 | 56 | return ss.str(); 57 | } 58 | 59 | void GPTDisk::loadPartitions(std::shared_ptr table) 60 | { 61 | uint64_t offset; 62 | 
int32_t rd; 63 | GPTPartition part[128]; 64 | 65 | if (table) 66 | offset = 0; 67 | else 68 | { 69 | offset = 2*512; 70 | table = m_reader; 71 | } 72 | 73 | rd = table->read(part, sizeof(part), offset); 74 | 75 | for (int i = 0; i < rd / sizeof(GPTPartition); i++) 76 | { 77 | Partition p; 78 | char name[37]; 79 | std::string typeGUID = makeGUID(part[i].typeGUID); 80 | 81 | memset(name, 0, sizeof(name)); 82 | for (int j = 0; j < 36; j++) 83 | name[j] = char(part[i].name[j]); 84 | 85 | p.name = name; 86 | p.size = (part[i].lastLBA - part[i].firstLBA + 1) * 512; 87 | p.offset = part[i].firstLBA * 512; 88 | 89 | if (typeGUID == GUID_EMPTY) 90 | p.type = "Apple_Free"; 91 | else if (typeGUID == GUID_HFSPLUS) 92 | p.type = "Apple_HFS"; 93 | else 94 | p.type = typeGUID; 95 | 96 | m_partitions.push_back(p); 97 | } 98 | } 99 | 100 | std::shared_ptr GPTDisk::readerForPartition(int index) 101 | { 102 | const Partition& part = m_partitions.at(index); 103 | return std::shared_ptr(new SubReader(m_reader, part.offset, part.size)); 104 | } 105 | 106 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # darling-dmg 2 | 3 | This project allows ordinary users to directly mount OS X disk images under Linux via FUSE. darling-dmg is part of Darling - http://www.darlinghq.org 4 | 5 | Without darling-dmg, the only way to do this would be to manually extract the DMG file, become root and mount the HFS+ filesystem as root. This is slow, wasteful and may even crash your system. The project's author has seen the Linux HFS+ implementation cause kernel crashes. 6 | 7 | ## Supported file types 8 | 9 | * DMG (UDIF) files containing an Apple Disk Image. 10 | * Apple Disk Images containing an HFS+/HFSX file system. 11 | * HFS+/HFSX file systems (incl. file systems embedded within HFS). 12 | 13 | This means, darling-dmg can mount DMG files or unpacked DMG files or a single partition carved out of the latter. 14 | 15 | Read only access only. 16 | 17 | ## Build Requirements 18 | 19 | | Dependency | Required version | Notes | 20 | |------------|----------------------|------------------------------------| 21 | | GCC/Clang | >5 (GCC), >3 (Clang) | Compiler with C++11 support | 22 | | CMake | 3.10 | Build system | 23 | | pkg-config | | Library-agnostic package detection | 24 | | OpenSSL | | Base64 decoding | 25 | | Bzip2 | | Decompression | 26 | | Zlib | | Decompression | 27 | | FUSE | 2.x (not 3.x) | Userspace filesystem support | 28 | | libicu | | Unicode support | 29 | | libxml2 | | XML (property list) parsing | 30 | 31 | `darling-dmg` requires a C++11-capable compiler, CMake >3.10 and `make` alongside the remaining dependencies mentioned above. Below are common ways to install library dependencies. 
32 | 33 | On Fedora (and derivatives): 34 | 35 | ```bash 36 | sudo dnf install fuse-devel bzip2-devel libicu-devel libxml2-devel openssl-devel zlib-devel pkgconf 37 | ``` 38 | 39 | On Debian (and derivatives): 40 | 41 | ```bash 42 | sudo apt-get install libfuse-dev libbz2-dev libicu-dev libxml2-dev libssl-dev libz-dev pkg-config 43 | ``` 44 | 45 | On Alpine Linux: 46 | 47 | ```bash 48 | sudo apk add fuse-dev bzip2-dev icu-dev libxml2-dev openssl-dev zlib-dev pkgconf 49 | ``` 50 | 51 | ## Usage 52 | 53 | ``` 54 | darling-dmg [FUSE arguments] 55 | ``` 56 | 57 | ### Accessing resource forks 58 | 59 | Resource forks are available via xattrs (extended attributes) or preferably under the name ````/original/filename#..namedfork#rsrc````. 60 | 61 | ### Reusability 62 | 63 | Some people have had success with using darling-dmg as a library for their own use. 64 | 65 | -------------------------------------------------------------------------------- /src/AppleDisk.cpp: -------------------------------------------------------------------------------- 1 | #include "AppleDisk.h" 2 | #include 3 | #include "be.h" 4 | #include "exceptions.h" 5 | #include 6 | #include "SubReader.h" 7 | 8 | AppleDisk::AppleDisk(std::shared_ptr reader) 9 | : m_reader(reader) 10 | { 11 | load(nullptr); 12 | } 13 | 14 | AppleDisk::AppleDisk(std::shared_ptr readerBlock0, std::shared_ptr readerPM) 15 | : m_reader(readerBlock0) 16 | { 17 | load(readerPM); 18 | } 19 | 20 | void AppleDisk::load(std::shared_ptr readerPM) 21 | { 22 | size_t blockSize; 23 | 24 | m_reader->read(&m_block0, sizeof(m_block0), 0); 25 | 26 | if (be(m_block0.sbSig) != BLOCK0_SIGNATURE) 27 | throw io_error("Invalid block0 signature"); 28 | 29 | blockSize = be(m_block0.sbBlkSize); 30 | 31 | if (!blockSize) 32 | { 33 | int lastOK = -1; 34 | blockSize = 512; 35 | 36 | for (int i = 0; i < 63; i++) 37 | { 38 | DPME dpme; 39 | uint64_t offset; 40 | 41 | if (!readerPM) 42 | offset = (i+1)*blockSize; 43 | else 44 | offset = i*blockSize; 45 | 46 | if (!readerPM) 47 | m_reader->read(&dpme, sizeof(dpme), offset); 48 | else 49 | readerPM->read(&dpme, sizeof(dpme), offset); 50 | 51 | if (be(dpme.dpme_signature) != DPME_SIGNATURE) 52 | continue; 53 | 54 | if (lastOK != i-1) 55 | { 56 | blockSize *= i - lastOK; 57 | break; 58 | } 59 | lastOK = i; 60 | } 61 | } 62 | 63 | #ifdef DEBUG 64 | std::cout << "Block size: " << blockSize << std::endl; 65 | #endif 66 | 67 | for (int i = 0; i < 63; i++) 68 | { 69 | DPME dpme; 70 | uint64_t offset; 71 | Partition part; 72 | 73 | if (!readerPM) 74 | offset = (i+1)*blockSize; 75 | else 76 | offset = i*blockSize; 77 | 78 | if (!readerPM) 79 | { 80 | if (m_reader->read(&dpme, sizeof(dpme), offset) != sizeof(dpme)) 81 | break; 82 | } 83 | else 84 | { 85 | if (readerPM->read(&dpme, sizeof(dpme), offset) != sizeof(dpme)) 86 | break; 87 | } 88 | 89 | if (be(dpme.dpme_signature) != DPME_SIGNATURE) 90 | continue; 91 | 92 | #ifdef DEBUG 93 | std::cout << "Partition #" << (i+1) << " type: " << dpme.dpme_type << std::endl; 94 | #endif 95 | part.name = dpme.dpme_name; 96 | part.type = dpme.dpme_type; 97 | part.offset = uint64_t(be(dpme.dpme_pblock_start)) * blockSize; 98 | part.size = uint64_t(be(dpme.dpme_pblocks)) * blockSize; 99 | 100 | #ifdef DEBUG 101 | std::cout << "\tBlock start: " << uint64_t(be(dpme.dpme_pblock_start)) << std::endl; 102 | #endif 103 | 104 | m_partitions.push_back(part); 105 | } 106 | } 107 | 108 | bool AppleDisk::isAppleDisk(std::shared_ptr reader) 109 | { 110 | decltype(Block0::sbSig) sig = 0; 111 | reader->read(&sig, 
sizeof(sig), 0); 112 | return be(sig) == BLOCK0_SIGNATURE; 113 | } 114 | 115 | std::shared_ptr AppleDisk::readerForPartition(int index) 116 | { 117 | const Partition& part = m_partitions.at(index); 118 | return std::shared_ptr(new SubReader(m_reader, part.offset, part.size)); 119 | } 120 | -------------------------------------------------------------------------------- /src/ResourceFork.cpp: -------------------------------------------------------------------------------- 1 | #include "ResourceFork.h" 2 | #include 3 | #include "be.h" 4 | #include 5 | #include 6 | #include "SubReader.h" 7 | 8 | ResourceFork::ResourceFork(std::shared_ptr reader) 9 | : m_reader(reader) 10 | { 11 | loadResources(); 12 | } 13 | 14 | inline bool operator<(const ResourceFork::Resource& t, const ResourceFork::Resource& that) 15 | { 16 | if (t.type < that.type) 17 | return true; 18 | else if (t.type > that.type) 19 | return false; 20 | else 21 | return (t.id < that.id) ? true : false; 22 | } 23 | 24 | void ResourceFork::loadResources() 25 | { 26 | HFSResourceForkHeader header; 27 | HFSResourceMapHeader mapHeader; 28 | HFSResourceList listHeader; 29 | 30 | if (m_reader->read(&header, sizeof(header), 0) != sizeof(header)) 31 | throw std::runtime_error("Short read of resource fork header"); 32 | 33 | header.dataOffset = be(header.dataOffset); 34 | header.mapOffset = be(header.mapOffset); 35 | header.dataLength = be(header.dataLength); 36 | header.mapLength = be(header.mapLength); 37 | 38 | if (m_reader->read(&mapHeader, sizeof(mapHeader), header.mapOffset) != sizeof(mapHeader)) 39 | throw std::runtime_error("Short read of resource fork map header"); 40 | 41 | mapHeader.listOffset = be(mapHeader.listOffset); 42 | 43 | if (m_reader->read(&listHeader, sizeof(listHeader), header.mapOffset + mapHeader.listOffset) != sizeof(listHeader)) 44 | throw std::runtime_error("Short read of resource fork map list"); 45 | 46 | listHeader.count = be(listHeader.count); 47 | 48 | int pos = header.mapOffset + mapHeader.listOffset + offsetof(HFSResourceList, items); 49 | for (int i = 0; i < listHeader.count+1; i++) 50 | { 51 | HFSResourceListItem item; 52 | std::unique_ptr ptrs; 53 | const int offset = pos + sizeof(item)*i; 54 | 55 | if (m_reader->read(&item, sizeof(item), offset) != sizeof(item)) 56 | throw std::runtime_error("Short read of an HFSResourceListItem"); 57 | 58 | item.type = be(item.type); 59 | item.count = be(item.count); 60 | item.offset = be(item.offset); 61 | 62 | ptrs.reset(new HFSResourcePointer[item.count+1]); 63 | 64 | if (m_reader->read(ptrs.get(), sizeof(HFSResourcePointer) * (item.count+1), offset + item.offset) != sizeof(HFSResourcePointer) * (item.count+1)) 65 | throw std::runtime_error("Short read of HFSResourcePointers"); 66 | 67 | for (int j = 0; j < item.count+1; j++) 68 | { 69 | HFSResourceHeader hdr; 70 | Resource res = { item.type, be(ptrs[j].resourceId) }; 71 | ResourceLocation loc; 72 | 73 | loc.offset = header.dataOffset + be(ptrs[j].dataOffset); 74 | 75 | if (m_reader->read(&hdr, sizeof(hdr), loc.offset) != sizeof(hdr)) 76 | throw std::runtime_error("Short read of HFSResourceHeader"); 77 | 78 | loc.offset += offsetof(HFSResourceHeader, data); 79 | loc.length = be(hdr.length); 80 | 81 | m_resources.insert({ res, loc }); 82 | } 83 | } 84 | } 85 | 86 | std::shared_ptr ResourceFork::getResource(uint32_t resourceType, uint16_t id) 87 | { 88 | Resource res = { resourceType, id }; 89 | auto it = m_resources.find(res); 90 | 91 | if (it == m_resources.end()) 92 | return nullptr; 93 | else 94 | return 
std::shared_ptr(new SubReader(m_reader, it->second.offset, it->second.length)); 95 | } 96 | -------------------------------------------------------------------------------- /test/CacheTest.cpp: -------------------------------------------------------------------------------- 1 | #include "../src/CacheZone.h" 2 | #include "../src/CachedReader.h" 3 | #include "../src/MemoryReader.h" 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include "CacheTest.h" 9 | 10 | #define BOOST_TEST_MODULE CacheTest 11 | #include 12 | 13 | static void generateRandomData(std::vector& testData); 14 | 15 | BOOST_AUTO_TEST_CASE(CacheTest) 16 | { 17 | std::shared_ptr memoryReader; 18 | std::unique_ptr cachedReader; 19 | std::vector testData; 20 | CacheZone zone(50); 21 | 22 | // Generate 20000 bytes of random data 23 | generateRandomData(testData); 24 | 25 | memoryReader.reset(new MyMemoryReader(&testData[0], testData.size())); 26 | 27 | // Specify a few optimal boundaries 28 | memoryReader->setOptimalBoundaries({ 4096, 3*4096 }); 29 | 30 | cachedReader.reset(new CachedReader(memoryReader, &zone, "MyMemoryReader")); 31 | 32 | // Read all the data via cachedReader, 500 bytes at a time 33 | for (int i = 0; i < 20000; i += 500) 34 | { 35 | std::array buf; 36 | 37 | cachedReader->read(buf.begin(), buf.size(), i); 38 | 39 | // Verify data integrity 40 | //for (int j = 0; j < 500; j++) 41 | //{ 42 | // if (buf[j] != testData[i+j]) 43 | // std::cout << "Mismatch at byte " << j << std::endl; 44 | //} 45 | BOOST_CHECK(std::equal(buf.begin(), buf.end(), &testData[i])); 46 | } 47 | 48 | // The cache should contain 5 blocks, up to 4096 bytes each 49 | BOOST_CHECK_EQUAL(zone.size(), 5); 50 | } 51 | 52 | static void generateRandomData(std::vector& randomData) 53 | { 54 | std::random_device rd; 55 | std::mt19937 gen(rd()); 56 | std::uniform_int_distribution<> dis(0, 255); 57 | 58 | randomData.reserve(20000); 59 | 60 | for (int i = 0; i < 20000; i++) 61 | randomData.push_back(dis(gen)); 62 | } 63 | 64 | void MyMemoryReader::setOptimalBoundaries(std::initializer_list bd) 65 | { 66 | m_optimalBoundaries.assign(bd); 67 | } 68 | 69 | void MyMemoryReader::adviseOptimalBlock(uint64_t offset, uint64_t& blockStart, uint64_t& blockEnd) 70 | { 71 | if (m_optimalBoundaries.empty()) 72 | return MemoryReader::adviseOptimalBlock(offset, blockStart, blockEnd); 73 | 74 | auto it = std::upper_bound(m_optimalBoundaries.begin(), m_optimalBoundaries.end(), offset); 75 | 76 | if (it == m_optimalBoundaries.end()) 77 | { 78 | blockStart = m_optimalBoundaries.back(); 79 | blockEnd = length(); 80 | } 81 | else 82 | { 83 | blockEnd = *it; 84 | 85 | if (it != m_optimalBoundaries.begin()) 86 | { 87 | it--; 88 | blockStart = *it; 89 | } 90 | else 91 | blockStart = 0; 92 | } 93 | } 94 | 95 | int32_t MyMemoryReader::read(void* buf, int32_t count, uint64_t offset) 96 | { 97 | // Check that this read starts and ends on an optimal boundary 98 | bool startsOnOptimal, endsOnOptimal; 99 | 100 | std::cout << "Read: offset=" << offset << ", count=" << count << std::endl; 101 | 102 | startsOnOptimal = offset == 0; 103 | startsOnOptimal |= std::find(m_optimalBoundaries.begin(), m_optimalBoundaries.end(), offset) != m_optimalBoundaries.end(); 104 | 105 | BOOST_CHECK(startsOnOptimal); 106 | 107 | endsOnOptimal = (offset+count) == length(); 108 | endsOnOptimal |= std::find(m_optimalBoundaries.begin(), m_optimalBoundaries.end(), offset+count) != m_optimalBoundaries.end(); 109 | 110 | BOOST_CHECK(endsOnOptimal); 111 | 112 | return MemoryReader::read(buf, count, offset); 
113 | } 114 | -------------------------------------------------------------------------------- /src/HFSVolume.cpp: -------------------------------------------------------------------------------- 1 | #include "HFSVolume.h" 2 | #include 3 | #include "be.h" 4 | #include "HFSFork.h" 5 | #include "HFSCatalogBTree.h" 6 | #include "HFSExtentsOverflowBTree.h" 7 | #include "HFSAttributeBTree.h" 8 | #include "SubReader.h" 9 | #include "exceptions.h" 10 | 11 | HFSVolume::HFSVolume(std::shared_ptr reader) 12 | : m_reader(reader), m_embeddedReader(nullptr), m_overflowExtents(nullptr), m_attributes(nullptr), 13 | m_fileZone(6400), m_btreeZone(6400) 14 | { 15 | static_assert(sizeof(HFSPlusVolumeHeader) >= sizeof(HFSMasterDirectoryBlock), "Bad read is about to happen"); 16 | 17 | if (m_reader->read(&m_header, sizeof(m_header), 1024) != sizeof(m_header)) 18 | throw io_error("Cannot read volume header"); 19 | 20 | if (be(m_header.signature) == HFS_SIGNATURE) 21 | { 22 | HFSMasterDirectoryBlock* block = reinterpret_cast(&m_header); 23 | processEmbeddedHFSPlus(block); 24 | } 25 | 26 | if (be(m_header.signature) != HFSP_SIGNATURE && be(m_header.signature) != HFSX_SIGNATURE) 27 | throw io_error("Invalid HFS+/HFSX signature"); 28 | 29 | std::shared_ptr fork (new HFSFork(this, m_header.extentsFile)); 30 | m_overflowExtents = new HFSExtentsOverflowBTree(fork, &m_btreeZone); 31 | 32 | if (m_header.attributesFile.logicalSize != 0) 33 | { 34 | fork.reset(new HFSFork(this, m_header.attributesFile, kHFSAttributesFileID)); 35 | m_attributes = new HFSAttributeBTree(fork, &m_btreeZone); 36 | } 37 | } 38 | 39 | HFSVolume::~HFSVolume() 40 | { 41 | delete m_attributes; 42 | delete m_overflowExtents; 43 | //delete m_embeddedReader; 44 | } 45 | 46 | void HFSVolume::processEmbeddedHFSPlus(HFSMasterDirectoryBlock* block) 47 | { 48 | uint32_t blockSize = be(block->drAlBlkSiz); 49 | uint64_t offset, length; 50 | 51 | if (be(block->drEmbedSigWord) != HFSP_SIGNATURE && be(block->drEmbedSigWord) != HFSX_SIGNATURE) 52 | throw function_not_implemented_error("Original HFS is not supported"); 53 | 54 | offset = blockSize * be(block->drEmbedExtent.startBlock) + 512 * be(block->drAlBlSt); 55 | length = blockSize * be(block->drEmbedExtent.blockCount); 56 | 57 | #ifdef DEBUG 58 | std::cout << "HFS+ partition is embedded at offset: " << offset << ", length: " << length << std::endl; 59 | #endif 60 | 61 | m_embeddedReader.reset(new SubReader(m_reader, offset, length)); 62 | m_reader = m_embeddedReader; 63 | 64 | m_reader->read(&m_header, sizeof(m_header), 1024); 65 | } 66 | 67 | bool HFSVolume::isHFSPlus(std::shared_ptr reader) 68 | { 69 | HFSPlusVolumeHeader header; 70 | if (reader->read(&header, sizeof(header), 1024) != sizeof(header)) 71 | return false; 72 | 73 | if (be(header.signature) == HFS_SIGNATURE) 74 | { 75 | HFSMasterDirectoryBlock* block = reinterpret_cast(&header); 76 | return be(block->drEmbedSigWord) == HFSP_SIGNATURE || be(block->drEmbedSigWord) == HFSX_SIGNATURE; 77 | } 78 | 79 | return be(header.signature) == HFSP_SIGNATURE || be(header.signature) == HFSX_SIGNATURE; 80 | } 81 | 82 | bool HFSVolume::isHFSX() const 83 | { 84 | return be(m_header.signature) == HFSX_SIGNATURE; 85 | } 86 | 87 | void HFSVolume::usage(uint64_t& totalBytes, uint64_t& freeBytes) const 88 | { 89 | totalBytes = be(m_header.blockSize) * be(m_header.totalBlocks); 90 | freeBytes = be(m_header.blockSize) * be(m_header.freeBlocks); 91 | } 92 | 93 | HFSCatalogBTree* HFSVolume::rootCatalogTree() 94 | { 95 | std::shared_ptr fork (new HFSFork(this, 
m_header.catalogFile, kHFSCatalogFileID)); 96 | HFSCatalogBTree* btree = new HFSCatalogBTree(fork, this, &m_btreeZone); 97 | 98 | return btree; 99 | } 100 | 101 | 102 | -------------------------------------------------------------------------------- /CMakeLists.txt: -------------------------------------------------------------------------------- 1 | cmake_minimum_required(VERSION 3.13) 2 | 3 | project(darling-dmg CXX) 4 | 5 | set(dmg_sources 6 | src/unichar.cpp 7 | src/Reader.cpp 8 | src/FileReader.cpp 9 | src/HFSVolume.cpp 10 | src/AppleDisk.cpp 11 | src/SubReader.cpp 12 | src/HFSBTree.cpp 13 | src/HFSFork.cpp 14 | src/HFSCatalogBTree.cpp 15 | src/HFSExtentsOverflowBTree.cpp 16 | src/HFSAttributeBTree.cpp 17 | 18 | src/DMGDisk.cpp 19 | src/DMGPartition.cpp 20 | src/DMGDecompressor.cpp 21 | src/adc.cpp 22 | src/HFSZlibReader.cpp 23 | src/MemoryReader.cpp 24 | 25 | src/GPTDisk.cpp 26 | 27 | src/MacBinary.cpp 28 | src/ResourceFork.cpp 29 | src/CacheZone.cpp 30 | src/CachedReader.cpp 31 | 32 | src/HFSHighLevelVolume.cpp 33 | ) 34 | 35 | 36 | # This part of CMakeLists.txt is for when building darling-dmg 37 | # as a standalone FUSE module outside of Darling. 38 | if (NOT DARLING) 39 | 40 | set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} 41 | "${CMAKE_CURRENT_SOURCE_DIR}/cmake_modules/") 42 | 43 | add_definitions(-D_FILE_OFFSET_BITS=64) 44 | SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -ggdb -O0") 45 | 46 | include(FindLibXml2) 47 | 48 | find_package(LibXml2 REQUIRED) 49 | include_directories(${LIBXML2_INCLUDE_DIR}) 50 | 51 | if (WITH_TESTS) 52 | enable_testing() 53 | find_package(Boost COMPONENTS unit_test_framework REQUIRED) 54 | 55 | add_definitions(-DBOOST_TEST_DYN_LINK ) # -DDEBUG 56 | 57 | set(CacheTest_SRC 58 | test/CacheTest.cpp 59 | src/CacheZone.cpp 60 | src/CachedReader.cpp 61 | src/Reader.cpp 62 | src/MemoryReader.cpp 63 | ) 64 | 65 | add_executable(CacheTest ${CacheTest_SRC}) 66 | target_link_libraries(CacheTest ${Boost_UNIT_TEST_FRAMEWORK_LIBRARY}) 67 | add_test(NAME CacheTest COMMAND CacheTest) 68 | endif (WITH_TESTS) 69 | 70 | add_library(dmg SHARED 71 | src/unichar.cpp 72 | src/Reader.cpp 73 | src/FileReader.cpp 74 | src/HFSVolume.cpp 75 | src/AppleDisk.cpp 76 | src/SubReader.cpp 77 | src/HFSBTree.cpp 78 | src/HFSFork.cpp 79 | src/HFSCatalogBTree.cpp 80 | src/HFSExtentsOverflowBTree.cpp 81 | src/HFSAttributeBTree.cpp 82 | 83 | src/DMGDisk.cpp 84 | src/DMGPartition.cpp 85 | src/DMGDecompressor.cpp 86 | src/adc.cpp 87 | src/HFSZlibReader.cpp 88 | src/MemoryReader.cpp 89 | 90 | src/GPTDisk.cpp 91 | 92 | src/MacBinary.cpp 93 | src/ResourceFork.cpp 94 | src/CacheZone.cpp 95 | src/CachedReader.cpp 96 | 97 | src/HFSHighLevelVolume.cpp 98 | ) 99 | target_compile_features(dmg PRIVATE cxx_std_11) 100 | target_link_libraries(dmg -licuuc -lcrypto -lz -lbz2 ${LIBXML2_LIBRARY}) 101 | install(TARGETS dmg DESTINATION lib) 102 | 103 | add_executable(darling-dmg 104 | src/main-fuse.cpp 105 | ) 106 | 107 | SET(CMAKE_INSTALL_RPATH "${CMAKE_INSTALL_PREFIX}/lib") 108 | SET(CMAKE_BUILD_WITH_INSTALL_RPATH TRUE) 109 | SET(CMAKE_INSTALL_RPATH_USE_LINK_PATH TRUE) 110 | 111 | target_link_libraries(darling-dmg -lfuse dmg) 112 | install(TARGETS darling-dmg RUNTIME DESTINATION bin) 113 | 114 | # This is used in Darling build. 
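# A typical out-of-source build for the standalone (non-Darling) branch above, shown as an
# illustration only (WITH_TESTS is the optional switch defined earlier in that branch):
#   mkdir build && cd build
#   cmake -DWITH_TESTS=ON ..
#   make && ctest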
115 | else (NOT DARLING) 116 | include(wrap_elf) 117 | include(darling_exe) 118 | include(FindPkgConfig) 119 | 120 | add_definitions(-D_FILE_OFFSET_BITS=64 -DCOMPILE_WITH_LZFSE=1) 121 | 122 | pkg_check_modules(FUSE REQUIRED fuse) 123 | 124 | include_directories( 125 | ${FUSE_INCLUDE_DIRS} 126 | ) 127 | 128 | SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11 -nostdinc") 129 | 130 | wrap_elf(fuse libfuse.so) 131 | 132 | add_darling_executable(hdiutil 133 | ${dmg_sources} 134 | src/main-hdiutil.cpp 135 | ) 136 | 137 | target_link_libraries(hdiutil fuse icucore z bz2 crypto44 xml2 iconv lzfse) 138 | install(TARGETS hdiutil DESTINATION libexec/darling/usr/bin) 139 | 140 | endif (NOT DARLING) 141 | -------------------------------------------------------------------------------- /src/be.h: -------------------------------------------------------------------------------- 1 | #ifndef BENDIAN_H 2 | #define BENDIAN_H 3 | #include 4 | #ifdef __FreeBSD__ 5 | #include 6 | #elif defined(__APPLE__) 7 | #include 8 | #include 9 | 10 | #if TARGET_RT_LITTLE_ENDIAN 11 | #define __BYTE_ORDER __LITTLE_ENDIAN 12 | 13 | #define be16toh(x) OSSwapInt16(x) 14 | #define be32toh(x) OSSwapInt32(x) 15 | #define be64toh(x) OSSwapInt64(x) 16 | 17 | #define htobe16(x) OSSwapInt16(x) 18 | #define htobe32(x) OSSwapInt32(x) 19 | #define htobe64(x) OSSwapInt64(x) 20 | 21 | #define le16toh(x) (x) 22 | #define le32toh(x) (x) 23 | #define le64toh(x) (x) 24 | 25 | #else 26 | #define __BYTE_ORDER __BIG_ENDIAN 27 | 28 | #define le16toh(x) OSSwapInt16(x) 29 | #define le32toh(x) OSSwapInt32(x) 30 | #define le64toh(x) OSSwapInt64(x) 31 | 32 | #define htobe16(x) (x) 33 | #define htobe32(x) (x) 34 | #define htobe64(x) (x) 35 | 36 | #define be16toh(x) (x) 37 | #define be32toh(x) (x) 38 | #define be64toh(x) (x) 39 | 40 | #endif 41 | 42 | #elif defined(_WIN32) 43 | static uint16_t htobe16(uint16_t x) { 44 | union { uint16_t u16; uint8_t v[2]; } ret; 45 | ret.v[0] = (uint8_t)(x >> 8); 46 | ret.v[1] = (uint8_t)x; 47 | return ret.u16; 48 | } 49 | 50 | static uint32_t htobe32(uint32_t x) { 51 | union { uint32_t u32; uint8_t v[4]; } ret; 52 | ret.v[0] = (uint8_t)(x >> 24); 53 | ret.v[1] = (uint8_t)(x >> 16); 54 | ret.v[2] = (uint8_t)(x >> 8); 55 | ret.v[3] = (uint8_t)x; 56 | return ret.u32; 57 | } 58 | 59 | static uint64_t htobe64(uint64_t x) { 60 | union { uint64_t u64; uint8_t v[8]; } ret; 61 | ret.v[0] = (uint8_t)(x >> 56); 62 | ret.v[1] = (uint8_t)(x >> 48); 63 | ret.v[2] = (uint8_t)(x >> 40); 64 | ret.v[3] = (uint8_t)(x >> 32); 65 | ret.v[4] = (uint8_t)(x >> 24); 66 | ret.v[5] = (uint8_t)(x >> 16); 67 | ret.v[6] = (uint8_t)(x >> 8); 68 | ret.v[7] = (uint8_t)x; 69 | return ret.u64; 70 | } 71 | 72 | // windows can be only LE 73 | #define __BYTE_ORDER __LITTLE_ENDIAN // this define is required in HFSCatalogBTree.cpp 74 | 75 | #define be16toh(x) htobe16(x) 76 | #define be32toh(x) htobe32(x) 77 | #define be64toh(x) htobe64(x) 78 | 79 | #define le16toh(x) x 80 | #define le32toh(x) x 81 | #define le64toh(x) x 82 | 83 | #else 84 | #include 85 | #endif 86 | #include "hfsplus.h" // for RecordType 87 | 88 | 89 | template T be(T value); 90 | 91 | template <> inline RecordType be(RecordType value) 92 | { 93 | return RecordType(be16toh(uint16_t(value))); 94 | } 95 | 96 | template <> inline uint16_t be(uint16_t value) 97 | { 98 | return be16toh(value); 99 | } 100 | 101 | template <> inline uint32_t be(uint32_t value) 102 | { 103 | return be32toh(value); 104 | } 105 | 106 | template <> inline uint64_t be(uint64_t value) 107 | { 108 | return be64toh(value); 
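// Usage sketch (mirroring how the rest of the code base calls these helpers): values read
// straight from on-disk structures are big-endian and are converted on access, e.g.
//   uint32_t blockSize = be(header.blockSize);  // byte swap on little-endian hosts, no-op on big-endian
// while htobe16()/htobe32() convert the other way when building on-disk search keys.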
109 | } 110 | 111 | template <> inline int16_t be(int16_t value) 112 | { 113 | return be16toh(value); 114 | } 115 | 116 | template <> inline int32_t be(int32_t value) 117 | { 118 | return be32toh(value); 119 | } 120 | 121 | template <> inline int64_t be(int64_t value) 122 | { 123 | return be64toh(value); 124 | } 125 | 126 | template T le(T value); 127 | 128 | template <> inline uint16_t le(uint16_t value) 129 | { 130 | return le16toh(value); 131 | } 132 | 133 | template <> inline uint32_t le(uint32_t value) 134 | { 135 | return le32toh(value); 136 | } 137 | 138 | template <> inline uint64_t le(uint64_t value) 139 | { 140 | return le64toh(value); 141 | } 142 | 143 | template <> inline int16_t le(int16_t value) 144 | { 145 | return le16toh(value); 146 | } 147 | 148 | template <> inline int32_t le(int32_t value) 149 | { 150 | return le32toh(value); 151 | } 152 | 153 | template <> inline int64_t le(int64_t value) 154 | { 155 | return le64toh(value); 156 | } 157 | 158 | #endif 159 | -------------------------------------------------------------------------------- /src/adc.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include "adc.h" 4 | 5 | enum 6 | { 7 | ADC_PLAIN = 1, 8 | ADC_2BYTE, 9 | ADC_3BYTE 10 | }; 11 | 12 | 13 | /* Compression phrases 14 | * store phrase - 1 byte header + data, first byte 0x80-0xFF, max length 0x80 (7 bits + 1), no offset 15 | * short phrase - 2 byte header + data, first byte 0x00-0x3F, max length 0x12 (4 bits + 3), max offset 0x3FF (10 bits) 16 | * long phrase - 3 byte header + data, first byte 0x40-0x7F, max length 0x43 (6 bits + 4), max offset 0xFFFF (16 bits) 17 | */ 18 | 19 | int adc_decompress(int in_size, uint8_t* input, int avail_size, uint8_t* output, int restartIndex, int* bytes_written) 20 | { 21 | if (in_size == 0) 22 | return 0; 23 | 24 | bool output_full = false; 25 | bool input_short = false; 26 | unsigned char *inp = input; 27 | unsigned char *outp = output + restartIndex; 28 | int chunk_size; 29 | int offset; 30 | 31 | while (inp - input < in_size) 32 | { 33 | int chunk_type = adc_chunk_type(*inp); 34 | switch (chunk_type) 35 | { 36 | case ADC_PLAIN: 37 | chunk_size = adc_chunk_size(*inp); 38 | if ( inp - input > in_size - (chunk_size+1) ) { 39 | input_short = true; 40 | break; 41 | } 42 | if (outp + chunk_size - output > avail_size) 43 | { 44 | output_full = true; 45 | break; 46 | } 47 | memcpy(outp, inp + 1, chunk_size); 48 | inp += chunk_size + 1; 49 | outp += chunk_size; 50 | break; 51 | 52 | case ADC_2BYTE: 53 | if ( inp - input > in_size - 2 ) { 54 | input_short = true; 55 | break; 56 | } 57 | chunk_size = adc_chunk_size(*inp); 58 | offset = adc_chunk_offset(inp); 59 | if (outp + chunk_size - output > avail_size) 60 | { 61 | output_full = true; 62 | break; 63 | } 64 | if (offset == 0) 65 | { 66 | memset(outp, *(outp - offset - 1), chunk_size); 67 | outp += chunk_size; 68 | inp += 2; 69 | } 70 | else 71 | { 72 | for (int i = 0; i < chunk_size; i++) 73 | { 74 | memcpy(outp, outp - offset - 1, 1); 75 | outp++; 76 | } 77 | inp += 2; 78 | } 79 | break; 80 | 81 | case ADC_3BYTE: 82 | if ( inp - input > in_size - 3 ) { 83 | input_short = true; 84 | break; 85 | } 86 | chunk_size = adc_chunk_size(*inp); 87 | offset = adc_chunk_offset(inp); 88 | if (outp + chunk_size - output > avail_size) 89 | { 90 | output_full = true; 91 | break; 92 | } 93 | if (offset == 0) 94 | { 95 | memset(outp, *(outp - offset - 1), chunk_size); 96 | outp += chunk_size; 97 | inp += 3; 98 | } 99 | else 100 | { 101 
| for (int i = 0; i < chunk_size; i++) 102 | { 103 | memcpy(outp, outp - offset - 1, 1); 104 | outp++; 105 | } 106 | inp += 3; 107 | } 108 | break; 109 | } 110 | if (output_full || input_short) 111 | break; 112 | } 113 | *bytes_written = outp - output; 114 | return inp - input; 115 | } 116 | 117 | int adc_chunk_type(char _byte) 118 | { 119 | if (_byte & 0x80) 120 | return ADC_PLAIN; 121 | else if (_byte & 0x40) 122 | return ADC_3BYTE; 123 | else 124 | return ADC_2BYTE; 125 | } 126 | 127 | int adc_chunk_size(char _byte) 128 | { 129 | switch (adc_chunk_type(_byte)) 130 | { 131 | case ADC_PLAIN: 132 | return (_byte & 0x7F) + 1; 133 | case ADC_2BYTE: 134 | return ((_byte & 0x3F) >> 2) + 3; 135 | case ADC_3BYTE: 136 | return (_byte & 0x3F) + 4; 137 | } 138 | return -1; 139 | } 140 | 141 | int adc_chunk_offset(unsigned char *chunk_start) 142 | { 143 | switch (adc_chunk_type(*chunk_start)) 144 | { 145 | case ADC_PLAIN: 146 | return 0; 147 | case ADC_2BYTE: 148 | return ((((unsigned char)*chunk_start & 0x03)) << 8) + (unsigned char)*(chunk_start + 1); 149 | case ADC_3BYTE: 150 | return (((unsigned char)*(chunk_start + 1)) << 8) + (unsigned char)*(chunk_start + 2); 151 | } 152 | return -1; 153 | } 154 | -------------------------------------------------------------------------------- /src/dmg.h: -------------------------------------------------------------------------------- 1 | #ifndef DMG_H 2 | #define DMG_H 3 | 4 | #pragma pack(1) 5 | #define UDIF_SIGNATURE 0x6B6F6C79 6 | 7 | enum 8 | { 9 | kUDIFFlagsFlattened = 1 10 | }; 11 | 12 | enum 13 | { 14 | kUDIFDeviceImageType = 1, 15 | kUDIFPartitionImageType = 2 16 | }; 17 | 18 | struct UDIFChecksum 19 | { 20 | uint32_t type; 21 | uint32_t size; 22 | uint32_t data[0x20]; 23 | }; 24 | 25 | struct UDIFID 26 | { 27 | uint32_t data1; /* smallest */ 28 | uint32_t data2; 29 | uint32_t data3; 30 | uint32_t data4; /* largest */ 31 | }; 32 | 33 | struct UDIFResourceFile 34 | { 35 | uint32_t fUDIFSignature; 36 | uint32_t fUDIFVersion; 37 | uint32_t fUDIFHeaderSize; 38 | uint32_t fUDIFFlags; 39 | 40 | uint64_t fUDIFRunningDataForkOffset; 41 | uint64_t fUDIFDataForkOffset; 42 | uint64_t fUDIFDataForkLength; 43 | uint64_t fUDIFRsrcForkOffset; 44 | uint64_t fUDIFRsrcForkLength; 45 | 46 | uint32_t fUDIFSegmentNumber; 47 | uint32_t fUDIFSegmentCount; 48 | UDIFID fUDIFSegmentID; /* a 128-bit number like a GUID, but does not seem to be a OSF GUID, since it doesn't have the proper versioning byte */ 49 | 50 | UDIFChecksum fUDIFDataForkChecksum; 51 | 52 | uint64_t fUDIFXMLOffset; 53 | uint64_t fUDIFXMLLength; 54 | 55 | uint8_t reserved1[0x78]; /* this is actually the perfect amount of space to store every thing in this struct until the checksum */ 56 | 57 | UDIFChecksum fUDIFMasterChecksum; 58 | 59 | uint32_t fUDIFImageVariant; 60 | uint64_t fUDIFSectorCount; 61 | 62 | uint32_t reserved2; 63 | uint32_t reserved3; 64 | uint32_t reserved4; 65 | 66 | }; 67 | 68 | struct BLKXRun 69 | { 70 | uint32_t type; 71 | uint32_t reserved; 72 | uint64_t sectorStart; 73 | uint64_t sectorCount; 74 | uint64_t compOffset; 75 | uint64_t compLength; 76 | }; 77 | 78 | enum class RunType : uint32_t 79 | { 80 | ZeroFill = 0, 81 | Raw = 1, 82 | Unknown = 2, 83 | ADC = 0x80000004, 84 | Zlib = 0x80000005, 85 | Bzip2 = 0x80000006, 86 | LZFSE = 0x80000007, 87 | Comment = 0x7ffffffe, 88 | Terminator = 0xffffffff 89 | }; 90 | 91 | struct SizeResource 92 | { 93 | uint16_t version; /* set to 5 */ 94 | uint32_t isHFS; /* first dword of v53(ImageInfoRec): Set to 1 if it's a HFS or HFS+ partition -- duh. 
*/ 95 | uint32_t unknown1; /* second dword of v53: seems to be garbage if it's HFS+, stuff related to HFS embedded if it's that*/ 96 | uint8_t dataLen; /* length of data that proceeds, comes right before the data in ImageInfoRec. Always set to 0 for HFS, HFS+ */ 97 | uint8_t data[255]; /* other data from v53, dataLen + 1 bytes, the rest NULL filled... a string? Not set for HFS, HFS+ */ 98 | uint32_t unknown2; /* 8 bytes before volumeModified in v53, seems to be always set to 0 for HFS, HFS+ */ 99 | uint32_t unknown3; /* 4 bytes before volumeModified in v53, seems to be always set to 0 for HFS, HFS+ */ 100 | uint32_t volumeModified; /* offset 272 in v53 */ 101 | uint32_t unknown4; /* always seems to be 0 for UDIF */ 102 | uint16_t volumeSignature; /* HX in our case */ 103 | uint16_t sizePresent; /* always set to 1 */ 104 | }; 105 | 106 | struct CSumResource 107 | { 108 | uint16_t version; /* set to 1 */ 109 | uint32_t type; /* set to 0x2 for MKBlockChecksum */ 110 | uint32_t checksum; 111 | }; 112 | 113 | #define DDM_DESCRIPTOR 0xFFFFFFFF 114 | #define ENTIRE_DEVICE_DESCRIPTOR 0xFFFFFFFE 115 | 116 | struct BLKXTable 117 | { 118 | uint32_t fUDIFBlocksSignature; 119 | uint32_t infoVersion; 120 | uint64_t firstSectorNumber; 121 | uint64_t sectorCount; 122 | 123 | uint64_t dataStart; 124 | uint32_t decompressBufferRequested; 125 | uint32_t blocksDescriptor; 126 | 127 | uint32_t reserved1; 128 | uint32_t reserved2; 129 | uint32_t reserved3; 130 | uint32_t reserved4; 131 | uint32_t reserved5; 132 | uint32_t reserved6; 133 | 134 | UDIFChecksum checksum; 135 | 136 | uint32_t blocksRunCount; 137 | BLKXRun runs[0]; 138 | }; 139 | 140 | #pragma pack() 141 | 142 | #endif 143 | 144 | -------------------------------------------------------------------------------- /src/HFSFork.cpp: -------------------------------------------------------------------------------- 1 | #include "HFSFork.h" 2 | #include "be.h" 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include "exceptions.h" 8 | #include "HFSExtentsOverflowBTree.h" 9 | 10 | HFSFork::HFSFork(HFSVolume* vol, const HFSPlusForkData& fork, HFSCatalogNodeID cnid, bool resourceFork) 11 | : m_volume(vol), m_fork(fork), m_cnid(cnid), m_resourceFork(resourceFork) 12 | { 13 | for (int i = 0; i < 8; i++) 14 | { 15 | auto& elem = m_fork.extents[i]; 16 | if (elem.blockCount > 0) 17 | m_extents.push_back(HFSPlusExtentDescriptor{ be(elem.startBlock), be(elem.blockCount) }); 18 | } 19 | } 20 | 21 | uint64_t HFSFork::length() 22 | { 23 | return be(m_fork.logicalSize); 24 | } 25 | 26 | void HFSFork::loadFromOverflowsFile(uint32_t blocksSoFar) 27 | { 28 | const size_t oldCount = m_extents.size(); 29 | 30 | if (!m_cnid) 31 | throw std::logic_error("Cannot search extents file, CNID is kHFSNullID"); 32 | 33 | // if (oldCount > 8) 34 | // throw io_error("Loaded extent count > 8, but appropriate extent not found"); 35 | if (oldCount < 8) 36 | throw io_error("Loaded extent count < 8, but appropriate extent not found"); 37 | 38 | m_volume->m_overflowExtents->findExtentsForFile(m_cnid, m_resourceFork, blocksSoFar, m_extents); 39 | if (m_extents.size() == oldCount) 40 | throw io_error("Overflow extents not found for given CNID"); 41 | } 42 | 43 | int32_t HFSFork::read(void* buf, int32_t count, uint64_t offset) 44 | { 45 | const auto blockSize = be(m_volume->m_header.blockSize); 46 | const uint32_t firstBlock = offset / blockSize; 47 | uint32_t blocksSoFar; 48 | int firstExtent, extent; 49 | uint32_t read = 0; 50 | uint64_t offsetInExtent; 51 | 52 | if (offset > 
be(m_fork.logicalSize)) 53 | count = 0; 54 | else if (offset+count > be(m_fork.logicalSize)) 55 | count = be(m_fork.logicalSize) - offset; 56 | 57 | if (!count) 58 | return 0; 59 | 60 | firstExtent = -1; 61 | blocksSoFar = 0; 62 | offsetInExtent = offset; 63 | int i = 0; 64 | do 65 | { 66 | // locate the first extent 67 | for ( ; i < m_extents.size(); i++) 68 | { 69 | if (m_extents[i].blockCount + blocksSoFar > firstBlock) 70 | { 71 | firstExtent = i; 72 | break; 73 | } 74 | 75 | blocksSoFar += m_extents[i].blockCount; 76 | offsetInExtent -= m_extents[i].blockCount * uint64_t(blockSize); 77 | } 78 | 79 | //std::cout << "First extent: " << firstExtent << std::endl; 80 | //std::cout << "Block: " << firstBlockInFirstExtent << std::endl; 81 | 82 | if (firstExtent == -1) 83 | loadFromOverflowsFile(blocksSoFar); 84 | 85 | } while(firstExtent == -1); 86 | 87 | // start reading blocks 88 | extent = firstExtent; 89 | while (read < count && read+offset < length()) 90 | { 91 | int32_t thistime; 92 | int32_t reallyRead; 93 | uint64_t volumeOffset; 94 | 95 | if (extent >= m_extents.size()) 96 | loadFromOverflowsFile(blocksSoFar); 97 | 98 | thistime = std::min(m_extents[extent].blockCount * uint64_t(blockSize) - offsetInExtent, count-read); 99 | 100 | if (thistime == 0) 101 | throw std::logic_error("Internal error: thistime == 0"); 102 | 103 | //std::cout << "Remaining to read: " << count-read << std::endl; 104 | //std::cout << "Extent " << extent << " has " << m_extents[extent].blockCount << " blocks\n"; 105 | //std::cout << "This extent holds " << m_extents[extent].blockCount * uint64_t(blockSize) << " bytes\n"; 106 | //std::cout << "Reading " << thistime << " from block: " << startBlock << ", block size: " << blockSize << std::endl; 107 | volumeOffset = m_extents[extent].startBlock * uint64_t(blockSize) + offsetInExtent; 108 | 109 | reallyRead = m_volume->m_reader->read((char*)buf + read, thistime, volumeOffset); 110 | assert(reallyRead <= thistime); 111 | 112 | read += reallyRead; 113 | 114 | if (reallyRead != thistime) 115 | { 116 | //std::cerr << "Short read: " << thistime << " expected, " << reallyRead << " received\n"; 117 | break; 118 | } 119 | 120 | blocksSoFar += m_extents[extent].blockCount; 121 | //std::cout << "Blocks so far: " << blocksSoFar << std::endl; 122 | extent++; 123 | offsetInExtent = 0; 124 | } 125 | 126 | assert(read <= count); 127 | 128 | return read; 129 | } 130 | -------------------------------------------------------------------------------- /src/HFSAttributeBTree.cpp: -------------------------------------------------------------------------------- 1 | #include "HFSAttributeBTree.h" 2 | #include 3 | #include 4 | #include 5 | #include "unichar.h" 6 | using icu::UnicodeString; 7 | HFSAttributeBTree::HFSAttributeBTree(std::shared_ptr fork, CacheZone* zone) 8 | : HFSBTree(fork, zone, "Attribute") 9 | { 10 | } 11 | 12 | std::map> HFSAttributeBTree::getattr(HFSCatalogNodeID cnid) 13 | { 14 | HFSPlusAttributeKey key; 15 | std::vector> leaves; 16 | std::map> rv; 17 | 18 | memset(&key, 0, sizeof(key)); 19 | key.fileID = htobe32(cnid); 20 | 21 | leaves = findLeafNodes((Key*) &key, cnidComparator); 22 | 23 | for (std::shared_ptr leafPtr : leaves) 24 | { 25 | HFSBTreeNode& leaf = *leafPtr; 26 | for (int i = 0; i < leaf.recordCount(); i++) 27 | { 28 | HFSPlusAttributeKey* recordKey = leaf.getRecordKey(i); 29 | HFSPlusAttributeDataInline* data; 30 | std::vector vecData; 31 | std::string name; 32 | 33 | if (be(recordKey->fileID) != cnid) 34 | continue; 35 | 36 | data = 
leaf.getRecordData(i); 37 | 38 | // process data 39 | if (be(data->recordType) != kHFSPlusAttrInlineData) 40 | continue; 41 | 42 | vecData = std::vector(data->attrData, &data->attrData[be(data->attrSize)]); 43 | name = UnicharToString(be(recordKey->attrNameLength), recordKey->attrName); 44 | 45 | rv[name] = vecData; 46 | } 47 | } 48 | 49 | return rv; 50 | } 51 | 52 | bool HFSAttributeBTree::getattr(HFSCatalogNodeID cnid, const std::string& attrName, std::vector& dataOut) 53 | { 54 | HFSPlusAttributeKey key; 55 | std::shared_ptr leafNodePtr; 56 | UnicodeString ucAttrName = UnicodeString::fromUTF8(attrName); 57 | 58 | memset(&key, 0, sizeof(key)); 59 | key.fileID = htobe32(cnid); 60 | 61 | key.attrNameLength = StringToUnichar(attrName, key.attrName, sizeof(key.attrName)); 62 | key.attrNameLength = htobe16(key.attrNameLength); 63 | 64 | leafNodePtr = findLeafNode((Key*) &key, cnidAttrComparator); 65 | if (!leafNodePtr) 66 | return false; 67 | 68 | HFSBTreeNode& leafNode = *leafNodePtr; // convenience 69 | for (int i = 0; i < leafNode.recordCount(); i++) 70 | { 71 | HFSPlusAttributeKey* recordKey = leafNode.getRecordKey(i); 72 | HFSPlusAttributeDataInline* data; 73 | 74 | UnicodeString recAttrName((char*)recordKey->attrName, be(recordKey->attrNameLength)*2, "UTF-16BE"); 75 | 76 | if (be(recordKey->fileID) == cnid && recAttrName == ucAttrName) 77 | { 78 | data = leafNode.getRecordData(i); 79 | 80 | // process data 81 | if (be(data->recordType) != kHFSPlusAttrInlineData) 82 | continue; 83 | 84 | dataOut = std::vector(data->attrData, &data->attrData[be(data->attrSize)]); 85 | return true; 86 | } 87 | } 88 | 89 | return false; 90 | } 91 | 92 | int HFSAttributeBTree::cnidAttrComparator(const Key* indexKey, const Key* desiredKey) 93 | { 94 | const HFSPlusAttributeKey* indexAttributeKey = reinterpret_cast(indexKey); 95 | const HFSPlusAttributeKey* desiredAttributeKey = reinterpret_cast(desiredKey); 96 | 97 | //std::cout << "Attr search: index cnid: " << be(indexAttributeKey->fileID) << " desired cnid: " << be(desiredAttributeKey->fileID) << std::endl; 98 | 99 | if (be(indexAttributeKey->fileID) > be(desiredAttributeKey->fileID)) 100 | return 1; 101 | else if (be(indexAttributeKey->fileID) < be(desiredAttributeKey->fileID)) 102 | return -1; 103 | else 104 | { 105 | UnicodeString desiredName, indexName; 106 | 107 | desiredName = UnicodeString((char*)desiredAttributeKey->attrName, be(desiredAttributeKey->attrNameLength)*2, "UTF-16BE"); 108 | indexName = UnicodeString((char*)indexAttributeKey->attrName, be(indexAttributeKey->attrNameLength)*2, "UTF-16BE"); 109 | 110 | return indexName.compare(desiredName); 111 | } 112 | } 113 | 114 | int HFSAttributeBTree::cnidComparator(const Key* indexKey, const Key* desiredKey) 115 | { 116 | const HFSPlusAttributeKey* indexAttributeKey = reinterpret_cast(indexKey); 117 | const HFSPlusAttributeKey* desiredAttributeKey = reinterpret_cast(desiredKey); 118 | 119 | if (be(indexAttributeKey->fileID) > be(desiredAttributeKey->fileID)) 120 | return 1; 121 | else if (be(indexAttributeKey->fileID) < be(desiredAttributeKey->fileID)) 122 | return -1; 123 | else 124 | return 0; 125 | } 126 | -------------------------------------------------------------------------------- /src/DMGPartition.cpp: -------------------------------------------------------------------------------- 1 | #include "DMGPartition.h" 2 | #include "be.h" 3 | #include 4 | #include 5 | #include "DMGDecompressor.h" 6 | #include 7 | #include 8 | //#include 9 | #include 10 | #include "SubReader.h" 11 | #include 
"exceptions.h" 12 | 13 | static const int SECTOR_SIZE = 512; 14 | 15 | DMGPartition::DMGPartition(std::shared_ptr disk, BLKXTable* table) 16 | : m_disk(disk), m_table(table) 17 | { 18 | for (uint32_t i = 0; i < be(m_table->blocksRunCount); i++) 19 | { 20 | RunType type = RunType(be(m_table->runs[i].type)); 21 | if (type == RunType::Comment || type == RunType::Terminator) 22 | continue; 23 | 24 | m_sectors[be(m_table->runs[i].sectorStart)] = i; 25 | 26 | #ifdef DEBUG 27 | std::cout << "Sector " << i << " has type 0x" << std::hex << uint32_t(type) << std::dec << ", starts at byte " 28 | << be(m_table->runs[i].sectorStart)*512l << ", compressed length: " 29 | << be(m_table->runs[i].compLength) << ", compressed offset: " << be(m_table->runs[i].compOffset) + be(m_table->dataStart) << std::endl; 30 | #endif 31 | } 32 | } 33 | 34 | DMGPartition::~DMGPartition() 35 | { 36 | delete m_table; 37 | } 38 | 39 | void DMGPartition::adviseOptimalBlock(uint64_t offset, uint64_t& blockStart, uint64_t& blockEnd) 40 | { 41 | std::map::iterator itRun = m_sectors.upper_bound(offset / SECTOR_SIZE); 42 | 43 | if (itRun == m_sectors.begin()) 44 | throw io_error("Invalid run sector data"); 45 | 46 | if (itRun == m_sectors.end()) 47 | blockEnd = length(); 48 | else 49 | blockEnd = itRun->first * SECTOR_SIZE; 50 | 51 | itRun--; 52 | 53 | blockStart = itRun->first * SECTOR_SIZE; 54 | 55 | // Issue #22: empty areas may be larger than 2**31 (causing bugs in callers). 56 | // Moreover, there is no such thing as "optimal block" in zero-filled areas. 57 | RunType runType = RunType(be(m_table->runs[itRun->second].type)); 58 | if (runType == RunType::ZeroFill || runType == RunType::Unknown || runType == RunType::Raw) 59 | Reader::adviseOptimalBlock(offset, blockStart, blockEnd); 60 | } 61 | 62 | int32_t DMGPartition::read(void* buf, int32_t count, uint64_t offset) 63 | { 64 | int32_t done = 0; 65 | 66 | while (done < count) 67 | { 68 | std::map::iterator itRun = m_sectors.upper_bound((offset + done) / SECTOR_SIZE); 69 | uint64_t offsetInSector = 0; 70 | int32_t thistime; 71 | 72 | if (offset+done >= length()) 73 | break; // read beyond EOF 74 | 75 | if (itRun == m_sectors.begin()) 76 | throw io_error("Invalid run sector data"); 77 | 78 | itRun--; // move to the sector we want to read 79 | 80 | //std::cout << "Reading from offset " << offset << " " << count << " bytes\n"; 81 | //std::cout << "Run sector " << itRun->first << " run index=" << itRun->second << std::endl; 82 | 83 | if (!done) 84 | offsetInSector = offset - itRun->first*SECTOR_SIZE; 85 | 86 | thistime = readRun(((char*)buf) + done, itRun->second, offsetInSector, count-done); 87 | if (!thistime) 88 | throw io_error("Unexpected EOF from readRun"); 89 | 90 | done += thistime; 91 | } 92 | 93 | return done; 94 | } 95 | 96 | int32_t DMGPartition::readRun(void* buf, int32_t runIndex, uint64_t offsetInSector, int32_t count) 97 | { 98 | BLKXRun* run = &m_table->runs[runIndex]; 99 | RunType runType = RunType(be(run->type)); 100 | 101 | count = std::min(count, uint64_t(be(run->sectorCount))*512 - offsetInSector); 102 | 103 | #ifdef DEBUG 104 | std::cout << "readRun(): runIndex = " << runIndex << ", offsetInSector = " << offsetInSector << ", count = " << count << std::endl; 105 | #endif 106 | 107 | switch (runType) 108 | { 109 | case RunType::Unknown: // My guess is that this value indicates a hole in the file (sparse file) 110 | case RunType::ZeroFill: 111 | //std::cout << "ZeroFill\n"; 112 | memset(buf, 0, count); 113 | return count; 114 | case RunType::Raw: 115 | 
//std::cout << "Raw\n"; 116 | return m_disk->read(buf, count, be(run->compOffset) + be(m_table->dataStart) + offsetInSector); 117 | case RunType::LZFSE: 118 | #ifndef COMPILE_WITH_LZFSE 119 | throw function_not_implemented_error("LZFSE is not yet supported"); 120 | #endif 121 | case RunType::Zlib: 122 | case RunType::Bzip2: 123 | case RunType::ADC: 124 | { 125 | std::unique_ptr decompressor; 126 | std::shared_ptr subReader; 127 | 128 | subReader.reset(new SubReader(m_disk, be(run->compOffset) + be(m_table->dataStart), be(run->compLength))); 129 | decompressor.reset(DMGDecompressor::create(runType, subReader)); 130 | 131 | if (!decompressor) 132 | throw std::logic_error("DMGDecompressor::create() returned nullptr!"); 133 | 134 | unsigned long long int compLength = be(run->sectorCount)*512; 135 | if ( offsetInSector > compLength ) 136 | return 0; 137 | if ( offsetInSector + count > compLength ) 138 | count = compLength - offsetInSector; 139 | 140 | int32_t dec = decompressor->decompress((uint8_t*)buf, count, offsetInSector); 141 | if (dec < count) 142 | throw io_error("Error decompressing stream"); 143 | return count; 144 | } 145 | default: 146 | return 0; 147 | } 148 | } 149 | 150 | uint64_t DMGPartition::length() 151 | { 152 | return be(m_table->sectorCount) * SECTOR_SIZE; 153 | } 154 | -------------------------------------------------------------------------------- /src/CachedReader.cpp: -------------------------------------------------------------------------------- 1 | #include "CachedReader.h" 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include "exceptions.h" 7 | 8 | //#define NO_CACHE 9 | 10 | CachedReader::CachedReader(std::shared_ptr reader, CacheZone* zone, const std::string& tag) 11 | : m_reader(reader), m_zone(zone), m_tag(tag) 12 | { 13 | } 14 | 15 | int32_t CachedReader::read(void* buf, int32_t count, uint64_t offset) 16 | { 17 | #ifndef NO_CACHE 18 | int32_t done = 0; // from 0 till count 19 | int32_t lastFetchPos = 0; // pos from 0 till count 20 | 21 | #ifdef DEBUG 22 | std::cout << "CachedReader::read(): offset=" << offset << ", count=" << count << std::endl; 23 | #endif 24 | 25 | if (count+offset > length()) 26 | count = length() - offset; 27 | 28 | while (done < count) 29 | { 30 | int32_t thistime = std::min(count - done, CacheZone::BLOCK_SIZE); 31 | uint64_t blockNumber = (offset+done) / CacheZone::BLOCK_SIZE; 32 | uint64_t blockOffset = 0; 33 | size_t fromCache; 34 | 35 | if (done == 0) // this may also happen when cache doesn't contain a full block, but not on a R/O filesystem 36 | blockOffset = offset % CacheZone::BLOCK_SIZE; 37 | 38 | thistime = std::min(thistime, CacheZone::BLOCK_SIZE - blockOffset); 39 | if (thistime == 0) 40 | throw std::logic_error("Internal error: thistime == 0"); 41 | 42 | fromCache = m_zone->get(m_tag, blockNumber, ((uint8_t*) buf) + done, blockOffset, thistime); 43 | 44 | // Something was retrieved from cache 45 | if (fromCache > 0) 46 | { 47 | // Fetch all previous data from lastFetchPos till (offset+done) from backing store 48 | const int32_t toRead = done - lastFetchPos; 49 | const uint64_t pos = offset + lastFetchPos; 50 | 51 | if (toRead > 0) 52 | { 53 | // Perform non-cached read, while saving everything read into the cache 54 | nonCachedRead(((char*) buf) + lastFetchPos, toRead, pos); 55 | } 56 | 57 | // We move lastFetchPos past the current cached read 58 | lastFetchPos = done+thistime; 59 | 60 | done += fromCache; 61 | } 62 | else 63 | { 64 | // We pretend that the data was read, we'll read it later via nonCachedRead() 65 
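// Put differently: cache hits are copied out immediately, while each miss only advances
// 'done'; the uncached gap [lastFetchPos, done) is then fetched in a single nonCachedRead()
// call, either when the next cached block is hit or after the loop ends, and that call also
// stores what it reads back into the cache.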
| //lastFetchPos += thistime; 66 | done += thistime; 67 | } 68 | } 69 | 70 | if (lastFetchPos < count) 71 | { 72 | // Uncached blocks at the end of the requested range 73 | const int32_t toRead = done - lastFetchPos; 74 | const uint64_t pos = offset + lastFetchPos; 75 | 76 | nonCachedRead(((char*) buf) + lastFetchPos, toRead, pos); 77 | } 78 | 79 | return done; 80 | #else 81 | return m_reader->read(buf, count, offset); 82 | #endif 83 | } 84 | 85 | void CachedReader::nonCachedRead(void* buf, int32_t count, uint64_t offset) 86 | { 87 | uint64_t blockStart, blockEnd; 88 | std::unique_ptr optimalBlockBuffer; 89 | uint32_t optimalBlockBufferSize = 0; 90 | uint64_t readPos = offset; 91 | 92 | #ifdef DEBUG 93 | std::cout << "CachedReader::nonCachedRead(): offset=" << offset << ", count=" << count << std::endl; 94 | #endif 95 | 96 | while (readPos < offset+count) 97 | { 98 | int32_t thistime, rd; 99 | 100 | m_reader->adviseOptimalBlock(readPos, blockStart, blockEnd); 101 | 102 | // Does the returned block contain what we asked for? 103 | if (blockStart > readPos || blockEnd <= readPos) 104 | throw std::logic_error("Illegal range returned by adviseOptimalBlock()"); 105 | if (blockEnd - blockStart > std::numeric_limits::max()) 106 | throw std::logic_error("Range returned by adviseOptimalBlock() is too large"); 107 | 108 | thistime = blockEnd-blockStart; 109 | if (thistime > optimalBlockBufferSize) 110 | { 111 | optimalBlockBufferSize = thistime; 112 | optimalBlockBuffer.reset(new uint8_t[optimalBlockBufferSize]); 113 | } 114 | 115 | #ifdef DEBUG 116 | std::cout << "Reading from backing reader: offset=" << blockStart << ", count=" << thistime << std::endl; 117 | #endif 118 | rd = m_reader->read(optimalBlockBuffer.get(), thistime, blockStart); 119 | 120 | if (rd < thistime) 121 | throw io_error("Short read from backing reader"); 122 | 123 | // Align to the next BLOCK_SIZE aligned block 124 | uint64_t cachePos = (blockStart + (CacheZone::BLOCK_SIZE-1)) & ~uint64_t(CacheZone::BLOCK_SIZE-1); 125 | 126 | // And start storing everything we've just read into cache 127 | while (cachePos < blockEnd) 128 | { 129 | m_zone->store(m_tag, cachePos / CacheZone::BLOCK_SIZE, &optimalBlockBuffer[cachePos - blockStart], 130 | std::min(blockEnd-cachePos, CacheZone::BLOCK_SIZE)); 131 | cachePos += CacheZone::BLOCK_SIZE; 132 | } 133 | 134 | // Copy into output buffer 135 | uint32_t optimalOffset = 0; // offset into optimalBlockBuffer to start copying from 136 | uint32_t outputOffset; // offset into 'buf' to copy to 137 | uint32_t toCopy; 138 | 139 | if (readPos > blockStart) 140 | optimalOffset = readPos - blockStart; 141 | outputOffset = readPos - offset; 142 | toCopy = std::min(offset+count - readPos, thistime - optimalOffset); 143 | 144 | #ifdef DEBUG 145 | std::cout << "Copying " << toCopy << " bytes into output buffer at offset " << outputOffset << " from internal offset " << optimalOffset << std::endl; 146 | #endif 147 | // if (toCopy+optimalOffset > thistime) 148 | // throw std::logic_error("Internal error"); 149 | std::copy_n(&optimalBlockBuffer[optimalOffset], toCopy, reinterpret_cast(buf) + outputOffset); 150 | 151 | readPos += toCopy; 152 | } 153 | } 154 | 155 | uint64_t CachedReader::length() 156 | { 157 | return m_reader->length(); 158 | } 159 | -------------------------------------------------------------------------------- /src/HFSBTreeNode.h: -------------------------------------------------------------------------------- 1 | #ifndef HFSBTREENODE_H 2 | #define HFSBTREENODE_H 3 | #include "hfsplus.h" 4 | 
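// HFSBTreeNode wraps a single raw B-tree node buffer. Following the HFS+ node layout, record
// offsets are stored as big-endian uint16 values packed at the very end of the node and
// filled backwards, so record i's offset is read as *(m_firstRecordOffset - recordIndex)
// below; each record then begins with a uint16 key length, followed by the key bytes and the
// record data, which is how getRecordKey()/getRecordData() locate their pointers.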
#include "be.h" 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include "Reader.h" 10 | 11 | class HFSBTreeNode 12 | { 13 | public: 14 | HFSBTreeNode() 15 | { 16 | initConveniencePointerFromBuffer(); 17 | #ifdef DEBUG 18 | m_nodeIndex = 0; 19 | #endif 20 | } 21 | 22 | HFSBTreeNode(std::shared_ptr treeReader, uint32_t nodeIndex, uint16_t nodeSize) 23 | { 24 | #ifdef DEBUG 25 | m_nodeIndex = nodeIndex; 26 | #endif 27 | m_descriptorData.resize(nodeSize); 28 | 29 | int32_t read = treeReader->read(&m_descriptorData[0], nodeSize, nodeSize*nodeIndex); 30 | if (read < nodeSize) 31 | throw std::runtime_error("Short read of BTree node. "+std::to_string(read)+" bytes read instead of "+std::to_string(nodeSize)); 32 | 33 | initConveniencePointerFromBuffer(); 34 | } 35 | 36 | HFSBTreeNode(const HFSBTreeNode& that) 37 | { 38 | *this = that; // calling assgnment op below 39 | } 40 | 41 | HFSBTreeNode& operator=(const HFSBTreeNode& that) 42 | { 43 | #ifdef DEBUG 44 | m_nodeIndex = that.m_nodeIndex; 45 | #endif 46 | m_descriptorData = that.m_descriptorData; 47 | initConveniencePointerFromBuffer(); 48 | return *this; 49 | } 50 | 51 | NodeKind kind() const 52 | { 53 | return m_descriptor->kind; 54 | } 55 | 56 | uint16_t recordCount() const 57 | { 58 | return be(m_descriptor->numRecords); 59 | } 60 | 61 | BTNodeDescriptor* descriptor() const 62 | { 63 | return m_descriptor; 64 | } 65 | 66 | uint16_t nodeSize() const 67 | { 68 | return m_descriptorData.size(); 69 | } 70 | 71 | bool isInvalid() const 72 | { 73 | return !m_descriptor; 74 | } 75 | 76 | uint32_t forwardLink() const 77 | { 78 | return be(m_descriptor->fLink); 79 | } 80 | 81 | template KeyType* getKey() const 82 | { 83 | return reinterpret_cast(descPtr() + sizeof(BTNodeDescriptor)); 84 | } 85 | 86 | template KeyType* getRecordKey(uint16_t recordIndex) const 87 | { 88 | uint16_t recordOffset = be(*(m_firstRecordOffset - recordIndex)); 89 | 90 | return reinterpret_cast(descPtr() + recordOffset); 91 | } 92 | 93 | template DataType* getRecordData(uint16_t recordIndex) const 94 | { 95 | uint16_t* keyLength = getRecordKey(recordIndex); 96 | char* keyPtr = reinterpret_cast(keyLength); 97 | 98 | return reinterpret_cast(keyPtr + be(*keyLength) + sizeof(uint16_t)); 99 | } 100 | 101 | template class RecordIterator : public std::iterator 102 | { 103 | public: 104 | typedef typename std::iterator::difference_type difference_type; 105 | 106 | RecordIterator() : m_node(nullptr), m_index(0) 107 | { 108 | } 109 | 110 | RecordIterator(const RecordIterator& that) : m_node(that.m_node), m_index(that.m_index) 111 | { 112 | } 113 | 114 | RecordIterator(const HFSBTreeNode* node, int index) : m_node(node), m_index(index) 115 | { 116 | } 117 | 118 | RecordIterator& operator=(const RecordIterator& that) 119 | { 120 | m_node = that.m_node; 121 | m_index = that.m_index; 122 | return *this; 123 | } 124 | 125 | KeyType* operator*() 126 | { 127 | return m_node->getRecordKey(m_index); 128 | } 129 | RecordIterator& operator++() 130 | { 131 | m_index++; 132 | return *this; 133 | } 134 | RecordIterator& operator--() 135 | { 136 | m_index--; 137 | return *this; 138 | } 139 | 140 | difference_type operator-(const RecordIterator& that) 141 | { 142 | return m_index - that.m_index; 143 | } 144 | 145 | RecordIterator& operator+=(const difference_type& n) 146 | { 147 | m_index += n; 148 | return *this; 149 | } 150 | 151 | RecordIterator& operator-=(const difference_type& n) 152 | { 153 | m_index -= n; 154 | return *this; 155 | } 156 | 157 | bool operator!=(const RecordIterator& 
that) 158 | { 159 | return m_index != that.m_index; 160 | } 161 | bool operator==(const RecordIterator& that) 162 | { 163 | return m_index == that.m_index; 164 | } 165 | int index() const 166 | { 167 | return m_index; 168 | } 169 | private: 170 | const HFSBTreeNode* m_node; 171 | int m_index; 172 | }; 173 | 174 | template RecordIterator begin() const 175 | { 176 | return RecordIterator(this, 0); 177 | } 178 | 179 | template RecordIterator end() const 180 | { 181 | return RecordIterator(this, recordCount()); 182 | } 183 | 184 | private: 185 | char* descPtr() const 186 | { 187 | return reinterpret_cast(m_descriptor); 188 | } 189 | // Initialised convenience pointer from m_descriptorData 190 | void initConveniencePointerFromBuffer() 191 | { 192 | if (m_descriptorData.size()) // required check! 193 | { 194 | m_descriptor = reinterpret_cast(&m_descriptorData[0]); 195 | m_firstRecordOffset = reinterpret_cast(descPtr() + m_descriptorData.size() - sizeof(uint16_t)); 196 | }else{ 197 | m_descriptor = nullptr; 198 | m_firstRecordOffset = nullptr; 199 | } 200 | } 201 | private: 202 | std::vector m_descriptorData; 203 | // convenience initialised by initConveniencePointerFromBuffer() 204 | mutable BTNodeDescriptor* m_descriptor; 205 | uint16_t* m_firstRecordOffset; 206 | #ifdef DEBUG 207 | uint32_t m_nodeIndex; 208 | #endif 209 | }; 210 | 211 | #endif 212 | -------------------------------------------------------------------------------- /src/HFSZlibReader.cpp: -------------------------------------------------------------------------------- 1 | #include "HFSZlibReader.h" 2 | #include 3 | #include 4 | #include 5 | #include "exceptions.h" 6 | #include "be.h" 7 | 8 | // HFS+ compresses data in 64KB blocks 9 | static const unsigned int RUN_LENGTH = 64*1024; 10 | 11 | HFSZlibReader::HFSZlibReader(std::shared_ptr parent, uint64_t uncompressedSize, bool singleRun) 12 | : m_reader(parent), m_uncompressedSize(uncompressedSize) 13 | { 14 | // read the compression table (little endian) 15 | // uint32_t numEntries 16 | // uint32_t offsets[(num_entries+1)*2] (offset, length) 17 | // 18 | // Each offset points to the start of a 64 KB block 19 | 20 | if (!singleRun) 21 | { 22 | uint32_t numEntries; 23 | std::unique_ptr entries; 24 | 25 | if (m_reader->read(&numEntries, sizeof(numEntries), 0) != sizeof(numEntries)) 26 | throw io_error("Short read of compression map"); 27 | 28 | numEntries = le(numEntries); 29 | entries.reset(new uint32_t[(numEntries+1) * 2]); 30 | 31 | if (m_reader->read(entries.get(), sizeof(uint32_t) * 2 * (numEntries+1), sizeof(numEntries)) != sizeof(uint32_t) * 2 * (numEntries+1)) 32 | throw io_error("Short read of compression map entries"); 33 | 34 | for (size_t i = 0; i < numEntries+1; i++) 35 | m_offsets.push_back(std::make_pair(le(entries[i*2]), le(entries[i*2+1]))); 36 | } 37 | else 38 | { 39 | // This branch is only used for zlib data stored within extended attributes. 40 | // In this case, the reader here is a MemoryReader with a small amount of data, 41 | // thus it is OK to cast the length to uint32_t. 
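// Whichever branch fills it, m_offsets holds one (offset, length) pair per 64 KiB chunk of
// uncompressed data (just one pair in the single-run case), so a byte offset maps to its
// chunk as runIndex = offset / RUN_LENGTH in read() below; for example, uncompressed offset
// 200000 falls into chunk 200000 / 65536 = 3.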
42 | m_offsets.push_back(std::pair(0, m_reader->length())); 43 | } 44 | 45 | zlibInit(); 46 | } 47 | 48 | HFSZlibReader::~HFSZlibReader() 49 | { 50 | zlibExit(); 51 | } 52 | 53 | void HFSZlibReader::adviseOptimalBlock(uint64_t offset, uint64_t& blockStart, uint64_t& blockEnd) 54 | { 55 | blockStart = offset & ~uint64_t(RUN_LENGTH-1); 56 | blockEnd = std::min(blockStart + RUN_LENGTH, length()); 57 | } 58 | 59 | void HFSZlibReader::zlibInit() 60 | { 61 | memset(&m_strm, 0, sizeof(m_strm)); 62 | if (inflateInit(&m_strm) != Z_OK) 63 | throw std::bad_alloc(); 64 | } 65 | 66 | void HFSZlibReader::zlibExit() 67 | { 68 | inflateEnd(&m_strm); 69 | m_inputPos = 0; 70 | m_lastEnd = 0; 71 | } 72 | 73 | int32_t HFSZlibReader::readRun(int runIndex, void* buf, int32_t count, uint64_t offset) 74 | { 75 | // We're in a different run or we're skipping back in the run, so we can't resume anything 76 | if (runIndex != m_lastRun || offset < m_lastEnd) 77 | { 78 | zlibExit(); 79 | zlibInit(); 80 | m_lastEnd = 0; 81 | m_inputPos = 0; 82 | m_lastUncompressed = false; 83 | } 84 | 85 | // We're skipping forward in the current run. Waste decompress the data in between. 86 | if (m_lastEnd < offset) 87 | { 88 | char waste[512]; 89 | 90 | while (m_lastEnd < offset) 91 | { 92 | int thistime = std::min(sizeof(waste), offset - m_lastEnd); 93 | readRun(runIndex, waste, thistime, m_lastEnd); 94 | } 95 | } 96 | 97 | // Decompress 98 | char inputBuffer[512]; 99 | int32_t done = 0; 100 | int32_t readCompressedSoFar = 0; 101 | 102 | while (done < count) 103 | { 104 | int32_t read = 0; 105 | int status, thistime; 106 | 107 | thistime = count - done; 108 | 109 | int thisTimeCompressed = std::min(m_offsets[runIndex].second-readCompressedSoFar, sizeof(inputBuffer)); 110 | 111 | if (!m_lastUncompressed) 112 | read = m_reader->read(inputBuffer, thisTimeCompressed, m_inputPos + m_offsets[runIndex].first); 113 | readCompressedSoFar += read; 114 | 115 | // Special handling for uncompressed blocks 116 | if (m_lastUncompressed || (done == 0 && read > 0 && m_inputPos==0 && (inputBuffer[0] & 0xf) == 0xf)) 117 | { 118 | if (!m_lastUncompressed) 119 | m_inputPos++; 120 | 121 | count = std::min(count, m_offsets[runIndex].second - offset - 1); 122 | read = m_reader->read(buf, count, m_inputPos + m_offsets[runIndex].first); 123 | m_inputPos += read; 124 | 125 | m_lastEnd += read; 126 | m_lastUncompressed = true; 127 | return read; 128 | } 129 | 130 | m_strm.next_in = (Bytef*) inputBuffer; 131 | m_strm.next_out = ((Bytef*) buf) + done; 132 | m_strm.avail_in = read; 133 | m_strm.avail_out = thistime; 134 | 135 | status = inflate(&m_strm, Z_SYNC_FLUSH); 136 | 137 | if (status < 0) 138 | throw io_error("Inflate error"); 139 | 140 | done += thistime - m_strm.avail_out; 141 | m_inputPos += read - m_strm.avail_in; 142 | 143 | if (status == Z_STREAM_END) 144 | break; 145 | } 146 | 147 | m_lastEnd += done; 148 | m_lastRun = runIndex; 149 | 150 | return done; 151 | } 152 | 153 | 154 | int32_t HFSZlibReader::read(void* buf, int32_t count, uint64_t offset) 155 | { 156 | int32_t done = 0; 157 | 158 | if (offset+count > m_uncompressedSize) 159 | count = m_uncompressedSize - offset; 160 | 161 | while (done < count) 162 | { 163 | uint64_t runOffset = 0; 164 | uint32_t thisTime, read; 165 | const int runIndex = (offset+done) / RUN_LENGTH; 166 | 167 | // runOffset only relevant in first run 168 | if (done == 0) 169 | runOffset = offset % RUN_LENGTH; 170 | 171 | thisTime = std::min(RUN_LENGTH, count-done); 172 | read = readRun(runIndex, static_cast(buf) + done, 
thisTime, runOffset); 173 | 174 | if (read != thisTime) 175 | throw io_error("Short read from readRun"); 176 | 177 | done += read; 178 | } 179 | 180 | return done; 181 | } 182 | 183 | uint64_t HFSZlibReader::length() 184 | { 185 | return m_uncompressedSize; 186 | } 187 | -------------------------------------------------------------------------------- /src/hfsplus.h: -------------------------------------------------------------------------------- 1 | #ifndef HFSPLUS_H 2 | #define HFSPLUS_H 3 | #include 4 | 5 | #pragma pack(1) 6 | 7 | typedef uint16_t unichar; 8 | typedef uint32_t HFSCatalogNodeID; 9 | 10 | #define HFSP_SIGNATURE 0x482b 11 | #define HFSX_SIGNATURE 0x4858 12 | 13 | #define HFSPLUS_S_IFMT 0170000 /* type of file mask */ 14 | #define HFSPLUS_S_IFIFO 0010000 /* named pipe (fifo) */ 15 | #define HFSPLUS_S_IFCHR 0020000 /* character special */ 16 | #define HFSPLUS_S_IFDIR 0040000 /* directory */ 17 | #define HFSPLUS_S_IFBLK 0060000 /* block special */ 18 | #define HFSPLUS_S_IFREG 0100000 /* regular */ 19 | #define HFSPLUS_S_IFLNK 0120000 /* symbolic link */ 20 | #define HFSPLUS_S_IFSOCK 0140000 /* socket */ 21 | #define HFSPLUS_S_IFWHT 0160000 /* whiteout */ 22 | 23 | #define HFS_PERM_OFLAG_COMPRESSED 0x20 24 | 25 | struct HFSString 26 | { 27 | uint16_t length; 28 | unichar string[255]; 29 | }; 30 | 31 | struct HFSPlusBSDInfo 32 | { 33 | uint32_t ownerID; 34 | uint32_t groupID; 35 | uint8_t adminFlags; 36 | uint8_t ownerFlags; 37 | uint16_t fileMode; 38 | union 39 | { 40 | uint32_t iNodeNum; 41 | uint32_t linkCount; 42 | uint32_t rawDevice; 43 | } special; 44 | }; 45 | 46 | struct HFSPlusExtentDescriptor 47 | { 48 | uint32_t startBlock; 49 | uint32_t blockCount; 50 | }; 51 | 52 | struct HFSPlusForkData 53 | { 54 | uint64_t logicalSize; 55 | uint32_t clumpSize; 56 | uint32_t totalBlocks; 57 | HFSPlusExtentDescriptor extents[8]; 58 | }; 59 | 60 | struct HFSPlusVolumeHeader 61 | { 62 | uint16_t signature; 63 | uint16_t version; 64 | uint32_t attributes; 65 | uint32_t lastMountedVersion; 66 | uint32_t journalInfoBlock; 67 | 68 | uint32_t createDate; 69 | uint32_t modifyDate; 70 | uint32_t backupDate; 71 | uint32_t checkedDate; 72 | 73 | uint32_t fileCount; 74 | uint32_t folderCount; 75 | 76 | uint32_t blockSize; 77 | uint32_t totalBlocks; 78 | uint32_t freeBlocks; 79 | 80 | uint32_t nextAllocation; 81 | uint32_t rsrcClumpSize; 82 | uint32_t dataClumpSize; 83 | uint32_t nextCatalogID; 84 | 85 | uint32_t writeCount; 86 | uint64_t encodingsBitmap; 87 | 88 | uint32_t finderInfo[8]; 89 | 90 | HFSPlusForkData allocationFile; 91 | HFSPlusForkData extentsFile; 92 | HFSPlusForkData catalogFile; 93 | HFSPlusForkData attributesFile; 94 | HFSPlusForkData startupFile; 95 | }; 96 | 97 | enum class NodeKind : int8_t 98 | { 99 | kBTLeafNode = -1, 100 | kBTIndexNode = 0, 101 | kBTHeaderNode = 1, 102 | kBTMapNode = 2 103 | }; 104 | 105 | struct BTNodeDescriptor 106 | { 107 | uint32_t fLink; 108 | uint32_t bLink; 109 | NodeKind kind; 110 | uint8_t height; 111 | uint16_t numRecords; 112 | uint16_t reserved; 113 | }; 114 | 115 | enum class KeyCompareType : uint8_t 116 | { 117 | kHFSCaseFolding = 0xCF, 118 | kHFSBinaryCompare = 0xBC 119 | }; 120 | 121 | struct BTHeaderRec 122 | { 123 | uint16_t treeDepth; 124 | uint32_t rootNode; 125 | uint32_t leafRecords; 126 | uint32_t firstLeafNode; 127 | uint32_t lastLeafNode; 128 | uint16_t nodeSize; 129 | uint16_t maxKeyLength; 130 | uint32_t totalNodes; 131 | uint32_t freeNodes; 132 | uint16_t reserved1; 133 | uint32_t clumpSize; // misaligned 134 | uint8_t 
btreeType; 135 | KeyCompareType keyCompareType; 136 | uint32_t attributes; // long aligned again 137 | uint32_t reserved3[16]; 138 | }; 139 | 140 | enum 141 | { 142 | kHFSNullID = 0, 143 | kHFSRootParentID = 1, 144 | kHFSRootFolderID = 2, 145 | kHFSExtentsFileID = 3, 146 | kHFSCatalogFileID = 4, 147 | kHFSBadBlockFileID = 5, 148 | kHFSAllocationFileID = 6, 149 | kHFSStartupFileID = 7, 150 | kHFSAttributesFileID = 8, 151 | kHFSRepairCatalogFileID = 14, 152 | kHFSBogusExtentFileID = 15, 153 | kHFSFirstUserCatalogNodeID = 16 154 | }; 155 | 156 | struct HFSPlusCatalogKey 157 | { 158 | uint16_t keyLength; 159 | HFSCatalogNodeID parentID; 160 | HFSString nodeName; 161 | }; 162 | 163 | enum class RecordType : uint16_t 164 | { 165 | kHFSPlusFolderRecord = 0x0001, 166 | kHFSPlusFileRecord = 0x0002, 167 | kHFSPlusFolderThreadRecord = 0x0003, 168 | kHFSPlusFileThreadRecord = 0x0004 169 | }; 170 | 171 | struct Point 172 | { 173 | int16_t v, h; 174 | }; 175 | 176 | struct Rect 177 | { 178 | int16_t top, left, bottom, right; 179 | }; 180 | 181 | struct FileInfo 182 | { 183 | uint32_t fileType; 184 | uint32_t fileCreator; 185 | uint16_t finderFlags; 186 | Point location; 187 | uint16_t reservedField; 188 | }; 189 | 190 | //struct ExtendedFileInfo 191 | //{ 192 | // int16_t reserved1[4]; 193 | // uint16_t extendedFinderFlags; 194 | // int16_t reserved2; 195 | // int32_t putAwayFolderID; 196 | //}; 197 | // looking in Apple Source, this is the modern definition of ExtendedFileInfo 198 | struct ExtendedFileInfo 199 | { 200 | uint32_t document_id; 201 | uint32_t date_added; 202 | uint16_t extended_flags; 203 | uint16_t reserved2; 204 | uint32_t write_gen_counter; 205 | } __attribute__((aligned(2), packed)); 206 | 207 | struct FolderInfo 208 | { 209 | Rect windowBounds; 210 | uint16_t finderFlags; 211 | Point location; 212 | uint16_t reservedField; 213 | }; 214 | 215 | //struct ExtendedFolderInfo 216 | //{ 217 | // Point scrollPosition; 218 | // int32_t reserved1; 219 | // uint16_t extendedFinderFlags; 220 | // int16_t reserved2; 221 | // int32_t putAwayFolderID; 222 | //}; 223 | // looking in Apple Source, this is the modern definition of ExtendedFolderInfo 224 | struct ExtendedFolderInfo 225 | { 226 | uint32_t document_id; 227 | uint32_t date_added; 228 | uint16_t extended_flags; 229 | uint16_t reserved3; 230 | uint32_t write_gen_counter; 231 | } __attribute__((aligned(2), packed)); 232 | 233 | struct HFSPlusCatalogFolder 234 | { 235 | RecordType recordType; 236 | uint16_t flags; 237 | uint32_t valence; 238 | HFSCatalogNodeID folderID; 239 | uint32_t createDate; 240 | uint32_t contentModDate; 241 | uint32_t attributeModDate; 242 | uint32_t accessDate; 243 | uint32_t backupDate; 244 | HFSPlusBSDInfo permissions; 245 | FolderInfo userInfo; 246 | ExtendedFolderInfo finderInfo; 247 | uint32_t textEncoding; 248 | uint32_t reserved; 249 | }; 250 | 251 | struct HFSPlusCatalogFile 252 | { 253 | RecordType recordType; 254 | uint16_t flags; 255 | uint32_t reserved1; 256 | HFSCatalogNodeID fileID; 257 | uint32_t createDate; 258 | uint32_t contentModDate; 259 | uint32_t attributeModDate; 260 | uint32_t accessDate; 261 | uint32_t backupDate; 262 | HFSPlusBSDInfo permissions; 263 | FileInfo userInfo; 264 | ExtendedFileInfo finderInfo; 265 | uint32_t textEncoding; 266 | uint32_t reserved2; 267 | 268 | HFSPlusForkData dataFork; 269 | HFSPlusForkData resourceFork; 270 | }; 271 | 272 | struct HFSPlusCatalogFileOrFolder 273 | { 274 | union 275 | { 276 | HFSPlusCatalogFile file; 277 | HFSPlusCatalogFolder folder; 278 | }; 279 
| }; 280 | 281 | struct HFSPlusCatalogThread 282 | { 283 | RecordType recordType; 284 | int16_t reserved; 285 | HFSCatalogNodeID parentID; 286 | HFSString nodeName; 287 | }; 288 | 289 | struct HFSPlusExtentKey 290 | { 291 | uint16_t keyLength; 292 | uint8_t forkType; 293 | uint8_t pad; 294 | HFSCatalogNodeID fileID; 295 | uint32_t startBlock; 296 | }; 297 | 298 | struct HFSPlusAttributeKey 299 | { 300 | uint16_t keyLength; 301 | uint16_t padding; 302 | HFSCatalogNodeID fileID; 303 | uint32_t startBlock; // first allocation block number for extents 304 | uint16_t attrNameLength; 305 | uint16_t attrName[127]; 306 | }; 307 | 308 | enum 309 | { 310 | kHFSPlusAttrInlineData = 0x10, 311 | kHFSPlusAttrForkData = 0x20, 312 | kHFSPlusAttrExtents = 0x30 313 | }; 314 | 315 | struct HFSPlusAttributeDataInline 316 | { 317 | uint32_t recordType; // kHFSPlusAttrInlineData 318 | uint64_t reserved; 319 | uint32_t attrSize; 320 | uint8_t attrData[]; 321 | }; 322 | 323 | // File type and creator for symlink 324 | enum { 325 | kSymLinkFileType = 0x736C6E6B, /* 'slnk' */ 326 | kSymLinkCreator = 0x72686170 /* 'rhap' */ 327 | }; 328 | 329 | #pragma pack() 330 | 331 | // File type and creator for hard link 332 | enum { 333 | kHardLinkFileType = 0x686C6E6B, /* 'hlnk' */ 334 | kHFSPlusCreator = 0x6866732B /* 'hfs+' */ 335 | }; 336 | 337 | #endif 338 | 339 | -------------------------------------------------------------------------------- /src/DMGDisk.cpp: -------------------------------------------------------------------------------- 1 | #include "DMGDisk.h" 2 | #include 3 | #include "be.h" 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include "DMGPartition.h" 11 | #include "AppleDisk.h" 12 | #include "GPTDisk.h" 13 | #include "CachedReader.h" 14 | #include "SubReader.h" 15 | #include "exceptions.h" 16 | 17 | DMGDisk::DMGDisk(std::shared_ptr reader) 18 | : m_reader(reader), m_zone(40000) 19 | { 20 | uint64_t offset = m_reader->length(); 21 | 22 | if (offset < 512) 23 | throw io_error("File to small to be a DMG"); 24 | 25 | offset -= 512; 26 | 27 | if (m_reader->read(&m_udif, sizeof(m_udif), offset) != sizeof(m_udif)) 28 | throw io_error("Cannot read the KOLY block"); 29 | 30 | if (be(m_udif.fUDIFSignature) != UDIF_SIGNATURE) 31 | throw io_error("Invalid KOLY block signature"); 32 | 33 | loadKoly(m_udif); 34 | } 35 | 36 | DMGDisk::~DMGDisk() 37 | { 38 | xmlFreeDoc(m_kolyXML); 39 | } 40 | 41 | bool DMGDisk::isDMG(std::shared_ptr reader) 42 | { 43 | uint64_t offset = reader->length() - 512; 44 | decltype(UDIFResourceFile::fUDIFSignature) sig = 0; 45 | 46 | reader->read(&sig, sizeof(sig), offset); 47 | return be(sig) == UDIF_SIGNATURE; 48 | } 49 | 50 | void DMGDisk::loadKoly(const UDIFResourceFile& koly) 51 | { 52 | std::unique_ptr xmlData; 53 | xmlXPathContextPtr xpathContext; 54 | xmlXPathObjectPtr xpathObj; 55 | uint64_t offset, length; 56 | bool simpleWayOK = false; 57 | 58 | offset = be(koly.fUDIFXMLOffset); 59 | length = be(koly.fUDIFXMLLength); 60 | 61 | xmlData.reset(new char[length]); 62 | m_reader->read(xmlData.get(), length, offset); 63 | 64 | m_kolyXML = xmlParseMemory(xmlData.get(), length); 65 | 66 | //#if 0 // Asian copies of OS X put crap UTF characters into XML data making type/name parsing unreliable 67 | xpathContext = xmlXPathNewContext(m_kolyXML); 68 | 69 | // select all partition dictionaries with partition ID >= 0 70 | xpathObj = xmlXPathEvalExpression((const xmlChar*) "/plist/dict/key[text()='resource-fork']/following-sibling::dict[1]/key[text()='blkx']" 71 
| "/following-sibling::array[1]/dict[key[text()='ID']/following-sibling::string[1][text() >= 0]]", xpathContext); 72 | 73 | if (xpathObj && xpathObj->nodesetval) 74 | simpleWayOK = loadPartitionElements(xpathContext, xpathObj->nodesetval); 75 | 76 | xmlXPathFreeObject(xpathObj); 77 | xmlXPathFreeContext(xpathContext); 78 | //#else 79 | 80 | if (!simpleWayOK) 81 | { 82 | std::shared_ptr rm1, r1; 83 | PartitionedDisk* pdisk; 84 | 85 | rm1 = readerForKolyBlock(-1); 86 | 87 | if (rm1) 88 | { 89 | if (AppleDisk::isAppleDisk(rm1)) 90 | { 91 | r1 = readerForKolyBlock(0); // TODO: this is not always partition 0 92 | pdisk = new AppleDisk(rm1, r1); 93 | } 94 | else if (GPTDisk::isGPTDisk(rm1)) 95 | { 96 | r1 = readerForKolyBlock(1); 97 | pdisk = new GPTDisk(rm1, r1); 98 | } 99 | else 100 | throw function_not_implemented_error("Unknown partition table type"); 101 | 102 | m_partitions = pdisk->partitions(); 103 | 104 | delete pdisk; 105 | } 106 | } 107 | //#endif 108 | } 109 | 110 | bool DMGDisk::loadPartitionElements(xmlXPathContextPtr xpathContext, xmlNodeSetPtr nodes) 111 | { 112 | for (int i = 0; i < nodes->nodeNr; i++) 113 | { 114 | xmlXPathObjectPtr xpathObj; 115 | Partition part; 116 | BLKXTable* table; 117 | 118 | if (nodes->nodeTab[i]->type != XML_ELEMENT_NODE) 119 | continue; 120 | 121 | xpathContext->node = nodes->nodeTab[i]; 122 | 123 | xpathObj = xmlXPathEvalExpression((const xmlChar*) "string(key[text()='CFName']/following-sibling::string)", xpathContext); 124 | 125 | if (!xpathObj || !xpathObj->stringval) 126 | xpathObj = xmlXPathEvalExpression((const xmlChar*) "string(key[text()='Name']/following-sibling::string)", xpathContext); 127 | 128 | if (!xpathObj || !xpathObj->stringval) 129 | throw io_error("Invalid XML data, partition Name key not found"); 130 | 131 | table = loadBLKXTableForPartition(i); 132 | 133 | if (table) 134 | { 135 | part.offset = be(table->firstSectorNumber) * 512; 136 | part.size = be(table->sectorCount) * 512; 137 | } 138 | 139 | if (!parseNameAndType((const char*) xpathObj->stringval, part.name, part.type) && m_partitions.empty()) 140 | return false; 141 | m_partitions.push_back(part); 142 | 143 | xmlXPathFreeObject(xpathObj); 144 | //delete table; 145 | } 146 | 147 | return true; 148 | } 149 | 150 | bool DMGDisk::parseNameAndType(const std::string& nameAndType, std::string& name, std::string& type) 151 | { 152 | // Format: "Apple (Apple_partition_map : 1)" 153 | size_t paren = nameAndType.find('('); 154 | size_t colon, space; 155 | 156 | if (paren == std::string::npos) 157 | return false; 158 | 159 | name = nameAndType.substr(0, paren-1); 160 | colon = nameAndType.find(':', paren); 161 | 162 | if (colon == std::string::npos) 163 | return false; 164 | 165 | type = nameAndType.substr(paren+1, (colon - paren) - 1); 166 | space = type.rfind(' '); 167 | 168 | if (space != std::string::npos && space == type.length()-1) 169 | type.resize(type.length() - 1); // remove space at the end 170 | 171 | return true; 172 | } 173 | 174 | BLKXTable* DMGDisk::loadBLKXTableForPartition(int index) 175 | { 176 | xmlXPathContextPtr xpathContext; 177 | xmlXPathObjectPtr xpathObj; 178 | char expr[300]; 179 | BLKXTable* rv = nullptr; 180 | 181 | sprintf(expr, "string(/plist/dict/key[text()='resource-fork']/following-sibling::dict[1]/key[text()='blkx']" 182 | "/following-sibling::array[1]/dict[key[text()='ID']/following-sibling::string[text() = %d]]/key[text()='Data']/following-sibling::data)", index); 183 | 184 | xpathContext = xmlXPathNewContext(m_kolyXML); 185 | xpathObj = 
xmlXPathEvalExpression((const xmlChar*) expr, xpathContext); 186 | 187 | if (xpathObj && xpathObj->stringval && *xpathObj->stringval) 188 | { 189 | // load data from base64 190 | std::vector data; 191 | 192 | base64Decode((char*)xpathObj->stringval, data); 193 | rv = static_cast(operator new(data.size())); 194 | 195 | memcpy(rv, &data[0], data.size()); 196 | } 197 | 198 | xmlXPathFreeObject(xpathObj); 199 | xmlXPathFreeContext(xpathContext); 200 | 201 | return rv; 202 | } 203 | 204 | bool DMGDisk::base64Decode(const std::string& input, std::vector& output) 205 | { 206 | BIO *b64, *bmem; 207 | std::unique_ptr buffer(new char[input.length()]); 208 | int rd; 209 | 210 | auto b64_input = input.substr(0, input.find_last_not_of("\r\t\f\v")); 211 | 212 | b64 = BIO_new(BIO_f_base64()); 213 | bmem = BIO_new_mem_buf((void*) b64_input.c_str(), b64_input.length()); 214 | bmem = BIO_push(b64, bmem); 215 | //BIO_set_flags(bmem, BIO_FLAGS_BASE64_NO_NL); 216 | 217 | rd = BIO_read(bmem, buffer.get(), b64_input.length()); 218 | 219 | if (rd > 0) 220 | output.assign(buffer.get(), buffer.get()+rd); 221 | 222 | BIO_free_all(bmem); 223 | return rd >= 0; 224 | } 225 | 226 | std::shared_ptr DMGDisk::readerForPartition(int index) 227 | { 228 | for (int i = -1;; i++) 229 | { 230 | BLKXTable* table = loadBLKXTableForPartition(i); 231 | 232 | if (!table) 233 | continue; 234 | 235 | if (be(table->firstSectorNumber)*512 == m_partitions[index].offset) 236 | { 237 | std::stringstream partName; 238 | uint64_t l = m_reader->length(); 239 | uint32_t data_offset = be(m_udif.fUDIFDataForkOffset); 240 | 241 | partName << "part-" << index; 242 | 243 | if (data_offset) { 244 | std::shared_ptr r(new SubReader(m_reader, 245 | data_offset, 246 | m_reader->length() - data_offset)); 247 | 248 | return std::shared_ptr( 249 | new CachedReader(std::shared_ptr(new DMGPartition(r, table)), &m_zone, partName.str()) 250 | ); 251 | } else { 252 | return std::shared_ptr( 253 | new CachedReader(std::shared_ptr(new DMGPartition(m_reader, table)), &m_zone, partName.str()) 254 | ); 255 | } 256 | } 257 | 258 | delete table; 259 | } 260 | 261 | return nullptr; 262 | } 263 | 264 | std::shared_ptr DMGDisk::readerForKolyBlock(int index) 265 | { 266 | BLKXTable* table = loadBLKXTableForPartition(index); 267 | if (!table) 268 | return nullptr; 269 | return std::shared_ptr(new DMGPartition(m_reader, table)); 270 | } 271 | -------------------------------------------------------------------------------- /src/HFSBTree.cpp: -------------------------------------------------------------------------------- 1 | #include "HFSBTree.h" 2 | #include 3 | #include "be.h" 4 | #include "hfsplus.h" 5 | #include "unichar.h" 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include "HFSBTreeNode.h" 11 | #include "CacheZone.h" 12 | #include "exceptions.h" 13 | 14 | HFSBTree::HFSBTree(std::shared_ptr fork, CacheZone* zone, const char* cacheTag) 15 | : m_fork(fork) 16 | { 17 | BTNodeDescriptor desc0; 18 | 19 | //std::cout << "Tree size: " << fork->length() << std::endl; 20 | 21 | m_reader.reset(new CachedReader(m_fork, zone, cacheTag)); 22 | 23 | if (m_reader->read(&desc0, sizeof(desc0), 0) != sizeof(desc0)) 24 | throw io_error("Failed to read BTNodeDescriptor zero"); 25 | 26 | if (desc0.kind != NodeKind::kBTHeaderNode) 27 | throw io_error("Wrong kind of BTree header"); 28 | 29 | if (m_reader->read(&m_header, sizeof(m_header), sizeof(desc0)) != sizeof(m_header)) 30 | throw io_error("Failed to read BTHeaderRec"); 31 | 32 | //std::cout << "leaf records: " << 
be(m_header.leafRecords) << std::endl; 33 | //std::cout << "node size: " << be(m_header.nodeSize) << std::endl; 34 | //std::cout << "first leaf node: " << be(m_header.firstLeafNode) << std::endl; 35 | //std::cout << "last leaf node: " << be(m_header.lastLeafNode) << std::endl; 36 | 37 | /*if (m_header.rootNode) 38 | { 39 | walkTree(be(m_header.rootNode)); 40 | }*/ 41 | } 42 | 43 | std::shared_ptr HFSBTree::findLeafNode(const Key* indexKey, KeyComparator comp, bool wildcard) 44 | { 45 | return traverseTree(be(m_header.rootNode), indexKey, comp, wildcard); 46 | } 47 | 48 | std::vector> HFSBTree::findLeafNodes(const Key* indexKey, KeyComparator comp) 49 | { 50 | std::vector> rv; 51 | std::set uniqLink; // for broken filesystems 52 | std::shared_ptr currentPtr = findLeafNode(indexKey, comp, true); 53 | 54 | if (!currentPtr) 55 | return rv; 56 | 57 | rv.push_back(currentPtr); 58 | 59 | while (currentPtr->forwardLink() != 0) 60 | { 61 | Key* key; 62 | 63 | if (uniqLink.find(currentPtr->forwardLink()) != uniqLink.end()) 64 | { 65 | std::cerr << "WARNING: forward link loop detected!\n"; 66 | break; 67 | } 68 | else 69 | uniqLink.insert(currentPtr->forwardLink()); 70 | 71 | //std::cout << "Testing node " << current.forwardLink() << std::endl; 72 | currentPtr = std::make_shared(m_reader, currentPtr->forwardLink(), currentPtr->nodeSize()); 73 | 74 | key = currentPtr->getKey(); // TODO: or the key of the first record? 75 | 76 | if (comp(key, indexKey) > 0) 77 | break; 78 | 79 | rv.push_back(currentPtr); 80 | } 81 | 82 | return rv; 83 | } 84 | 85 | std::shared_ptr HFSBTree::traverseTree(int nodeIndex, const Key* indexKey, KeyComparator comp, bool wildcard) 86 | { 87 | //std::cout << "Examining node " << nodeIndex << std::endl; 88 | std::shared_ptr nodePtr = std::make_shared(m_reader, nodeIndex, be(m_header.nodeSize)); 89 | HFSBTreeNode& node = *nodePtr; 90 | 91 | switch (node.kind()) 92 | { 93 | case NodeKind::kBTIndexNode: 94 | { 95 | int position; 96 | uint32_t* childIndex; 97 | 98 | if (wildcard) 99 | { 100 | auto it = std::lower_bound(node.begin(), node.end(), indexKey, [=](const Key* keyA, const Key* keyB) { // can be equal to keyB 101 | return comp(keyA, keyB) < 0; 102 | }); 103 | 104 | position = it.index() - 1; 105 | } 106 | else 107 | { 108 | auto it = std::upper_bound(node.begin(), node.end(), indexKey, [=](const Key* keyA, const Key* keyB) { // is > keyB 109 | return comp(keyA, keyB) < 0; 110 | }); 111 | 112 | position = it.index() - 1; 113 | } 114 | if (position < 0) 115 | position = 0; 116 | 117 | // recurse down 118 | childIndex = node.getRecordData(position); 119 | 120 | return traverseTree(be(*childIndex), indexKey, comp, wildcard); 121 | } 122 | case NodeKind::kBTLeafNode: 123 | { 124 | return nodePtr; 125 | } 126 | case NodeKind::kBTHeaderNode: 127 | case NodeKind::kBTMapNode: 128 | break; 129 | default: 130 | std::cerr << "Invalid node kind! 
Kind: " << int(node.kind()) << std::endl; 131 | 132 | } 133 | return nullptr; 134 | } 135 | 136 | /* 137 | void HFSBTree::walkTree(int nodeIndex) 138 | { 139 | BTNodeDescriptor* desc; 140 | uint32_t offset = be(m_header.nodeSize)*nodeIndex; 141 | uint16_t* firstRecordOffset; 142 | 143 | desc = reinterpret_cast(m_tree + be(m_header.nodeSize)*nodeIndex); 144 | firstRecordOffset = reinterpret_cast(m_tree + be(m_header.nodeSize)*(nodeIndex+1) - sizeof(uint16_t)); 145 | 146 | switch (desc->kind) 147 | { 148 | case NodeKind::kBTLeafNode: 149 | { 150 | HFSPlusCatalogKey* key = reinterpret_cast(((char*) desc) + sizeof(BTNodeDescriptor)); 151 | std::cout << "LeafNode " << nodeIndex << " is a leaf node: " << UnicharToString(key->nodeName) << std::endl; 152 | std::cout << "LeafSibling: " << be(desc->fLink) << std::endl; 153 | std::cout << "LeafRecords: " << be(desc->numRecords) << std::endl; 154 | 155 | for (long i = 0; i < be(desc->numRecords); i++) 156 | { 157 | uint16_t recordOffset = be(*(firstRecordOffset-i)); 158 | HFSPlusCatalogKey* recordKey = reinterpret_cast(((char*) desc) + recordOffset); 159 | HFSPlusCatalogFile* record; 160 | RecordType recType; 161 | 162 | std::cout << "LeafRecordKey: " << UnicharToString(recordKey->nodeName) << " - parent: " << be(recordKey->parentID) << std::endl; 163 | record = reinterpret_cast(((char*) recordKey) + be(recordKey->keyLength) + sizeof(recordKey->keyLength)); 164 | recType = RecordType(be(uint16_t(record->recordType))); 165 | 166 | switch (recType) 167 | { 168 | case RecordType::kHFSPlusFolderRecord: 169 | { 170 | HFSPlusCatalogFolder* file = (HFSPlusCatalogFolder*) record; 171 | std::cout << "\tFolder: ID " << be(file->folderID) << std::endl; 172 | break; 173 | } 174 | case RecordType::kHFSPlusFileRecord: 175 | { 176 | HFSPlusCatalogFile* file = (HFSPlusCatalogFile*) record; 177 | std::cout << "\tFile: ID " << be(file->fileID) << std::endl; 178 | break; 179 | } 180 | case RecordType::kHFSPlusFolderThreadRecord: 181 | { 182 | HFSPlusCatalogThread* thread = (HFSPlusCatalogThread*) record; 183 | std::cout << "\tA folder named " << UnicharToString(thread->nodeName) << " with CNID " << be(recordKey->parentID) << " has parent CNID " << be(thread->parentID) << std::endl; 184 | break; 185 | } 186 | case RecordType::kHFSPlusFileThreadRecord: 187 | { 188 | HFSPlusCatalogThread* thread = (HFSPlusCatalogThread*) record; 189 | std::cout << "\tA file named " << UnicharToString(thread->nodeName) << " with CNID " << be(recordKey->parentID) << " has parent CNID " << be(thread->parentID) << std::endl; 190 | break; 191 | } 192 | default: 193 | { 194 | std::cout << "\tunknown record type: " << be(uint16_t(record->recordType)) << std::endl; 195 | } 196 | } 197 | } 198 | break; 199 | } 200 | case NodeKind::kBTIndexNode: 201 | { 202 | std::cout << "Node " << nodeIndex << " is an index node with " << be(desc->numRecords) << " records\n"; 203 | //std::cout << "Sibling: " << be(desc->fLink) << std::endl; 204 | 205 | for (long i = 0; i < be(desc->numRecords); i++) 206 | { 207 | uint16_t recordOffset = be(*(firstRecordOffset-i)); 208 | HFSPlusCatalogKey* record = reinterpret_cast(((char*) desc) + recordOffset); 209 | uint16_t keyLen = be(record->keyLength); // TODO: kBTVariableIndexKeysMask 210 | uint32_t childNodeIndex; 211 | 212 | std::cout << "Record " << i << ", key len:" << keyLen << std::endl; 213 | std::cout << "Index key " << be(record->parentID) << std::endl; 214 | 215 | childNodeIndex = be(*(uint32_t*) (((char*)record)+2+keyLen) ); 216 | std::cout << "Child node index: 
" << childNodeIndex << std::endl; 217 | walkTree(childNodeIndex); 218 | } 219 | break; 220 | } 221 | case NodeKind::kBTHeaderNode: 222 | std::cout << "Node " << nodeIndex << " is a header node\n"; 223 | break; 224 | case NodeKind::kBTMapNode: 225 | std::cout << "Node " << nodeIndex << " is a map node\n"; 226 | break; 227 | } 228 | }*/ 229 | 230 | 231 | -------------------------------------------------------------------------------- /src/main-fuse.cpp: -------------------------------------------------------------------------------- 1 | #include "main-fuse.h" 2 | #include 3 | #include 4 | #include 5 | #include "be.h" 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include "HFSVolume.h" 11 | #include "AppleDisk.h" 12 | #include "GPTDisk.h" 13 | #include "DMGDisk.h" 14 | #include "FileReader.h" 15 | #include "CachedReader.h" 16 | #include "exceptions.h" 17 | #include "HFSHighLevelVolume.h" 18 | #ifdef DARLING 19 | # include "stat_xlate.h" 20 | #endif 21 | 22 | std::shared_ptr g_fileReader; 23 | std::unique_ptr g_volume; 24 | std::unique_ptr g_partitions; 25 | 26 | int main(int argc, const char** argv) 27 | { 28 | try 29 | { 30 | struct fuse_operations ops; 31 | struct fuse_args args = FUSE_ARGS_INIT(0, NULL); 32 | 33 | if (argc < 3) 34 | { 35 | showHelp(argv[0]); 36 | return 1; 37 | } 38 | 39 | openDisk(argv[1]); 40 | 41 | memset(&ops, 0, sizeof(ops)); 42 | 43 | ops.getattr = hfs_getattr; 44 | ops.open = hfs_open; 45 | ops.read = hfs_read; 46 | ops.release = hfs_release; 47 | //ops.opendir = hfs_opendir; 48 | ops.readdir = hfs_readdir; 49 | ops.readlink = hfs_readlink; 50 | //ops.releasedir = hfs_releasedir; 51 | ops.getxattr = hfs_getxattr; 52 | ops.listxattr = hfs_listxattr; 53 | 54 | for (int i = 0; i < argc; i++) 55 | { 56 | if (i == 1) 57 | ; 58 | else 59 | fuse_opt_add_arg(&args, argv[i]); 60 | } 61 | fuse_opt_add_arg(&args, "-oro"); 62 | fuse_opt_add_arg(&args, "-s"); 63 | 64 | std::cerr << "Everything looks OK, disk mounted\n"; 65 | 66 | #ifdef BEFORE_MOUNT_EXTRA // Darling only 67 | BEFORE_MOUNT_EXTRA; 68 | #endif 69 | 70 | return fuse_main(args.argc, args.argv, &ops, 0); 71 | } 72 | catch (const std::exception& e) 73 | { 74 | std::cerr << "Error: " << e.what() << std::endl; 75 | std::cerr << std::endl; 76 | 77 | std::cerr << "Possible reasons:\n" 78 | "1) The file is corrupt.\n" 79 | "2) The file is not really a DMG file, although it resembles one.\n" 80 | "3) There is a bug in darling-dmg.\n"; 81 | 82 | return 1; 83 | } 84 | } 85 | 86 | void showHelp(const char* argv0) 87 | { 88 | std::cerr << "Usage: " << argv0 << " [fuse args]\n\n"; 89 | std::cerr << ".DMG files and raw disk images can be mounted.\n"; 90 | std::cerr << argv0 << " automatically selects the first HFS+/HFSX partition.\n"; 91 | } 92 | 93 | 94 | void openDisk(const char* path) 95 | { 96 | int partIndex = -1; 97 | std::shared_ptr volume; 98 | 99 | g_fileReader.reset(new FileReader(path)); 100 | 101 | if (DMGDisk::isDMG(g_fileReader)) 102 | g_partitions.reset(new DMGDisk(g_fileReader)); 103 | else if (GPTDisk::isGPTDisk(g_fileReader)) 104 | g_partitions.reset(new GPTDisk(g_fileReader)); 105 | else if (AppleDisk::isAppleDisk(g_fileReader)) 106 | g_partitions.reset(new AppleDisk(g_fileReader)); 107 | else if (HFSVolume::isHFSPlus(g_fileReader)) 108 | volume.reset(new HFSVolume(g_fileReader)); 109 | else 110 | throw function_not_implemented_error("Unsupported file format"); 111 | 112 | if (g_partitions) 113 | { 114 | const std::vector& parts = g_partitions->partitions(); 115 | 116 | for (size_t i = 0; i < 
parts.size(); i++) 117 | { 118 | if (parts[i].type == "Apple_HFS" || parts[i].type == "Apple_HFSX") 119 | { 120 | std::cerr << "Using partition #" << i << " of type " << parts[i].type << std::endl; 121 | partIndex = i; 122 | break; 123 | } 124 | else 125 | std::cerr << "Skipping partition of type " << parts[i].type << std::endl; 126 | } 127 | 128 | if (partIndex == -1) 129 | throw function_not_implemented_error("No suitable partition found in file"); 130 | 131 | volume.reset(new HFSVolume(g_partitions->readerForPartition(partIndex))); 132 | } 133 | 134 | g_volume.reset(new HFSHighLevelVolume(volume)); 135 | } 136 | 137 | int handle_exceptions(std::function func) 138 | { 139 | try 140 | { 141 | return func(); 142 | } 143 | catch (const file_not_found_error& e) 144 | { 145 | std::cerr << "File not found: " << e.what() << std::endl; 146 | return -ENOENT; 147 | } 148 | catch (const function_not_implemented_error& e) 149 | { 150 | std::cerr << "Error: " << e.what() << std::endl; 151 | return -ENOSYS; 152 | } 153 | catch (const io_error& e) 154 | { 155 | std::cerr << "I/O error: " << e.what() << std::endl; 156 | return -EIO; 157 | } 158 | catch (const no_data_error& e) 159 | { 160 | std::cerr << "Non-existent data requested" << std::endl; 161 | return -ENODATA; 162 | } 163 | catch (const attribute_not_found_error& e) 164 | { 165 | std::cerr << e.what() << std::endl; 166 | return -ENODATA; 167 | } 168 | catch (const operation_not_permitted_error& e) 169 | { 170 | std::cerr << e.what() << std::endl; 171 | return -EPERM; 172 | } 173 | catch (const std::logic_error& e) 174 | { 175 | std::cerr << "Fatal error: " << e.what() << std::endl; 176 | abort(); 177 | } 178 | catch (const std::exception& e) 179 | { 180 | std::cerr << "Unknown error: " << e.what() << std::endl; 181 | return -EIO; 182 | } 183 | } 184 | 185 | int hfs_getattr(const char* path, struct stat* stat) 186 | { 187 | std::cerr << "hfs_getattr(" << path << ")\n"; 188 | 189 | return handle_exceptions([&]() { 190 | #ifndef DARLING 191 | *stat = g_volume->stat(path); 192 | #else 193 | struct stat st = g_volume->stat(path); 194 | bsd_stat_to_linux_stat(&st, reinterpret_cast(stat)); 195 | #endif 196 | return 0; 197 | }); 198 | } 199 | 200 | int hfs_readlink(const char* path, char* buf, size_t size) 201 | { 202 | std::cerr << "hfs_readlink(" << path << ")\n"; 203 | 204 | return handle_exceptions([&]() { 205 | 206 | std::shared_ptr file; 207 | size_t rd; 208 | 209 | file = g_volume->openFile(path); 210 | rd = file->read(buf, size-1, 0); 211 | 212 | buf[rd] = '\0'; 213 | return 0; 214 | }); 215 | } 216 | 217 | int hfs_open(const char* path, struct fuse_file_info* info) 218 | { 219 | std::cerr << "hfs_open(" << path << ")\n"; 220 | 221 | return handle_exceptions([&]() { 222 | 223 | std::shared_ptr file; 224 | std::shared_ptr* fh; 225 | 226 | file = g_volume->openFile(path); 227 | fh = new std::shared_ptr(file); 228 | 229 | info->fh = uint64_t(fh); 230 | return 0; 231 | }); 232 | } 233 | 234 | int hfs_read(const char* path, char* buf, size_t bytes, off_t offset, struct fuse_file_info* info) 235 | { 236 | return handle_exceptions([&]() { 237 | if (!info->fh) 238 | return -EIO; 239 | 240 | std::shared_ptr& file = *(std::shared_ptr*) info->fh; 241 | return file->read(buf, bytes, offset); 242 | }); 243 | } 244 | 245 | int hfs_release(const char* path, struct fuse_file_info* info) 246 | { 247 | // std::cout << "File cache zone: hit rate: " << g_volume->getFileZone()->hitRate() << ", size: " << g_volume->getFileZone()->size() << " blocks\n"; 248 | 249 | 
return handle_exceptions([&]() { 250 | 251 | std::shared_ptr* file = (std::shared_ptr*) info->fh; 252 | delete file; 253 | info->fh = 0; 254 | 255 | return 0; 256 | }); 257 | } 258 | 259 | int hfs_readdir(const char* path, void* buf, fuse_fill_dir_t filler, off_t offset, struct fuse_file_info* info) 260 | { 261 | std::cerr << "hfs_readdir(" << path << ")\n"; 262 | 263 | return handle_exceptions([&]() { 264 | std::map contents; 265 | 266 | contents = g_volume->listDirectory(path); 267 | 268 | for (auto it = contents.begin(); it != contents.end(); it++) 269 | { 270 | if (filler(buf, it->first.c_str(), &it->second, 0) != 0) 271 | return -ENOMEM; 272 | } 273 | 274 | return 0; 275 | }); 276 | 277 | } 278 | 279 | #if defined(__APPLE__) && !defined(DARLING) 280 | int hfs_getxattr(const char* path, const char* name, char* value, size_t vlen, uint32_t position) 281 | #else 282 | int hfs_getxattr(const char* path, const char* name, char* value, size_t vlen) 283 | #endif 284 | { 285 | std::cerr << "hfs_getxattr(" << path << ", " << name << ")\n"; 286 | #if defined(__APPLE__) && !defined(DARLING) 287 | if (position > 0) return -ENOSYS; // it's not supported... yet. I think it doesn't happen anymore since osx use less ressource fork 288 | #endif 289 | 290 | return handle_exceptions([&]() -> int { 291 | std::vector data; 292 | 293 | data = g_volume->getXattr(path, name); 294 | 295 | if (value == nullptr) 296 | return data.size(); 297 | 298 | if (vlen < data.size()) 299 | return -ERANGE; 300 | 301 | memcpy(value, &data[0], data.size()); 302 | return data.size(); 303 | }); 304 | } 305 | 306 | int hfs_listxattr(const char* path, char* buffer, size_t size) 307 | { 308 | return handle_exceptions([&]() -> int { 309 | std::vector attrs; 310 | std::vector output; 311 | 312 | attrs = g_volume->listXattr(path); 313 | 314 | for (const std::string& str : attrs) 315 | output.insert(output.end(), str.c_str(), str.c_str() + str.length() + 1); 316 | 317 | if (buffer == nullptr) 318 | return output.size(); 319 | 320 | if (size < output.size()) 321 | return -ERANGE; 322 | 323 | memcpy(buffer, &output[0], output.size()); 324 | return output.size(); 325 | }); 326 | } 327 | -------------------------------------------------------------------------------- /src/DMGDecompressor.cpp: -------------------------------------------------------------------------------- 1 | #include "DMGDecompressor.h" 2 | #include 3 | #include 4 | #ifdef COMPILE_WITH_LZFSE 5 | #include 6 | #endif 7 | #include "adc.h" 8 | #include 9 | #include 10 | #include 11 | #include 12 | #include 13 | #include 14 | #include "exceptions.h" 15 | #include 16 | 17 | class DMGDecompressor_Zlib : public DMGDecompressor 18 | { 19 | public: 20 | DMGDecompressor_Zlib(std::shared_ptr reader); 21 | ~DMGDecompressor_Zlib(); 22 | virtual int32_t decompress(void* output, int32_t count, int64_t offset) override; 23 | private: 24 | virtual int32_t decompress(void* output, int32_t count); 25 | z_stream m_strm; 26 | }; 27 | 28 | class DMGDecompressor_Bzip2 : public DMGDecompressor 29 | { 30 | public: 31 | DMGDecompressor_Bzip2(std::shared_ptr reader); 32 | ~DMGDecompressor_Bzip2(); 33 | virtual int32_t decompress(void* output, int32_t count, int64_t offset) override; 34 | private: 35 | virtual int32_t decompress(void* output, int32_t count); 36 | bz_stream m_strm; 37 | }; 38 | 39 | class DMGDecompressor_ADC : public DMGDecompressor 40 | { 41 | public: 42 | DMGDecompressor_ADC(std::shared_ptr reader) : DMGDecompressor(reader) {} 43 | virtual int32_t decompress(void* output, int32_t 
count, int64_t offset) override; 44 | }; 45 | 46 | class DMGDecompressor_LZFSE : public DMGDecompressor 47 | { 48 | public: 49 | DMGDecompressor_LZFSE(std::shared_ptr reader) : DMGDecompressor(reader) {} 50 | virtual int32_t decompress(void* output, int32_t count, int64_t offset) override; 51 | private: 52 | virtual int32_t decompress(void* output, int32_t outputBytes); 53 | }; 54 | 55 | DMGDecompressor::DMGDecompressor(std::shared_ptr reader) 56 | : m_reader(reader), m_pos(0) 57 | { 58 | } 59 | 60 | DMGDecompressor* DMGDecompressor::create(RunType runType, std::shared_ptr reader) 61 | { 62 | switch (runType) 63 | { 64 | case RunType::Zlib: 65 | return new DMGDecompressor_Zlib(reader); 66 | case RunType::Bzip2: 67 | return new DMGDecompressor_Bzip2(reader); 68 | case RunType::ADC: 69 | return new DMGDecompressor_ADC(reader); 70 | #ifdef COMPILE_WITH_LZFSE 71 | case RunType::LZFSE: 72 | return new DMGDecompressor_LZFSE(reader); 73 | #endif 74 | default: 75 | return nullptr; 76 | } 77 | } 78 | 79 | int DMGDecompressor::readSome(char** ptr) 80 | { 81 | *ptr = m_buf; 82 | int rd = m_reader->read(m_buf, sizeof(m_buf), m_pos); 83 | 84 | if (rd <= 0) 85 | throw io_error("DMGDecompressor cannot read from stream"); 86 | 87 | return rd; 88 | } 89 | 90 | void DMGDecompressor::processed(int bytes) 91 | { 92 | m_pos += bytes; 93 | 94 | #ifdef DEBUG 95 | //std::cout << "Processed: " << bytes << ", total: " << m_pos << std::endl; 96 | #endif 97 | } 98 | 99 | DMGDecompressor_Zlib::DMGDecompressor_Zlib(std::shared_ptr reader) 100 | : DMGDecompressor(reader) 101 | { 102 | memset(&m_strm, 0, sizeof(m_strm)); 103 | if (inflateInit(&m_strm) != Z_OK) 104 | throw std::bad_alloc(); 105 | } 106 | 107 | DMGDecompressor_Zlib::~DMGDecompressor_Zlib() 108 | { 109 | inflateEnd(&m_strm); 110 | } 111 | 112 | int32_t DMGDecompressor_Zlib::decompress(void* output, int32_t count) 113 | { 114 | int status; 115 | char* input; 116 | int bytesRead; 117 | 118 | #ifdef DEBUG 119 | //std::cout << "zlib: Asked to provide " << outputBytes << " bytes\n"; 120 | #endif 121 | 122 | m_strm.next_out = (uint8_t*) output; 123 | m_strm.avail_out = count; 124 | do 125 | { 126 | if (m_strm.avail_in == 0) 127 | { 128 | bytesRead = readSome(&input); 129 | if (bytesRead <= 0) 130 | throw io_error("Error reading zlib stream"); 131 | processed(bytesRead); 132 | m_strm.next_in = (uint8_t*)input; 133 | m_strm.avail_in = (uint32_t)bytesRead; 134 | } 135 | 136 | status = inflate(&m_strm, Z_SYNC_FLUSH); 137 | 138 | if (status == Z_STREAM_END) 139 | return count - m_strm.avail_out; 140 | else if (status < 0) 141 | return status; 142 | } 143 | while (m_strm.avail_out > 0); 144 | 145 | return count; 146 | } 147 | 148 | int32_t DMGDecompressor_Zlib::decompress(void* output, int32_t count, int64_t offset) 149 | { 150 | int32_t done = 0; 151 | 152 | #ifdef DEBUG 153 | std::cout << "zlib: Asked to provide " << count << " bytes\n"; 154 | #endif 155 | 156 | while (offset > 0) 157 | { 158 | char waste[4096]; 159 | int32_t to_read = std::min(int64_t(sizeof(waste)), offset); 160 | int32_t bytesDecompressed = decompress(waste, to_read); 161 | if (bytesDecompressed <= 0) 162 | return bytesDecompressed; 163 | offset -= bytesDecompressed; 164 | } 165 | int32_t bytesDecompressed = decompress((uint8_t*)output+done, count); 166 | return bytesDecompressed; 167 | } 168 | 169 | DMGDecompressor_Bzip2::DMGDecompressor_Bzip2(std::shared_ptr reader) 170 | : DMGDecompressor(reader) 171 | { 172 | memset(&m_strm, 0, sizeof(m_strm)); 173 | if (BZ2_bzDecompressInit(&m_strm, 0, false) 
!= Z_OK) 174 | throw std::bad_alloc(); 175 | } 176 | 177 | DMGDecompressor_Bzip2::~DMGDecompressor_Bzip2() 178 | { 179 | BZ2_bzDecompressEnd(&m_strm); 180 | } 181 | 182 | int32_t DMGDecompressor_Bzip2::decompress(void* output, int32_t count) 183 | { 184 | int status; 185 | char* input; 186 | int bytesRead; 187 | 188 | #ifdef DEBUG 189 | //std::cout << "bz2: Asked to provide " << outputBytes << " bytes\n"; 190 | #endif 191 | 192 | m_strm.next_out = (char*) output; 193 | m_strm.avail_out = count; 194 | do 195 | { 196 | if (m_strm.avail_in == 0) 197 | { 198 | bytesRead = readSome(&input); 199 | if (bytesRead <= 0) 200 | throw io_error("Error reading bz2 stream"); 201 | processed(bytesRead); 202 | m_strm.next_in = input; 203 | m_strm.avail_in = (uint32_t)bytesRead; 204 | } 205 | 206 | status = BZ2_bzDecompress(&m_strm); 207 | 208 | if (status == BZ_STREAM_END) 209 | break; 210 | else if (status < 0) 211 | return status; 212 | } 213 | while (m_strm.avail_out > 0); 214 | 215 | return count; 216 | } 217 | 218 | int32_t DMGDecompressor_Bzip2::decompress(void* output, int32_t count, int64_t offset) 219 | { 220 | int32_t done = 0; 221 | 222 | #ifdef DEBUG 223 | //std::cout << "bz2: Asked to provide " << outputBytes << " bytes\n"; 224 | #endif 225 | 226 | while (offset > 0) 227 | { 228 | char waste[4096]; 229 | int32_t to_read = std::min(int64_t(sizeof(waste)), offset); 230 | int32_t bytesDecompressed = decompress(waste, to_read); 231 | if (bytesDecompressed <= 0) 232 | return bytesDecompressed; 233 | offset -= bytesDecompressed; 234 | } 235 | int32_t bytesDecompressed = decompress((uint8_t*)output, count); 236 | return bytesDecompressed; 237 | } 238 | 239 | int32_t DMGDecompressor_ADC::decompress(void* output, int32_t count, int64_t offset) 240 | { 241 | if (offset < 0) 242 | throw io_error("offset < 0"); 243 | 244 | int32_t countLeft = count; 245 | int nb_read; 246 | int32_t nb_input_char_used; 247 | char* inputBuffer; 248 | int restartIndex = 0; 249 | int bytes_written; 250 | 251 | uint8_t decrompressBuffer[0x20000 + 0x80]; // 2x maximum lookback + maximum size of a decompressed chunk 252 | 253 | while ( countLeft > 0 ) 254 | { 255 | nb_read = readSome(&inputBuffer); 256 | 257 | nb_input_char_used = adc_decompress(nb_read, (uint8_t*)inputBuffer, sizeof(decrompressBuffer), (uint8_t* )&decrompressBuffer[0], restartIndex, &bytes_written); 258 | 259 | if (nb_input_char_used == 0) 260 | throw io_error("nb_input_char_used == 0"); 261 | 262 | if ( bytes_written >= offset+countLeft) { 263 | memcpy(output, decrompressBuffer+offset, countLeft); 264 | countLeft = 0; 265 | } 266 | else if ( bytes_written >= 0x20000) { 267 | if (offset < 0x10000) { 268 | memcpy(output, decrompressBuffer+offset, 0x10000-offset); 269 | output = ((uint8_t*)output)+0x10000-offset; 270 | offset = 0; 271 | countLeft -= 0x10000-offset; 272 | }else{ 273 | // to copy = 0 274 | offset -= 0x10000; 275 | } 276 | memmove(decrompressBuffer, decrompressBuffer+0x10000, bytes_written - 0x10000); // memory can overlap, so memmove in mandatory 277 | restartIndex = bytes_written - 0x10000; 278 | }else{ 279 | restartIndex = bytes_written; 280 | } 281 | 282 | processed(nb_input_char_used); 283 | } 284 | return count; 285 | } 286 | 287 | #ifdef COMPILE_WITH_LZFSE 288 | 289 | int32_t DMGDecompressor_LZFSE::decompress(void* output, int32_t outputBytes) 290 | { 291 | // DMGDecompressor can only read by 8k while compressed length of a LZFSE block can be much bigger 292 | 293 | int32_t done = 0; 294 | char* input = nullptr; 295 | char *inputBig = 
nullptr; 296 | 297 | int inputBytes = readSome(&input); 298 | 299 | const uint64_t readerTotalSize = readerLength(); 300 | 301 | if (inputBytes < readerTotalSize) 302 | { 303 | inputBig = new char[readerTotalSize]; 304 | memcpy(inputBig, input, inputBytes); 305 | 306 | processed(inputBytes); 307 | 308 | do 309 | { 310 | int nextReadBytes = readSome(&input); 311 | 312 | memcpy(inputBig + inputBytes, input, nextReadBytes); 313 | 314 | inputBytes += nextReadBytes; 315 | 316 | processed(nextReadBytes); 317 | } 318 | while (inputBytes < readerTotalSize); 319 | 320 | input = inputBig; 321 | } 322 | 323 | size_t out_size = lzfse_decode_buffer((uint8_t *)output, outputBytes, (const uint8_t *)input, inputBytes, nullptr); 324 | 325 | if (out_size == 0) 326 | throw io_error("DMGDecompressor_LZFSE failed"); 327 | 328 | if (inputBig) 329 | delete[] inputBig; 330 | else 331 | processed(inputBytes); 332 | 333 | return out_size; 334 | } 335 | 336 | int32_t DMGDecompressor_LZFSE::decompress(void* output, int32_t count, int64_t offset) 337 | { 338 | int32_t done = 0; 339 | 340 | #ifdef DEBUG 341 | std::cout << "lzfse: Asked to provide " << count << " bytes\n"; 342 | #endif 343 | 344 | while (offset > 0) 345 | { 346 | char waste[4096]; 347 | int32_t to_read = std::min(int64_t(sizeof(waste)), offset); 348 | int32_t bytesDecompressed = decompress(waste, to_read); 349 | // bytesDecompressed seems to be always equal to to_read 350 | assert(bytesDecompressed == to_read); 351 | offset -= bytesDecompressed; 352 | } 353 | done = decompress(output, count); 354 | return done; 355 | } 356 | 357 | #endif 358 | -------------------------------------------------------------------------------- /src/main-hdiutil.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include 11 | 12 | static void printMount(); 13 | 14 | #define main main_fuse 15 | #define BEFORE_MOUNT_EXTRA printMount(); daemon(false, false) 16 | #include "main-fuse.cpp" 17 | #undef main 18 | 19 | static void printHelp(); 20 | static void doFork(void); 21 | static int doAttach(int argc, char** argv); 22 | static int doDetach(int argc, char** argv); 23 | static void addFusermountIntoPath(); 24 | 25 | extern "C" int __darling_vchroot_expand(const char* path, char* out); 26 | 27 | int main(int argc, char** argv) 28 | { 29 | if (argc < 2) 30 | printHelp(); 31 | 32 | if (strcmp(argv[1], "attach") == 0) 33 | return doAttach(argc, argv); 34 | else if (strcmp(argv[1], "detach") == 0) 35 | return doDetach(argc, argv); 36 | 37 | printHelp(); 38 | return 1; 39 | } 40 | 41 | static void printHelp() 42 | { 43 | std::cerr << "Usage: hdiutil \n\n"; 44 | std::cerr << "Possible actions:\n" 45 | "\tattach [options] \n" 46 | "\t\tMounts a .dmg file and prints the mount locaton\n" 47 | "\tdetach [options] \n" 48 | "\t\tUnmounts a .dmg file mounted at \n"; 49 | 50 | exit(1); 51 | } 52 | 53 | static std::string g_mountDir; 54 | static bool puppetstrings = false; 55 | static bool plist = false; 56 | 57 | static int doAttach(int argc, char** argv) 58 | { 59 | if (argc < 3) 60 | printHelp(); 61 | 62 | static const struct option longopts[] = { 63 | { "puppetstrings", no_argument, NULL, 1 }, 64 | { "mountpoint", required_argument, NULL, 2 }, 65 | { "mountroot", required_argument, NULL, 3 }, 66 | { "quiet", no_argument, NULL, 4 }, 67 | { "nobrowse", no_argument, NULL, 5 }, 68 | { "noautoopen", no_argument, NULL, 5 }, 69 | { 
"mountrandom", required_argument, NULL, 6 }, 70 | { "plist", no_argument, NULL, 7 }, 71 | { "readonly", no_argument, NULL, 8 }, 72 | { "noidme", no_argument, NULL, 9 }, 73 | { NULL, 0, NULL, 0 } 74 | }; 75 | 76 | std::string prefix, mount, dmg; 77 | std::string mountname, mountroot; 78 | const char *p, *p2; 79 | int fd; 80 | char output[] = "/tmp/hdiutilXXXXXX"; 81 | const char* args[7]; 82 | bool quiet = false; 83 | 84 | mountroot = "/Volumes"; 85 | 86 | // Skip "attach" 87 | argc--; 88 | argv++; 89 | 90 | int ch; 91 | while ((ch = getopt_long_only(argc, argv, "", longopts, NULL)) != -1) 92 | { 93 | switch (ch) 94 | { 95 | case 1: 96 | puppetstrings = true; 97 | break; 98 | case 2: 99 | mount = optarg; 100 | break; 101 | case 3: 102 | mountroot = optarg; 103 | break; 104 | case 4: 105 | quiet = true; 106 | break; 107 | case 5: 108 | // We don't implement opening Finder yet, so this is our default 109 | break; 110 | case 6: { 111 | size_t opt_len = strlen(optarg); 112 | bool append_slash = optarg[opt_len - 1] != '/'; 113 | size_t template_len = opt_len + 6 + (append_slash ? 1 : 0); 114 | char* template_string = new char[template_len + 1]; 115 | strcpy(template_string, optarg); 116 | if (append_slash) 117 | template_string[opt_len] = '/'; 118 | for (size_t i = 0; i < 6; ++i) 119 | template_string[opt_len + i + (append_slash ? 1 : 0)] = 'X'; 120 | template_string[template_len] = '\0'; 121 | if (!mkdtemp(template_string)) { 122 | delete[] template_string; 123 | std::cerr << "Failed to create temporary directory" << std::endl; 124 | return 1; 125 | } 126 | mount = std::string(template_string); 127 | delete[] template_string; 128 | } break; 129 | case 7: 130 | plist = true; 131 | break; 132 | case 8: 133 | // readonly is already enforced by default 134 | break; 135 | case 9: 136 | // IDME (download post-processing) is not supported anyways, so this option does nothing 137 | break; 138 | case 0: 139 | break; 140 | default: 141 | std::cerr << "Got ch " << ch << std::endl; 142 | printHelp(); 143 | } 144 | } 145 | 146 | if (optind != argc-1) 147 | { 148 | printHelp(); 149 | } 150 | 151 | dmg = argv[optind]; 152 | 153 | if (access(dmg.c_str(), R_OK) != 0) 154 | { 155 | std::cerr << "Cannot access " << dmg.c_str() << std::endl; 156 | return 1; 157 | } 158 | 159 | p = strrchr(argv[optind], '/'); 160 | if (p == nullptr) 161 | p = argv[optind]; 162 | else 163 | p++; 164 | 165 | p2 = strrchr(argv[optind], '.'); 166 | if (p2 == nullptr) 167 | p2 = argv[optind] + strlen(argv[optind]); 168 | 169 | mountname = std::string(p, p2-p); 170 | 171 | if (mount.empty()) 172 | { 173 | mount = mountroot; 174 | if (mount[mount.length()-1] != '/') 175 | mount += '/'; 176 | mount += mountname; 177 | } 178 | 179 | if (mkdir(mount.c_str(), 0777) == -1 && errno != EEXIST) 180 | { 181 | std::cerr << "Cannot mkdir " << mount << std::endl; 182 | return 1; 183 | } 184 | g_mountDir = mount; 185 | 186 | fd = mkstemp(output); 187 | 188 | // redirect stderr into temp file 189 | dup2(2, 255); 190 | close(2); 191 | 192 | dup2(fd, 2); 193 | 194 | args[0] = "darling-dmg"; 195 | args[1] = dmg.c_str(); 196 | 197 | char linux_path[4096]; 198 | __darling_vchroot_expand(mount.c_str(), linux_path); 199 | args[2] = linux_path; 200 | 201 | std::cerr << "Will pass " << args[2] << std::endl; 202 | 203 | args[3] = "-f"; // Fork has to be done by Darling code, otherwise the LKM will not talk to us any more 204 | args[4] = "-o"; 205 | args[5] = "nonempty"; 206 | args[6] = nullptr; 207 | 208 | addFusermountIntoPath(); 209 | 210 | if (main_fuse(4, 
args) != 0) 211 | { 212 | char buf[512]; 213 | int rd; 214 | 215 | // redirect stderr back 216 | close(2); 217 | dup2(255, 2); 218 | 219 | lseek(fd, SEEK_SET, 0); 220 | 221 | while ((rd = read(fd, buf, sizeof(buf))) > 0) 222 | { 223 | write(2, buf, rd); 224 | } 225 | close(fd); 226 | unlink(output); 227 | 228 | return 1; 229 | } 230 | 231 | close(fd); 232 | unlink(output); 233 | 234 | return 0; 235 | } 236 | 237 | static void printMount() 238 | { 239 | if (plist) { 240 | std::cout << "" << std::endl; 241 | std::cout << "" << std::endl; 242 | std::cout << "" << std::endl; 243 | std::cout << "" << std::endl; 244 | std::cout << "\tsystem-entities" << std::endl; 245 | std::cout << "\t" << std::endl; 246 | std::cout << "\t\t" << std::endl; 247 | std::cout << "\t\t\tcontent-hint" << std::endl; 248 | std::cout << "\t\t\tGUID_partition_scheme" << std::endl; 249 | std::cout << "\t\t\tdev-entry" << std::endl; 250 | std::cout << "\t\t\t/dev/disk1" << std::endl; 251 | std::cout << "\t\t\tpotentially-mountable" << std::endl; 252 | std::cout << "\t\t\t" << std::endl; 253 | std::cout << "\t\t\tunmapped-content-hint" << std::endl; 254 | std::cout << "\t\t\tGUID_partition_scheme" << std::endl; 255 | std::cout << "\t\t" << std::endl; 256 | std::cout << "\t\t" << std::endl; 257 | std::cout << "\t\t\tcontent-hint" << std::endl; 258 | std::cout << "\t\t\tApple_HFS" << std::endl; 259 | std::cout << "\t\t\tdev-entry" << std::endl; 260 | std::cout << "\t\t\t/dev/disk1s1" << std::endl; 261 | std::cout << "\t\t\tmount-point" << std::endl; 262 | std::cout << "\t\t\t" << g_mountDir << "" << std::endl; 263 | std::cout << "\t\t\tpotentially-mountable" << std::endl; 264 | std::cout << "\t\t\t" << std::endl; 265 | std::cout << "\t\t\tunmapped-content-hint" << std::endl; 266 | std::cout << "\t\t\t48465300-0000-11AA-AA11-00306543ECAC" << std::endl; 267 | std::cout << "\t\t\tvolume-kind" << std::endl; 268 | std::cout << "\t\t\thfs" << std::endl; 269 | std::cout << "\t\t" << std::endl; 270 | std::cout << "\t" << std::endl; 271 | std::cout << "" << std::endl; 272 | std::cout << "" << std::endl; 273 | } else if (!puppetstrings) { 274 | std::cout << g_mountDir << std::endl; 275 | } else { 276 | // FIXME: This is kind of fake... 
277 | // puppetstrings should produce output like: 278 | // /dev/disk1 GUID_partition_scheme 279 | // /dev/disk1s1 Apple_HFS /Volumes/MachOView 280 | std::cout << "/dev/disk1\tGUID_partition_scheme\t\n"; 281 | std::cout << "/dev/disk1s1\tApple_HFS\t" << g_mountDir << std::endl; 282 | } 283 | } 284 | 285 | extern "C" 286 | { 287 | extern const struct elf_calls* _elfcalls; 288 | extern char **environ; 289 | } 290 | 291 | 292 | static int doDetach(int argc, char** argv) 293 | { 294 | pid_t pid; 295 | int (*elf_posix_spawnp)(pid_t* pid, const char* path, const posix_spawn_file_actions_t *file_actions, const posix_spawnattr_t *attrp, char *const argv[], char *const envp[]); 296 | 297 | if (argc < 3) 298 | printHelp(); 299 | 300 | static const struct option longopts[] = { 301 | { "quiet", no_argument, NULL, 1 }, 302 | { NULL, 0, NULL, 0 } 303 | }; 304 | 305 | // Skip "detach" 306 | argc--; 307 | argv++; 308 | 309 | int ch; 310 | while ((ch = getopt_long_only(argc, argv, "", longopts, NULL)) != -1) 311 | { 312 | switch (ch) 313 | { 314 | case 1: 315 | break; 316 | case 0: 317 | break; 318 | default: 319 | printHelp(); 320 | } 321 | } 322 | 323 | addFusermountIntoPath(); 324 | 325 | char linux_path[4096]; 326 | __darling_vchroot_expand(argv[optind], linux_path); 327 | const char* pargv[] = { "fusermount", "-u", linux_path, nullptr }; 328 | 329 | *((void**)(&elf_posix_spawnp)) = _elfcalls->dlsym_fatal(nullptr, "posix_spawnp"); 330 | if (elf_posix_spawnp(&pid, "fusermount", nullptr, nullptr, (char* const*) pargv, environ) != 0) 331 | { 332 | std::cerr << "Failed to execute fusermount!\n"; 333 | return 1; 334 | } 335 | else 336 | { 337 | int status; 338 | waitpid(pid, &status, 0); 339 | rmdir(argv[optind]); 340 | 341 | return 0; 342 | } 343 | } 344 | 345 | void addFusermountIntoPath() 346 | { 347 | std::string path = getenv("PATH"); 348 | int (*elf_setenv)(const char *name, const char *value, int overwrite); 349 | 350 | path += ":/Volumes/SystemRoot/bin" 351 | ":/Volumes/SystemRoot/usr/bin"; 352 | 353 | // Calling setenv doesn't change current process' environment variables, 354 | // it only modifies the local copy maintained by libc. This local copy 355 | // is then passed by libc to execve(). 356 | // Since the execution of 'fusermount' happens in libfuse.so, we need 357 | // to add it into the environment on the ELF side. 
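// Note: resolve the native (ELF) libc's setenv through the elf_calls table and
// call it directly, so the PATH extension above is visible to the ELF-side code
// in libfuse.so that later spawns 'fusermount' (the same pattern doDetach()
// uses to obtain posix_spawnp).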
358 | *((void**)(&elf_setenv)) = _elfcalls->dlsym_fatal(nullptr, "setenv"); 359 | elf_setenv("PATH", path.c_str(), true); 360 | } 361 | -------------------------------------------------------------------------------- /src/HFSHighLevelVolume.cpp: -------------------------------------------------------------------------------- 1 | #include "HFSHighLevelVolume.h" 2 | #include 3 | #include 4 | #include "HFSAttributeBTree.h" 5 | #include "HFSZlibReader.h" 6 | #include "MemoryReader.h" 7 | #include "ResourceFork.h" 8 | #include "exceptions.h" 9 | #include "decmpfs.h" 10 | #include 11 | 12 | static const char* RESOURCE_FORK_SUFFIX = "#..namedfork#rsrc"; 13 | static const char* XATTR_RESOURCE_FORK = "com.apple.ResourceFork"; 14 | static const char* XATTR_FINDER_INFO = "com.apple.FinderInfo"; 15 | 16 | HFSHighLevelVolume::HFSHighLevelVolume(std::shared_ptr volume) 17 | : m_volume(volume) 18 | { 19 | uint64_t volumeSize = this->volumeSize(); 20 | if (volumeSize < 50*1024*1024) 21 | { 22 | // limit cache sizes to volume size 23 | m_volume->getFileZone()->setMaxBlocks(volumeSize / CacheZone::BLOCK_SIZE / 2); 24 | m_volume->getBtreeZone()->setMaxBlocks(volumeSize / CacheZone::BLOCK_SIZE / 2); 25 | } 26 | 27 | m_tree.reset(m_volume->rootCatalogTree()); 28 | } 29 | 30 | static bool string_endsWith(const std::string& str, const std::string& what) 31 | { 32 | if (str.size() < what.size()) 33 | return false; 34 | else 35 | return str.compare(str.size()-what.size(), what.size(), what) == 0; 36 | } 37 | 38 | std::map HFSHighLevelVolume::listDirectory(const std::string& path) 39 | { 40 | std::map> contents; 41 | std::map rv; 42 | int err; 43 | 44 | err = m_tree->listDirectory(path, contents); 45 | 46 | if (err != 0) 47 | throw file_not_found_error(path); 48 | 49 | for (auto it = contents.begin(); it != contents.end(); it++) 50 | { 51 | struct stat st; 52 | hfs_nativeToStat_decmpfs(*(it->second), &st, string_endsWith(it->first, RESOURCE_FORK_SUFFIX)); 53 | 54 | rv[it->first] = st; 55 | } 56 | 57 | return rv; 58 | } 59 | 60 | struct stat HFSHighLevelVolume::stat(const std::string& path) 61 | { 62 | HFSPlusCatalogFileOrFolder ff; 63 | std::string spath = path; 64 | int rv; 65 | bool resourceFork = false; 66 | struct stat stat; 67 | 68 | if (string_endsWith(path, RESOURCE_FORK_SUFFIX)) 69 | { 70 | spath.resize(spath.length() - (sizeof(RESOURCE_FORK_SUFFIX)-1) ); 71 | resourceFork = true; 72 | } 73 | 74 | rv = m_tree->stat(spath.c_str(), &ff); 75 | if (rv != 0) 76 | throw file_not_found_error(spath); 77 | 78 | hfs_nativeToStat_decmpfs(ff, &stat, resourceFork); 79 | 80 | return stat; 81 | } 82 | 83 | void HFSHighLevelVolume::hfs_nativeToStat_decmpfs(const HFSPlusCatalogFileOrFolder& ff, struct stat* stat, bool resourceFork) 84 | { 85 | assert(stat != nullptr); 86 | 87 | hfs_nativeToStat(ff, stat, resourceFork); 88 | 89 | // Compressed FS support 90 | if ((ff.file.permissions.ownerFlags & HFS_PERM_OFLAG_COMPRESSED) && !stat->st_size) 91 | { 92 | decmpfs_disk_header* hdr; 93 | std::vector xattrData; 94 | 95 | hdr = get_decmpfs(be(ff.file.fileID), xattrData); 96 | 97 | if (hdr != nullptr) 98 | stat->st_size = hdr->uncompressed_size; 99 | } 100 | } 101 | 102 | std::shared_ptr HFSHighLevelVolume::openFile(const std::string& path) 103 | { 104 | std::shared_ptr file; 105 | std::string spath = path; 106 | int rv = 0; 107 | bool resourceFork = false, compressed = false; 108 | HFSPlusCatalogFileOrFolder ff; 109 | 110 | if (string_endsWith(path, RESOURCE_FORK_SUFFIX)) 111 | { 112 | spath.resize(spath.length() - 
(sizeof(RESOURCE_FORK_SUFFIX)-1)); 113 | resourceFork = true; 114 | } 115 | 116 | if (!resourceFork) 117 | { 118 | // stat 119 | rv = m_tree->stat(spath.c_str(), &ff); 120 | compressed = ff.file.permissions.ownerFlags & HFS_PERM_OFLAG_COMPRESSED; 121 | } 122 | 123 | if (rv != 0) 124 | throw file_not_found_error(path); 125 | 126 | if (!compressed) 127 | { 128 | rv = m_tree->openFile(spath.c_str(), file, resourceFork); 129 | 130 | if (rv != 0) 131 | throw file_not_found_error(path); 132 | } 133 | else 134 | { 135 | decmpfs_disk_header* hdr; 136 | std::vector holder; 137 | 138 | hdr = get_decmpfs(be(ff.file.fileID), holder); 139 | 140 | if (!hdr) 141 | throw file_not_found_error(path); 142 | 143 | 144 | #ifdef DEBUG 145 | std::cout << "Opening compressed file, compression type: " << int(hdr->compression_type) << std::endl; 146 | #endif 147 | switch (DecmpfsCompressionType(hdr->compression_type)) 148 | { 149 | case DecmpfsCompressionType::UncompressedInline: 150 | file.reset(new MemoryReader(hdr->attr_bytes, hdr->uncompressed_size)); 151 | break; 152 | case DecmpfsCompressionType::CompressedInline: 153 | file.reset(new MemoryReader(hdr->attr_bytes, holder.size() - 16)); 154 | file.reset(new HFSZlibReader(file, hdr->uncompressed_size, true)); 155 | break; 156 | case DecmpfsCompressionType::CompressedResourceFork: 157 | { 158 | rv = m_tree->openFile(spath.c_str(), file, true); 159 | if (rv == 0) 160 | { 161 | std::unique_ptr rsrc (new ResourceFork(file)); 162 | file = rsrc->getResource(DECMPFS_MAGIC, DECMPFS_ID); 163 | 164 | if (file) 165 | file.reset(new HFSZlibReader(file, hdr->uncompressed_size)); 166 | else 167 | throw function_not_implemented_error("Could not find decmpfs resource in resource fork"); 168 | } 169 | break; 170 | } 171 | default: 172 | throw function_not_implemented_error("Unknown compression type"); 173 | } 174 | } 175 | 176 | file.reset(new CachedReader(file, m_volume->getFileZone(), path)); 177 | 178 | return file; 179 | } 180 | 181 | void getXattrFinderInfo(const HFSPlusCatalogFileOrFolder& ff, uint8_t buf[32]) 182 | { 183 | FileInfo& newUserInfo = (*((FileInfo*)buf)); 184 | FolderInfo& newFolderInfo = (*((FolderInfo*)buf)); 185 | ExtendedFileInfo& newFinderInfo (*((ExtendedFileInfo*)(buf+16))); 186 | ExtendedFolderInfo& newExtendedFolderInfo (*((ExtendedFolderInfo*)(buf+16))); 187 | if (be(ff.file.recordType) == RecordType::kHFSPlusFileRecord) 188 | { 189 | // Push finder only if there is non zero data in it, excepted non-exposed field. 190 | newUserInfo = ff.file.userInfo; 191 | if ( be(newUserInfo.fileType) == kSymLinkFileType ) 192 | memset(&newUserInfo.fileType, 0, sizeof(newUserInfo.fileType)); 193 | else 194 | newUserInfo.fileType = newUserInfo.fileType; 195 | if ( be(newUserInfo.fileCreator) == kSymLinkCreator ) 196 | memset(&newUserInfo.fileCreator, 0, sizeof(newUserInfo.fileCreator)); 197 | else 198 | newUserInfo.fileCreator = newUserInfo.fileCreator; 199 | 200 | newFinderInfo = ff.file.finderInfo; 201 | newFinderInfo.document_id = 0; 202 | newFinderInfo.date_added = 0; 203 | newFinderInfo.write_gen_counter = 0; 204 | }else{ 205 | // Folder don't hace ressource fork 206 | // Push finder only if there is non zero data in it, excepted non-exposed field. 
207 | newFolderInfo = ff.folder.userInfo; 208 | newExtendedFolderInfo = ff.folder.finderInfo; 209 | newExtendedFolderInfo.document_id = 0; 210 | newExtendedFolderInfo.date_added = 0; 211 | newExtendedFolderInfo.write_gen_counter = 0; 212 | } 213 | } 214 | 215 | std::vector HFSHighLevelVolume::listXattr(const std::string& path) 216 | { 217 | std::vector output; 218 | HFSPlusCatalogFileOrFolder ff; 219 | int err; 220 | 221 | // get CNID 222 | err = m_tree->stat(path, &ff); 223 | 224 | if (err != 0) 225 | throw file_not_found_error(path); 226 | 227 | uint8_t buf[32]; 228 | const char zero[32] = { 0 }; 229 | getXattrFinderInfo(ff, buf); 230 | if ( memcmp(buf, zero, 32) != 0 ) // push FinderInfo only is non zero 231 | output.push_back(XATTR_FINDER_INFO); 232 | 233 | // Push ressource fork only if there is one 234 | if (be(ff.folder.recordType) == RecordType::kHFSPlusFileRecord && ff.file.resourceFork.logicalSize != 0 && !(ff.file.permissions.ownerFlags & HFS_PERM_OFLAG_COMPRESSED)) { 235 | output.push_back(XATTR_RESOURCE_FORK); 236 | } 237 | 238 | if (m_volume->attributes()) 239 | { 240 | for (const auto& kv : m_volume->attributes()->getattr(be(ff.file.fileID))) { 241 | if (!(ff.file.permissions.ownerFlags & HFS_PERM_OFLAG_COMPRESSED) || kv.first != "com.apple.decmpfs") 242 | output.push_back(kv.first); 243 | } 244 | } 245 | 246 | return output; 247 | } 248 | 249 | std::vector HFSHighLevelVolume::getXattr(const std::string& path, const std::string& name) 250 | { 251 | int rv; 252 | std::string spath = path; 253 | std::vector output; 254 | 255 | if (string_endsWith(spath, RESOURCE_FORK_SUFFIX)) 256 | spath.resize(spath.length() - strlen(RESOURCE_FORK_SUFFIX)); 257 | 258 | if (name == XATTR_RESOURCE_FORK) 259 | { 260 | std::shared_ptr file; 261 | 262 | rv = m_tree->openFile(spath.c_str(), file, true); 263 | if (rv == -EISDIR) 264 | throw operation_not_permitted_error(); 265 | if (rv < 0) 266 | throw file_not_found_error(path); 267 | 268 | if (file->length() == 0) 269 | throw attribute_not_found_error(); 270 | 271 | rv = std::min(std::numeric_limits::max(), file->length()); 272 | output.resize(rv); 273 | 274 | file->read(&output[0], rv, 0); 275 | } 276 | else if (name == XATTR_FINDER_INFO) 277 | { 278 | HFSPlusCatalogFileOrFolder ff; 279 | 280 | rv = m_tree->stat(spath.c_str(), &ff); 281 | if (rv != 0) 282 | throw file_not_found_error(spath); 283 | 284 | uint8_t buf[32]; 285 | const char zero[32] = { 0 }; 286 | getXattrFinderInfo(ff, buf); 287 | if ( memcmp(buf, zero, 32) != 0 ) // push FinderInfo only is non zero 288 | output.insert(output.end(), reinterpret_cast(buf), reinterpret_cast(buf)+32); 289 | } 290 | else 291 | { 292 | HFSPlusCatalogFileOrFolder ff; 293 | 294 | // get CNID 295 | rv = m_tree->stat(spath.c_str(), &ff); 296 | 297 | if (rv != 0) 298 | throw file_not_found_error(spath); 299 | 300 | if (!m_volume->attributes()) 301 | throw attribute_not_found_error(); 302 | if (!m_volume->attributes()->getattr(be(ff.file.fileID), name, output)) 303 | throw attribute_not_found_error(); 304 | } 305 | 306 | return output; 307 | } 308 | 309 | void HFSHighLevelVolume::hfs_nativeToStat(const HFSPlusCatalogFileOrFolder& ff, struct stat* stat, bool resourceFork) 310 | { 311 | assert(stat != nullptr); 312 | memset(stat, 0, sizeof(*stat)); 313 | 314 | #if defined(__APPLE__) && !defined(DARLING) 315 | stat->st_birthtime = HFSCatalogBTree::appleToUnixTime(be(ff.file.createDate)); 316 | #endif 317 | stat->st_atime = HFSCatalogBTree::appleToUnixTime(be(ff.file.accessDate)); 318 | stat->st_mtime = 
HFSCatalogBTree::appleToUnixTime(be(ff.file.contentModDate)); 319 | stat->st_ctime = HFSCatalogBTree::appleToUnixTime(be(ff.file.attributeModDate)); 320 | stat->st_mode = be(ff.file.permissions.fileMode); 321 | stat->st_uid = be(ff.file.permissions.ownerID); 322 | stat->st_gid = be(ff.file.permissions.groupID); 323 | stat->st_ino = be(ff.file.fileID); 324 | stat->st_blksize = 512; 325 | stat->st_nlink = be(ff.file.permissions.special.linkCount); 326 | 327 | if (be(ff.file.recordType) == RecordType::kHFSPlusFileRecord) 328 | { 329 | if (!resourceFork) 330 | { 331 | stat->st_size = be(ff.file.dataFork.logicalSize); 332 | stat->st_blocks = be(ff.file.dataFork.totalBlocks); 333 | } 334 | else 335 | { 336 | stat->st_size = be(ff.file.resourceFork.logicalSize); 337 | stat->st_blocks = be(ff.file.resourceFork.totalBlocks); 338 | } 339 | 340 | if (S_ISCHR(stat->st_mode) || S_ISBLK(stat->st_mode)) 341 | stat->st_rdev = be(ff.file.permissions.special.rawDevice); 342 | } 343 | 344 | if (!stat->st_mode) 345 | { 346 | if (be(ff.file.recordType) == RecordType::kHFSPlusFileRecord) 347 | { 348 | stat->st_mode = S_IFREG; 349 | stat->st_mode |= 0444; 350 | } 351 | else 352 | { 353 | stat->st_mode = S_IFDIR; 354 | stat->st_mode |= 0555; 355 | } 356 | } 357 | } 358 | 359 | decmpfs_disk_header* HFSHighLevelVolume::get_decmpfs(HFSCatalogNodeID cnid, std::vector& holder) 360 | { 361 | HFSAttributeBTree* attributes = m_volume->attributes(); 362 | decmpfs_disk_header* hdr; 363 | 364 | if (!attributes) 365 | return nullptr; 366 | 367 | if (!attributes->getattr(cnid, DECMPFS_XATTR_NAME, holder)) 368 | return nullptr; 369 | 370 | if (holder.size() < 16) 371 | return nullptr; 372 | 373 | hdr = (decmpfs_disk_header*) &holder[0]; 374 | if (hdr->compression_magic != DECMPFS_MAGIC) 375 | return nullptr; 376 | 377 | return hdr; 378 | } 379 | -------------------------------------------------------------------------------- /src/HFSCatalogBTree.cpp: -------------------------------------------------------------------------------- 1 | #include "HFSCatalogBTree.h" 2 | #include "be.h" 3 | #include "exceptions.h" 4 | #include 5 | #include "unichar.h" 6 | #include 7 | #include 8 | using icu::UnicodeString; 9 | static const int MAX_SYMLINKS = 50; 10 | 11 | extern UConverter *g_utf16be; 12 | 13 | HFSCatalogBTree::HFSCatalogBTree(std::shared_ptr fork, HFSVolume* volume, CacheZone* zone) 14 | : HFSBTree(fork, zone, "Catalog"), m_volume(volume), m_hardLinkDirID(0) 15 | { 16 | HFSPlusCatalogFileOrFolder ff; 17 | int rv = stat(std::string("\0\0\0\0HFS+ Private Data", 21), &ff); 18 | if (rv == 0) 19 | m_hardLinkDirID = be(ff.folder.folderID); 20 | } 21 | 22 | bool HFSCatalogBTree::isCaseSensitive() const 23 | { 24 | return m_volume->isHFSX() && m_header.keyCompareType == KeyCompareType::kHFSBinaryCompare; 25 | } 26 | 27 | int HFSCatalogBTree::caseInsensitiveComparator(const Key* indexKey, const Key* desiredKey) 28 | { 29 | const HFSPlusCatalogKey* catIndexKey = reinterpret_cast(indexKey); 30 | const HFSPlusCatalogKey* catDesiredKey = reinterpret_cast(desiredKey); 31 | UnicodeString desiredName, indexName; 32 | UErrorCode error = U_ZERO_ERROR; 33 | 34 | //std::cout << "desired: " << be(catDesiredKey->parentID) << ", index: " << be(catIndexKey->parentID) << "\n"; 35 | if (be(catDesiredKey->parentID) < be(catIndexKey->parentID)) 36 | { 37 | //std::cout << "\t -> bigger\n"; 38 | return 1; 39 | } 40 | else if (be(catDesiredKey->parentID) > be(catIndexKey->parentID)) 41 | { 42 | //std::cout << "\t -> smaller\n"; 43 | return -1; 44 | } 45 | 46 | 
desiredName = UnicodeString((char*)catDesiredKey->nodeName.string, be(catDesiredKey->nodeName.length)*2, g_utf16be, error); 47 | indexName = UnicodeString((char*)catIndexKey->nodeName.string, be(catIndexKey->nodeName.length)*2, g_utf16be, error); 48 | 49 | // Hack for "\0\0\0\0HFS+ Private Data" which should come as last in ordering (issue #11) 50 | if (indexName.charAt(0) == 0) 51 | return 1; 52 | else if (desiredName.charAt(0) == 0) 53 | return -1; 54 | 55 | { 56 | //std::string des, idx; 57 | //desiredName.toUTF8String(des); 58 | //indexName.toUTF8String(idx); 59 | 60 | int r = indexName.caseCompare(desiredName, 0); 61 | 62 | //std::cout << "desired: " << des << " - index: " << idx << " -> r=" << r << std::endl; 63 | 64 | return r; 65 | } 66 | 67 | return 0; 68 | } 69 | 70 | int HFSCatalogBTree::caseSensitiveComparator(const Key* indexKey, const Key* desiredKey) 71 | { 72 | const HFSPlusCatalogKey* catIndexKey = reinterpret_cast(indexKey); 73 | const HFSPlusCatalogKey* catDesiredKey = reinterpret_cast(desiredKey); 74 | UnicodeString desiredName, indexName; 75 | UErrorCode error = U_ZERO_ERROR; 76 | 77 | if (be(catDesiredKey->parentID) < be(catIndexKey->parentID)) 78 | return 1; 79 | else if (be(catDesiredKey->parentID) > be(catIndexKey->parentID)) 80 | return -1; 81 | 82 | desiredName = UnicodeString((char*)catDesiredKey->nodeName.string, be(catDesiredKey->nodeName.length)*2, g_utf16be, error); 83 | indexName = UnicodeString((char*)catIndexKey->nodeName.string, be(catIndexKey->nodeName.length)*2, g_utf16be, error); 84 | 85 | // Hack for "\0\0\0\0HFS+ Private Data" which should come as last in ordering (issue #11) 86 | if (indexName.charAt(0) == 0) 87 | return 1; 88 | else if (desiredName.charAt(0) == 0) 89 | return 1; 90 | 91 | if (desiredName.length() > 0) 92 | { 93 | //std::string des, idx; 94 | //desiredName.toUTF8String(des); 95 | //indexName.toUTF8String(idx); 96 | 97 | int r = indexName.caseCompare(desiredName, 0); 98 | 99 | // std::cout << "desired: " << des << " - index: " << idx << " -> r=" << r << std::endl; 100 | 101 | return r; 102 | } 103 | 104 | return 0; 105 | } 106 | 107 | int HFSCatalogBTree::idOnlyComparator(const Key* indexKey, const Key* desiredKey) 108 | { 109 | const HFSPlusCatalogKey* catIndexKey = reinterpret_cast(indexKey); 110 | const HFSPlusCatalogKey* catDesiredKey = reinterpret_cast(desiredKey); 111 | 112 | //std::cerr << "idOnly: desired: " << be(catDesiredKey->parentID) << ", index: " << be(catIndexKey->parentID) << std::endl; 113 | 114 | if (be(catDesiredKey->parentID) > be(catIndexKey->parentID)) 115 | return -1; 116 | else if (be(catIndexKey->parentID) > be(catDesiredKey->parentID)) 117 | return 1; 118 | else 119 | return 0; 120 | } 121 | 122 | int HFSCatalogBTree::listDirectory(const std::string& path, std::map>& contents) 123 | { 124 | HFSPlusCatalogFileOrFolder dir; 125 | int rv; 126 | std::vector> leaves; 127 | HFSPlusCatalogKey key; 128 | std::map> beContents; 129 | 130 | contents.clear(); 131 | 132 | // determine the CNID of the directory 133 | rv = stat(path, &dir); 134 | if (rv != 0) 135 | return rv; 136 | 137 | if (be(dir.folder.recordType) != RecordType::kHFSPlusFolderRecord) 138 | return -ENOTDIR; 139 | 140 | // find leaves that may contain directory elements 141 | key.parentID = dir.folder.folderID; 142 | leaves = findLeafNodes((Key*) &key, idOnlyComparator); 143 | 144 | for (std::shared_ptr leafPtr : leaves) 145 | { 146 | //std::cerr << "**** Looking for elems with CNID " << be(key.parentID) << std::endl; 147 | 
appendNameAndHFSPlusCatalogFileOrFolderFromLeafForParentId(leafPtr, be(key.parentID), beContents); 148 | } 149 | 150 | for (auto it = beContents.begin(); it != beContents.end(); it++) 151 | { 152 | std::string filename = it->first; 153 | 154 | /* Filter out : 155 | * - "\0\0\0\0HFS+ Private Data" (truth is, every filename whose first char is \0 will be filtered out) 156 | * - ".HFS+ Private Directory Data\r" 157 | * - ".journal" 158 | * - ".journal_info_block" 159 | * from root directory 160 | */ 161 | if (be(dir.folder.folderID) != kHFSRootFolderID || (filename[0]!=0 && filename.compare(".HFS+ Private Directory Data\r")!=0 && filename.compare(".journal")!=0 && filename.compare(".journal_info_block")!=0)) 162 | { 163 | replaceChars(filename, '/', ':'); // Issue #36: / and : have swapped meaning in HFS+ 164 | contents[filename] = it->second; 165 | } 166 | } 167 | 168 | return 0; 169 | } 170 | 171 | static void split(const std::string &s, char delim, std::vector& elems) 172 | { 173 | std::stringstream ss(s); 174 | std::string item; 175 | 176 | while (std::getline(ss, item, delim)) 177 | elems.push_back(item); 178 | } 179 | 180 | std::shared_ptr HFSCatalogBTree::findHFSPlusCatalogFileOrFolderForParentIdAndName(HFSCatalogNodeID parentID, const std::string &elem) 181 | { 182 | HFSPlusCatalogKey key; 183 | key.parentID = htobe32(parentID); 184 | std::vector> leaves; 185 | leaves = findLeafNodes((Key*) &key, idOnlyComparator); 186 | std::map> beContents; 187 | for (std::shared_ptr leafPtr : leaves) 188 | { 189 | //std::cerr << "**** Looking for elems with CNID " << be(key.parentID) << std::endl; 190 | appendNameAndHFSPlusCatalogFileOrFolderFromLeafForParentIdAndName(leafPtr, be(key.parentID), elem, beContents); 191 | } 192 | if (beContents.size() == 0) 193 | return nullptr; 194 | if (beContents.size() > 1) 195 | throw io_error("Multiple records with same name"); 196 | 197 | return beContents.begin()->second; 198 | } 199 | 200 | int HFSCatalogBTree::stat(std::string path, HFSPlusCatalogFileOrFolder* s) 201 | { 202 | std::vector elems; 203 | std::shared_ptr leafNodePtr; 204 | std::shared_ptr last = nullptr; 205 | 206 | memset(s, 0, sizeof(*s)); 207 | 208 | if (path.compare(0, 1, "/") == 0) 209 | path = path.substr(1); 210 | if (!path.empty() && path.compare(path.length()-1, 1, "/") == 0) 211 | path = path.substr(0, path.length()-1); 212 | 213 | elems.push_back(std::string()); 214 | split(path, '/', elems); 215 | 216 | for (size_t i = 0; i < elems.size(); i++) 217 | { 218 | std::string elem = elems[i]; 219 | replaceChars(elem, ':', '/'); // Issue #36: / and : have swapped meaning in HFS+ 220 | 221 | HFSCatalogNodeID parentID = last ? be(last->folder.folderID) : kHFSRootParentID; 222 | 223 | //if (ustr.length() > 255) // FIXME: there is a UCS-2 vs UTF-16 issue here! 224 | // return -ENAMETOOLONG; 225 | 226 | last = findHFSPlusCatalogFileOrFolderForParentIdAndName(parentID, elem); 227 | if (last==nullptr) 228 | return -ENOENT; 229 | 230 | // resolve symlinks, check if directory... 
231 | // FUSE takes care of this 232 | /* 233 | { 234 | RecordType recType = RecordType(be(uint16_t(last->folder.recordType))); 235 | const bool isLastElem = i+1 == elems.size(); 236 | 237 | if (recType == RecordType::kHFSPlusFileRecord) 238 | { 239 | if (last->file.permissions.fileMode & HFSPLUS_S_IFLNK && (!lstat || !isLastElem)) 240 | { 241 | if (currentDepth >= MAX_SYMLINKS) 242 | return -ELOOP; 243 | // TODO: deal with symlinks 244 | // recurse with increased depth 245 | } 246 | else if (!isLastElem) 247 | return -ENOTDIR; 248 | } 249 | } 250 | */ 251 | 252 | //parent = last->folder.folderID; 253 | } 254 | if (be(last->file.userInfo.fileType) == kHardLinkFileType && m_hardLinkDirID != 0) { 255 | std::string iNodePath; 256 | iNodePath += "iNode"; 257 | iNodePath += std::to_string(be(last->file.permissions.special.iNodeNum)); 258 | std::shared_ptr leafNodeHl = findHFSPlusCatalogFileOrFolderForParentIdAndName(m_hardLinkDirID, iNodePath); 259 | if (leafNodeHl!=nullptr) 260 | last = leafNodeHl; 261 | } 262 | *s = *last; 263 | 264 | //std::cout << "File/folder flags: 0x" << std::hex << s->file.flags << std::endl; 265 | 266 | return 0; 267 | } 268 | extern int mustbreak; 269 | 270 | void HFSCatalogBTree::appendNameAndHFSPlusCatalogFileOrFolderFromLeafForParentId(std::shared_ptr leafNodePtr, HFSCatalogNodeID cnid, std::map>& map) 271 | { 272 | appendNameAndHFSPlusCatalogFileOrFolderFromLeafForParentIdAndName(leafNodePtr, cnid, "", map); 273 | } 274 | 275 | void HFSCatalogBTree::appendNameAndHFSPlusCatalogFileOrFolderFromLeafForParentIdAndName(std::shared_ptr leafNodePtr, HFSCatalogNodeID cnid, const std::string& name, std::map>& map) 276 | { 277 | for (int i = 0; i < leafNodePtr->recordCount(); i++) 278 | { 279 | HFSPlusCatalogKey* recordKey; 280 | HFSPlusCatalogFileOrFolder* ff; 281 | RecordType recType; 282 | 283 | recordKey = leafNodePtr->getRecordKey(i); 284 | ff = leafNodePtr->getRecordData(i); 285 | 286 | recType = be(ff->folder.recordType); 287 | //{ 288 | //std::string name = UnicharToString(recordKey->nodeName); 289 | //std::cerr << "RecType " << int(recType) << ", ParentID: " << be(recordKey->parentID) << ", nodeName " << name << std::endl; 290 | //} 291 | 292 | switch (recType) 293 | { 294 | case RecordType::kHFSPlusFolderRecord: 295 | case RecordType::kHFSPlusFileRecord: 296 | { 297 | 298 | // do NOT skip "\0\0\0\0HFS+ Private Data", we need it to get is folderID in constructor 299 | if ( /* recordKey->nodeName.string[0] != 0 &&*/ be(recordKey->parentID) == cnid) 300 | { 301 | bool equal = name.empty(); 302 | if (!equal) 303 | { 304 | if (isCaseSensitive()) 305 | equal = EqualCase(recordKey->nodeName, name); 306 | else 307 | equal = EqualNoCase(recordKey->nodeName, name); 308 | } 309 | 310 | if (equal) 311 | { 312 | std::string name = UnicharToString(recordKey->nodeName); 313 | map[name] = std::shared_ptr(leafNodePtr, ff); // retain leafPtr, act as a HFSPlusCatalogFileOrFolder 314 | } 315 | } 316 | //else 317 | // std::cerr << "CNID not matched - " << cnid << " required\n"; 318 | break; 319 | } 320 | case RecordType::kHFSPlusFolderThreadRecord: 321 | case RecordType::kHFSPlusFileThreadRecord: 322 | break; 323 | } 324 | } 325 | } 326 | 327 | time_t HFSCatalogBTree::appleToUnixTime(uint32_t apple) 328 | { 329 | const time_t offset = 2082844800L; // Nb of seconds between 1st January 1904 12:00:00 and unix epoch 330 | if (apple == 0) 331 | return 0; // 0 stays 0, even if it change the date from 1904 to 1970. 332 | // Force time to wrap around and stay positive number. 
That's how Mac does it. 333 | // File from before 70 will have date in the future. 334 | // Example : Time value 2082844799, that should be Dec 31st 12:59:59 PM will become February 7th 2106 6:28:15 AM. 335 | return uint32_t(apple - offset); 336 | } 337 | 338 | int HFSCatalogBTree::openFile(const std::string& path, std::shared_ptr& forkOut, bool resourceFork) 339 | { 340 | HFSPlusCatalogFileOrFolder ff; 341 | int rv; 342 | 343 | forkOut.reset(); 344 | 345 | rv = stat(path, &ff); 346 | if (rv < 0) 347 | return rv; 348 | 349 | if (be(ff.folder.recordType) != RecordType::kHFSPlusFileRecord) 350 | return -EISDIR; 351 | 352 | forkOut.reset(new HFSFork(m_volume, resourceFork ? ff.file.resourceFork : ff.file.dataFork, 353 | be(ff.file.fileID), resourceFork)); 354 | 355 | return 0; 356 | } 357 | 358 | 359 | #ifdef DEBUG 360 | 361 | void HFSCatalogBTree::dumpTree() const 362 | { 363 | dumpTree(be(m_header.rootNode), 0); 364 | } 365 | 366 | void HFSCatalogBTree::dumpTree(int nodeIndex, int depth) const 367 | { 368 | HFSBTreeNode node(m_reader, nodeIndex, be(m_header.nodeSize)); 369 | 370 | switch (node.kind()) 371 | { 372 | case NodeKind::kBTIndexNode: 373 | { 374 | for (size_t i = 0; i < node.recordCount(); i++) 375 | { 376 | UErrorCode error = U_ZERO_ERROR; 377 | HFSPlusCatalogKey* key = node.getRecordKey(i); 378 | UnicodeString keyName((char*)key->nodeName.string, be(key->nodeName.length)*2, g_utf16be, error); 379 | std::string str; 380 | 381 | keyName.toUTF8String(str); 382 | 383 | // recurse down 384 | uint32_t* childIndex = node.getRecordData(i); 385 | #ifdef DEBUG 386 | printf("Index Node(%4d,%4zd) %s %s(%d) ->child %d\n", nodeIndex, i, std::string(depth, ' ').c_str(), str.c_str(), be(key->parentID), be(*childIndex)); 387 | // std::cout << "Index node(" << nodeIndex << "): " << std::string(depth, ' ') << str << "(" << be(key->parentID) << ")\n"; 388 | #endif 389 | dumpTree(be(*childIndex), depth+2); 390 | } 391 | 392 | break; 393 | } 394 | case NodeKind::kBTLeafNode: 395 | { 396 | for (size_t i = 0; i < node.recordCount(); i++) 397 | { 398 | HFSPlusCatalogKey* recordKey; 399 | UErrorCode error = U_ZERO_ERROR; 400 | UnicodeString keyName; 401 | std::string str; 402 | 403 | recordKey = node.getRecordKey(i); 404 | keyName = UnicodeString((char*)recordKey->nodeName.string, be(recordKey->nodeName.length)*2, g_utf16be, error); 405 | keyName.toUTF8String(str); 406 | 407 | #ifdef DEBUG 408 | printf("Leaf Node(%4d,%4zd) %s %s(%d)\n", nodeIndex, i, std::string(depth, ' ').c_str(), str.c_str(), be(recordKey->parentID)); 409 | // std::cout << "dumpTree(l): " << std::string(depth, ' ') << str << "(" << be(recordKey->parentID) << ")\n"; 410 | #endif 411 | } 412 | 413 | break; 414 | } 415 | case NodeKind::kBTHeaderNode: 416 | case NodeKind::kBTMapNode: 417 | break; 418 | default: 419 | std::cerr << "Invalid node kind! Kind: " << int(node.kind()) << std::endl; 420 | 421 | } 422 | } 423 | #endif 424 | 425 | void HFSCatalogBTree::replaceChars(std::string& str, char oldChar, char newChar) 426 | { 427 | size_t pos = 0; 428 | 429 | while ((pos = str.find(oldChar, pos)) != std::string::npos) 430 | { 431 | str[pos] = newChar; 432 | pos++; 433 | } 434 | } 435 | 436 | --------------------------------------------------------------------------------
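Note on the timestamp conversion in HFSCatalogBTree::appleToUnixTime(): HFS+ stores dates as unsigned seconds since 1904-01-01 00:00:00, while Unix time counts from 1970-01-01 00:00:00, 2 082 844 800 seconds later. The sketch below is a minimal standalone illustration of that conversion and of the deliberate unsigned wrap-around for pre-1970 dates described in the comment above; it is not part of the sources, and main() only demonstrates the two boundary values.

// Standalone sketch of the HFS+ -> Unix timestamp conversion (illustrative only).
#include <cstdint>
#include <iostream>

static time_t appleToUnixSketch(uint32_t apple)
{
    // 24107 days separate the two epochs (66 years, 17 of them leap years).
    const uint32_t kEpochDelta = 2082844800u;

    if (apple == 0)
        return 0; // an unset date stays unset

    // Unsigned subtraction: HFS+ dates before 1970 wrap around to large
    // positive values, i.e. dates in the far future, matching the tree code.
    return uint32_t(apple - kEpochDelta);
}

int main()
{
    // 1970-01-01 00:00:00 in HFS+ time maps to Unix time 0.
    std::cout << appleToUnixSketch(2082844800u) << "\n"; // prints 0

    // One second earlier (1969-12-31 23:59:59) wraps to 4294967295,
    // which falls in February 2106, as the source comment notes.
    std::cout << appleToUnixSketch(2082844799u) << "\n";
    return 0;
}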
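Note on the compressed-file path in HFSHighLevelVolume: get_decmpfs() fetches the com.apple.decmpfs xattr, rejects it if it is shorter than 16 bytes or its compression_magic does not match DECMPFS_MAGIC, and openFile() then treats the payload as raw inline data, inline zlib data, or a reference to a zlib-compressed resource fork. The sketch below spells out the 16-byte header layout those checks imply. It is an assumption for illustration, not a copy of decmpfs.h: the struct name, the exact packing, and the parse_decmpfs helper are hypothetical.

// Hedged sketch of the decmpfs header layout implied by get_decmpfs():
// a 16-byte fixed header followed by the attribute payload.
#include <cstdint>
#include <vector>

struct decmpfs_header_sketch        // hypothetical mirror of decmpfs_disk_header
{
    uint32_t compression_magic;     // checked against DECMPFS_MAGIC
    uint32_t compression_type;      // UncompressedInline / CompressedInline / CompressedResourceFork
    uint64_t uncompressed_size;     // reported as st_size for compressed files
    // payload ("attr_bytes") starts at offset 16
};

// Mirrors the validation steps in get_decmpfs(): reject xattrs that are too
// short or carry the wrong magic, otherwise reinterpret the buffer in place.
static const decmpfs_header_sketch* parse_decmpfs(const std::vector<uint8_t>& xattr,
                                                  uint32_t expectedMagic)
{
    if (xattr.size() < sizeof(decmpfs_header_sketch))
        return nullptr;

    const auto* hdr = reinterpret_cast<const decmpfs_header_sketch*>(xattr.data());
    if (hdr->compression_magic != expectedMagic)
        return nullptr;

    return hdr;
}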