├── .gitignore ├── Gopkg.lock ├── Gopkg.toml ├── LICENSE ├── README.md ├── consts.go ├── csum.go ├── dir.go ├── file.go ├── filesystem.go ├── gexto.go ├── group_descriptor.go ├── inode.go ├── integration_test.go ├── superblock.go └── util.go /.gitignore: -------------------------------------------------------------------------------- 1 | .idea 2 | gexto 3 | vendor/ 4 | -------------------------------------------------------------------------------- /Gopkg.lock: -------------------------------------------------------------------------------- 1 | # This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. 2 | 3 | 4 | [[projects]] 5 | name = "github.com/davecgh/go-spew" 6 | packages = ["spew"] 7 | revision = "346938d642f2ec3594ed81d874461961cd0faa76" 8 | version = "v1.1.0" 9 | 10 | [[projects]] 11 | branch = "master" 12 | name = "github.com/lunixbochs/struc" 13 | packages = ["."] 14 | revision = "02e4c2afbb2ac4bae6876f52c8273fc4cf5a4b0a" 15 | 16 | [[projects]] 17 | name = "github.com/pmezard/go-difflib" 18 | packages = ["difflib"] 19 | revision = "792786c7400a136282c1664665ae0a8db921c6c2" 20 | version = "v1.0.0" 21 | 22 | [[projects]] 23 | name = "github.com/stretchr/testify" 24 | packages = [ 25 | "assert", 26 | "require" 27 | ] 28 | revision = "12b6f73e6084dad08a7c6e575284b177ecafbc71" 29 | version = "v1.2.1" 30 | 31 | [solve-meta] 32 | analyzer-name = "dep" 33 | analyzer-version = 1 34 | inputs-digest = "44dafcfa0a18e62d6ee0a0579094417d00d84beb9f5d4b964c8afa860d41e141" 35 | solver-name = "gps-cdcl" 36 | solver-version = 1 37 | -------------------------------------------------------------------------------- /Gopkg.toml: -------------------------------------------------------------------------------- 1 | # Gopkg.toml example 2 | # 3 | # Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md 4 | # for detailed Gopkg.toml documentation. 5 | # 6 | # required = ["github.com/user/thing/cmd/thing"] 7 | # ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"] 8 | # 9 | # [[constraint]] 10 | # name = "github.com/user/project" 11 | # version = "1.0.0" 12 | # 13 | # [[constraint]] 14 | # name = "github.com/user/project2" 15 | # branch = "dev" 16 | # source = "github.com/myfork/project2" 17 | # 18 | # [[override]] 19 | # name = "github.com/x/y" 20 | # version = "2.4.0" 21 | # 22 | # [prune] 23 | # non-go = false 24 | # go-tests = true 25 | # unused-packages = true 26 | 27 | 28 | [prune] 29 | go-tests = true 30 | unused-packages = true 31 | 32 | [[constraint]] 33 | branch = "master" 34 | name = "github.com/lunixbochs/struc" 35 | 36 | [[constraint]] 37 | name = "github.com/stretchr/testify" 38 | version = "1.2.1" 39 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. 
For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. 
Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 
134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 
193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # gexto 2 | ## EXT2/EXT3/EXT4 Filesystem library for Golang 3 | 4 | #### Introduction 5 | 6 | Gexto is a Go library to allow read / write access to EXT2/3/4 filesystems. 7 | 8 | Created due to my eternal frustration at the crazy world of guestfish, where starting a VM containing a separate and complete linux kernel is apparently the only non-root way of editing a filesystem image. 9 | 10 | Aims to provide an "os."-like interface to the filesystem with file objects behaving basically how you would expect them to. 11 | 12 | #### Minimal Example 13 | 14 | Error checking omitted for brevity 15 | 16 | ```go 17 | import ( 18 | "io/ioutil" 19 | "log" 20 | "github.com/nerd2/gexto" 21 | ) 22 | 23 | func main() { 24 | fs, _ := gexto.NewFileSystem("file.ext4") 25 | 26 | f, _ := fs.Create("/test") 27 | f.Write([]byte("hello world")) 28 | f.Close() 29 | 30 | g, _ := fs.Open("/another/file") 31 | log.Println(ioutil.ReadAll(g)) 32 | } 33 | ``` 34 | 35 | #### Testing 36 | 37 | Note that testing requires (passwordless) sudo, in order that the test filesystems can be mounted. 38 | 39 | 40 | 41 | 42 | -------------------------------------------------------------------------------- /consts.go: -------------------------------------------------------------------------------- 1 | package gexto 2 | 3 | const BAD_INO = 1 4 | const ROOT_INO = 2 5 | const USR_QUOTA_INO = 3 6 | const GRP_QUOTA_INO = 4 7 | const BOOT_LOADER_INO = 5 8 | const UNDEL_DIR_INO = 6 9 | const RESIZE_INO = 7 10 | const JOURNAL_INO = 8 11 | 12 | const BG_INODE_UNINIT = 0x0001 13 | const BG_BLOCK_UNINIT = 0x0002 14 | const BG_INODE_ZEROED = 0x0004 15 | 16 | // Inode flags 17 | const SECRM_FL = 0x00000001 18 | const UNRM_FL = 0x00000002 19 | const COMPR_FL = 0x00000004 20 | const SYNC_FL = 0x00000008 21 | const IMMUTABLE_FL = 0x00000010 22 | const APPEND_FL = 0x00000020 23 | const NODUMP_FL = 0x00000040 24 | const NOATIME_FL = 0x00000080 25 | const DIRTY_FL = 0x00000100 26 | const COMPRBLK_FL = 0x00000200 27 | const NOCOMPR_FL = 0x00000400 28 | const ENCRYPT_FL = 0x00000800 29 | const INDEX_FL = 0x00001000 30 | const IMAGIC_FL = 0x00002000 31 | const JOURNAL_DATA_FL = 0x00004000 32 | const NOTAIL_FL = 0x00008000 33 | const DIRSYNC_FL = 0x00010000 34 | const TOPDIR_FL = 0x00020000 35 | const HUGE_FILE_FL = 0x00040000 36 | const EXTENTS_FL = 0x00080000 37 | const EA_INODE_FL = 0x00200000 38 | const EOFBLOCKS_FL = 0x00400000 39 | const INLINE_DATA_FL = 0x10000000 40 | const PROJINHERIT_FL = 0x20000000 41 | const RESERVED_FL = 0x80000000 42 | 43 | const FL_USER_VISIBLE = 0x304BDFFF 44 | const FL_USER_MODIFIABLE = 0x204BC0FF 45 | 46 | const ( 47 | EXT4_INODE_SECRM = 0 48 | EXT4_INODE_UNRM = 1 49 | EXT4_INODE_COMPR = 2 50 | EXT4_INODE_SYNC = 3 51 | EXT4_INODE_IMMUTABLE = 4 52 | EXT4_INODE_APPEND = 5 53 | EXT4_INODE_NODUMP = 6 54 | EXT4_INODE_NOATIME = 7 55 | EXT4_INODE_DIRTY = 8 56 | EXT4_INODE_COMPRBLK = 9 57 | EXT4_INODE_NOCOMPR = 10 58 | 
EXT4_INODE_ENCRYPT = 11 59 | EXT4_INODE_INDEX = 12 60 | EXT4_INODE_IMAGIC = 13 61 | EXT4_INODE_JOURNAL_DATA = 14 62 | EXT4_INODE_NOTAIL = 15 63 | EXT4_INODE_DIRSYNC = 16 64 | EXT4_INODE_TOPDIR = 17 65 | EXT4_INODE_HUGE_FILE = 18 66 | EXT4_INODE_EXTENTS = 19 67 | EXT4_INODE_EA_INODE = 21 68 | EXT4_INODE_EOFBLOCKS = 22 69 | EXT4_INODE_INLINE_DATA = 28 70 | EXT4_INODE_PROJINHERIT = 29 71 | EXT4_INODE_RESERVED = 31 72 | ) 73 | 74 | const FEATURE_COMPAT_DIR_PREALLOC = 0x0001 75 | const FEATURE_COMPAT_IMAGIC_INODES = 0x0002 76 | const FEATURE_COMPAT_HAS_JOURNAL = 0x0004 77 | const FEATURE_COMPAT_EXT_ATTR = 0x0008 78 | const FEATURE_COMPAT_RESIZE_INODE = 0x0010 79 | const FEATURE_COMPAT_DIR_INDEX = 0x0020 80 | const FEATURE_COMPAT_SPARSE_SUPER2 = 0x0200 81 | 82 | const FEATURE_RO_COMPAT_SPARSE_SUPER = 0x0001 83 | const FEATURE_RO_COMPAT_LARGE_FILE = 0x0002 84 | const FEATURE_RO_COMPAT_BTREE_DIR = 0x0004 85 | const FEATURE_RO_COMPAT_HUGE_FILE = 0x0008 86 | const FEATURE_RO_COMPAT_GDT_CSUM = 0x0010 87 | const FEATURE_RO_COMPAT_DIR_NLINK = 0x0020 88 | const FEATURE_RO_COMPAT_EXTRA_ISIZE = 0x0040 89 | const FEATURE_RO_COMPAT_QUOTA = 0x0100 90 | const FEATURE_RO_COMPAT_BIGALLOC = 0x0200 91 | const FEATURE_RO_COMPAT_METADATA_CSUM = 0x0400 92 | const FEATURE_RO_COMPAT_READONLY = 0x1000 93 | const FEATURE_RO_COMPAT_PROJECT = 0x2000 94 | 95 | const FEATURE_INCOMPAT_COMPRESSION = 0x0001 96 | const FEATURE_INCOMPAT_FILETYPE = 0x0002 97 | const FEATURE_INCOMPAT_RECOVER = 0x0004 98 | const FEATURE_INCOMPAT_JOURNAL_DEV = 0x0008 99 | const FEATURE_INCOMPAT_META_BG = 0x0010 100 | const FEATURE_INCOMPAT_EXTENTS = 0x0040 101 | const FEATURE_INCOMPAT_64BIT = 0x0080 102 | const FEATURE_INCOMPAT_MMP = 0x0100 103 | const FEATURE_INCOMPAT_FLEX_BG = 0x0200 104 | const FEATURE_INCOMPAT_EA_INODE = 0x0400 105 | const FEATURE_INCOMPAT_DIRDATA = 0x1000 106 | const FEATURE_INCOMPAT_CSUM_SEED = 0x2000 107 | const FEATURE_INCOMPAT_LARGEDIR = 0x4000 108 | const FEATURE_INCOMPAT_INLINE_DATA = 0x8000 109 | const FEATURE_INCOMPAT_ENCRYPT = 0x10000 110 | -------------------------------------------------------------------------------- /csum.go: -------------------------------------------------------------------------------- 1 | package gexto 2 | 3 | import ( 4 | "hash/crc32" 5 | "encoding/binary" 6 | "io" 7 | ) 8 | 9 | type Checksummer interface { 10 | io.Writer 11 | WriteUint32(uint32) 12 | Get() uint32 13 | } 14 | 15 | func NewChecksummer(sb *Superblock) Checksummer { 16 | return &checksummer{ 17 | sb: sb, 18 | val: 0, 19 | table: crc32.MakeTable(crc32.Castagnoli), // TODO: Check crc used in sb? 
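// Note on the TODO above: ext4's metadata_csum feature defines crc32c (Castagnoli) as its
// only checksum algorithm, so using the Castagnoli table unconditionally matches what
// e2fsprogs writes; a stricter implementation would still check the superblock's
// checksum-type field before building the table.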
20 | } 21 | } 22 | 23 | type checksummer struct { 24 | sb *Superblock 25 | val uint32 26 | table *crc32.Table 27 | } 28 | 29 | func (cs *checksummer) Write(b []byte) (n int, err error) { 30 | cs.val = crc32.Update(cs.val, cs.table, b) 31 | return len(b), nil 32 | } 33 | 34 | func (cs *checksummer) WriteUint32(x uint32) { 35 | b := make([]byte, 4) 36 | binary.LittleEndian.PutUint32(b, x) 37 | cs.Write(b) 38 | } 39 | 40 | func (cs *checksummer) Get() uint32 { 41 | return ^cs.val 42 | } -------------------------------------------------------------------------------- /dir.go: -------------------------------------------------------------------------------- 1 | package gexto 2 | 3 | import ( 4 | "github.com/lunixbochs/struc" 5 | "fmt" 6 | "log" 7 | ) 8 | 9 | type directory struct { 10 | sb *Superblock 11 | f *File 12 | } 13 | 14 | func NewDirectory(inode *Inode) *directory { 15 | return &directory{ 16 | f: &File{ 17 | extFile{ 18 | fs: inode.fs, 19 | inode: inode, 20 | pos: 0, 21 | }, 22 | }, 23 | sb: inode.fs.sb, 24 | } 25 | } 26 | 27 | func (dir *directory) AddEntry(entry *DirectoryEntry2) error { 28 | entrySize, _ := struc.Sizeof(entry) 29 | entry.Rec_len = uint16(entrySize) 30 | 31 | pos, _ := dir.f.Seek(0, 2) 32 | if pos % dir.sb.GetBlockSize() != 0 { 33 | return fmt.Errorf("Unexpected size of directory file: %d", pos) 34 | } else if pos == 0 { 35 | return fmt.Errorf("Unexpected empty directory") 36 | } 37 | dir.f.Seek(pos - dir.sb.GetBlockSize(), 0) 38 | 39 | //log.Println("AddEntry", pos) 40 | 41 | checksummer := NewChecksummer(dir.sb) 42 | checksummer.Write(dir.f.inode.fs.sb.Uuid[:]) 43 | checksummer.WriteUint32(uint32(dir.f.inode.num)) 44 | checksummer.WriteUint32(uint32(dir.f.inode.Generation)) 45 | 46 | totalLen := int64(0) 47 | modified := false 48 | for totalLen < dir.sb.GetBlockSize() { 49 | //log.Println("AddEntry loop ", totalLen) 50 | if totalLen == dir.sb.GetBlockSize() - 12 { 51 | //log.Println("AddEntry found checksum", modified) 52 | if modified { 53 | dirSum := DirectoryEntryCsum{ 54 | FakeInodeZero: 0, 55 | Rec_len: uint16(12), 56 | FakeName_len: 0, 57 | FakeFileType: 0xDE, 58 | Checksum: checksummer.Get(), 59 | } 60 | struc.Pack(dir.f, &dirSum) 61 | } 62 | break 63 | } 64 | 65 | dirEntry := &DirectoryEntry2{} 66 | err := struc.Unpack(dir.f, dirEntry) 67 | if err != nil { 68 | return err 69 | } 70 | 71 | //log.Println("AddEntry found entry", dirEntry.Rec_len, dirEntry.Name) 72 | 73 | if dirEntry.Rec_len == 0 { 74 | log.Fatalf("Invalid") 75 | } 76 | 77 | deSize, _ := struc.Sizeof(dirEntry) 78 | if !modified && int64(dirEntry.Rec_len) >= int64(deSize) + int64(entrySize) { 79 | //log.Println("Found a hole", dirEntry.Rec_len, deSize, entrySize) 80 | dir.f.Seek(pos - dir.sb.GetBlockSize() + int64(totalLen), 0) 81 | newDeSize := (deSize + 3) & ^3 82 | entry.Rec_len = dirEntry.Rec_len - uint16(newDeSize) 83 | dirEntry.Rec_len = uint16(newDeSize) 84 | struc.Pack(dir.f, dirEntry) 85 | struc.Pack(checksummer, dirEntry) 86 | pad1 := make([]byte, newDeSize - deSize) 87 | dir.f.Write(pad1) 88 | checksummer.Write(pad1) 89 | struc.Pack(dir.f, entry) 90 | struc.Pack(checksummer, entry) 91 | pad := make([]byte, int(entry.Rec_len) - entrySize) 92 | dir.f.Write(pad) 93 | checksummer.Write(pad) 94 | totalLen += int64(dirEntry.Rec_len) + int64(entry.Rec_len) 95 | modified = true 96 | } else { 97 | struc.Pack(checksummer, dirEntry) 98 | skip := int64(dirEntry.Rec_len) - int64(deSize) 99 | dir.f.Seek(skip, 1) 100 | checksummer.Write(make([]byte, skip)) 101 | totalLen += int64(dirEntry.Rec_len) 
102 | } 103 | } 104 | 105 | if !modified { 106 | log.Println("No hole found, adding new block") 107 | checksummer := NewChecksummer(dir.sb) 108 | checksummer.Write(dir.f.inode.fs.sb.Uuid[:]) 109 | checksummer.WriteUint32(uint32(dir.f.inode.num)) 110 | checksummer.WriteUint32(uint32(dir.f.inode.Generation)) 111 | 112 | entry.Rec_len = uint16(dir.sb.GetBlockSize() - 12) 113 | struc.Pack(dir.f, entry) 114 | struc.Pack(checksummer, entry) 115 | dirSum := DirectoryEntryCsum{ 116 | FakeInodeZero: 0, 117 | Rec_len: uint16(12), 118 | FakeName_len: 0, 119 | FakeFileType: 0xDE, 120 | Checksum: checksummer.Get(), 121 | } 122 | struc.Pack(dir.f, &dirSum) 123 | } 124 | 125 | return nil 126 | } 127 | -------------------------------------------------------------------------------- /file.go: -------------------------------------------------------------------------------- 1 | package gexto 2 | 3 | import ( 4 | "io" 5 | "fmt" 6 | "log" 7 | ) 8 | 9 | type extFile struct { 10 | fs *fs 11 | inode *Inode 12 | pos int64 13 | } 14 | 15 | func (f *File) Read(p []byte) (n int, err error) { 16 | //log.Println("read", len(p), f.pos, f.inode.GetSize()) 17 | blockNum := f.pos / f.fs.sb.GetBlockSize() 18 | blockPos := f.pos % f.fs.sb.GetBlockSize() 19 | len := int64(len(p)) 20 | offset := int64(0) 21 | 22 | //log.Println("Read", len, f.pos, f.inode.GetSize()) 23 | if len + f.pos > int64(f.inode.GetSize()) { 24 | len = int64(f.inode.GetSize()) - f.pos 25 | } 26 | 27 | if len <= 0 { 28 | //log.Println("EOF") 29 | return 0, io.EOF 30 | } 31 | 32 | for len > 0 { 33 | blockPtr, contiguousBlocks, found := f.inode.GetBlockPtr(blockNum) 34 | if !found { 35 | return int(offset), io.ErrUnexpectedEOF 36 | } 37 | 38 | f.fs.dev.Seek(blockPtr * f.fs.sb.GetBlockSize() + blockPos, 0) 39 | 40 | blockReadLen := contiguousBlocks * f.fs.sb.GetBlockSize() - blockPos 41 | if blockReadLen > len { 42 | blockReadLen = len 43 | } 44 | //log.Println(len, blockNum, blockPos, blockPtr, blockReadLen, offset) 45 | n, err := io.LimitReader(f.fs.dev, blockReadLen).Read(p[offset:]) 46 | if err != nil { 47 | return 0, err 48 | } 49 | offset += int64(n) 50 | blockPos = 0 51 | blockNum++ 52 | len -= int64(n) 53 | } 54 | f.pos += offset 55 | //log.Println(int(offset)) 56 | return int(offset), nil 57 | } 58 | 59 | func (f *File) Write(p []byte) (n int, err error) { 60 | totalLen := len(p) 61 | 62 | //log.Println("Doing write", totalLen, p) 63 | 64 | for len(p) > 0 { 65 | blockNum := f.pos / f.fs.sb.GetBlockSize() 66 | blockPos := f.pos % f.fs.sb.GetBlockSize() 67 | 68 | //log.Println("Doing write", f.pos, blockNum, blockPos) 69 | 70 | blockPtr, contiguousBlocks, found := f.inode.GetBlockPtr(blockNum) 71 | 72 | if !found { 73 | //log.Println("Not found, extending") 74 | blockPtr, contiguousBlocks = f.inode.AddBlocks((int64(len(p)) + f.inode.fs.sb.GetBlockSize() - 1) / f.inode.fs.sb.GetBlockSize()) 75 | } 76 | 77 | //log.Println(blockNum, blockPos, blockPtr, contiguousBlocks, len(p)) 78 | writable := contiguousBlocks * f.fs.sb.GetBlockSize() - blockPos 79 | 80 | if writable == 0 { 81 | log.Fatalf("panic") 82 | } 83 | 84 | if writable > int64(len(p)) { 85 | writable = int64(len(p)) 86 | } 87 | 88 | f.pos += writable 89 | //log.Println("seek", blockPtr * f.fs.sb.GetBlockSize() + blockPos, "write", writable) 90 | f.fs.dev.Seek(blockPtr * f.fs.sb.GetBlockSize() + blockPos, 0) 91 | f.fs.dev.Write(p[:writable]) 92 | p = p[writable:] 93 | } 94 | 95 | if f.inode.GetSize() < f.pos { 96 | f.inode.SetSize(f.inode.GetSize() + int64(totalLen)) 97 | } 98 | 
//log.Println("Write complete") 99 | 100 | return totalLen, nil 101 | } 102 | 103 | func (f *File) Seek(offset int64, whence int) (ret int64, err error) { 104 | switch whence { 105 | case 0: 106 | f.pos = offset 107 | case 1: 108 | f.pos += offset 109 | case 2: 110 | f.pos = f.inode.GetSize() - offset 111 | default: 112 | return 0, fmt.Errorf("Unsupported whence") 113 | } 114 | 115 | if f.pos >= f.inode.GetSize() { 116 | return f.inode.GetSize(), io.EOF 117 | } else { 118 | return f.pos, nil 119 | } 120 | } 121 | -------------------------------------------------------------------------------- /filesystem.go: -------------------------------------------------------------------------------- 1 | package gexto 2 | 3 | import ( 4 | "os" 5 | "strings" 6 | "github.com/lunixbochs/struc" 7 | "log" 8 | "fmt" 9 | "io" 10 | ) 11 | 12 | type fs struct { 13 | sb *Superblock 14 | dev *os.File 15 | } 16 | 17 | func (fs *fs) Open(name string) (*File, error) { 18 | parts := strings.Split(name, "/") 19 | 20 | inodeNum := int64(ROOT_INO) 21 | var inode *Inode 22 | for _, part := range parts { 23 | if len(part) == 0 { 24 | continue 25 | } 26 | 27 | inode = fs.getInode(inodeNum) 28 | dirContents := inode.ReadDirectory() 29 | found := false 30 | for i := 0; i < len(dirContents); i++ { 31 | //log.Println(string(dirContents[i].Name), part, dirContents[i].Flags, dirContents[i].Inode) 32 | if string(dirContents[i].Name) == part { 33 | found = true 34 | inodeNum = int64(dirContents[i].Inode) 35 | break 36 | } 37 | } 38 | 39 | if !found { 40 | return nil, fmt.Errorf("No such file or directory") 41 | } 42 | } 43 | 44 | inode = fs.getInode(inodeNum) 45 | //log.Printf("Inode %d with mode %x", inode.num, inode.Mode) 46 | return &File{extFile{ 47 | fs: fs, 48 | inode: inode, 49 | pos: 0, 50 | }}, nil 51 | } 52 | 53 | func (fs *fs) Create(path string) (*File, error) { 54 | log.Println("CREATE", path) 55 | parts := strings.Split(path, "/") 56 | 57 | inode := fs.getInode(int64(ROOT_INO)) 58 | 59 | for _, part := range parts[:len(parts)-1] { 60 | if len(part) == 0 { 61 | continue 62 | } 63 | 64 | dirContents := inode.ReadDirectory() 65 | found := false 66 | for i := 0; i < len(dirContents); i++ { 67 | //log.Println(string(dirContents[i].Name), part, dirContents[i].Flags, dirContents[i].Inode) 68 | if string(dirContents[i].Name) == part { 69 | found = true 70 | inode = fs.getInode(int64(dirContents[i].Inode)) 71 | break 72 | } 73 | } 74 | 75 | if !found { 76 | return nil, fmt.Errorf("No such file or directory") 77 | } 78 | } 79 | 80 | name := parts[len(parts)-1] 81 | 82 | newFile := fs.CreateNewFile(0777) 83 | log.Printf("Creating new file with inode %d and perms %d", newFile.inode.num, newFile.inode.Mode) 84 | newFile.inode.Mode |= 0x8000 85 | newFile.inode.UpdateCsumAndWriteback() 86 | 87 | NewDirectory(inode).AddEntry(&DirectoryEntry2{ 88 | Inode: uint32(newFile.inode.num), 89 | Flags: 0, 90 | Name: name, 91 | }) 92 | 93 | return newFile, nil 94 | } 95 | 96 | func (fs *fs) Remove(name string) error { 97 | return nil 98 | } 99 | 100 | func (fs *fs) Mkdir(path string, perm os.FileMode) error { 101 | log.Println("MKDIR", path) 102 | parts := strings.Split(path, "/") 103 | 104 | inode := fs.getInode(int64(ROOT_INO)) 105 | 106 | for _, part := range parts[:len(parts)-1] { 107 | if len(part) == 0 { 108 | continue 109 | } 110 | 111 | dirContents := inode.ReadDirectory() 112 | found := false 113 | for i := 0; i < len(dirContents); i++ { 114 | //log.Println(string(dirContents[i].Name), part, dirContents[i].Flags, dirContents[i].Inode) 
115 | if string(dirContents[i].Name) == part { 116 | found = true 117 | inode = fs.getInode(int64(dirContents[i].Inode)) 118 | break 119 | } 120 | } 121 | 122 | if !found { 123 | return fmt.Errorf("No such file or directory") 124 | } 125 | } 126 | 127 | name := parts[len(parts)-1] 128 | 129 | newFile := fs.CreateNewFile(perm) 130 | log.Printf("Creating new directory with inode %d and perms %d", newFile.inode.num, newFile.inode.Mode) 131 | newFile.inode.Mode |= 0x4000 132 | newFile.inode.UpdateCsumAndWriteback() 133 | 134 | { 135 | checksummer := NewChecksummer(inode.fs.sb) 136 | checksummer.Write(inode.fs.sb.Uuid[:]) 137 | checksummer.WriteUint32(uint32(newFile.inode.num)) 138 | checksummer.WriteUint32(uint32(newFile.inode.Generation)) 139 | 140 | dirEntryDot := DirectoryEntry2{ 141 | Inode: uint32(newFile.inode.num), 142 | Flags: 2, 143 | Rec_len: 12, 144 | Name: ".", 145 | } 146 | recLenDot, _ := struc.Sizeof(&dirEntryDot) 147 | struc.Pack(checksummer, dirEntryDot) 148 | struc.Pack(newFile, dirEntryDot) 149 | { 150 | blank1 := make([]byte, 12-recLenDot) 151 | checksummer.Write(blank1) 152 | newFile.Write(blank1) 153 | } 154 | 155 | dirEntryDotDot := DirectoryEntry2{ 156 | Inode: uint32(inode.num), 157 | Flags: 2, 158 | Name: "..", 159 | } 160 | recLenDotDot, _ := struc.Sizeof(&dirEntryDotDot) 161 | dirEntryDotDot.Rec_len = uint16(1024 - 12 - 12) 162 | struc.Pack(checksummer, dirEntryDotDot) 163 | struc.Pack(newFile, dirEntryDotDot) 164 | 165 | blank := make([]byte, 1024 - 12 - 12 - recLenDotDot) 166 | checksummer.Write(blank) 167 | newFile.Write(blank) 168 | 169 | dirSum := DirectoryEntryCsum{ 170 | FakeInodeZero: 0, 171 | Rec_len: uint16(12), 172 | FakeName_len: 0, 173 | FakeFileType: 0xDE, 174 | Checksum: checksummer.Get(), 175 | } 176 | struc.Pack(newFile, &dirSum) 177 | } 178 | 179 | NewDirectory(inode).AddEntry(&DirectoryEntry2{ 180 | Inode: uint32(newFile.inode.num), 181 | Flags: 0, 182 | Name: name, 183 | }) 184 | 185 | newFile.inode.Links_count++ 186 | newFile.inode.UpdateCsumAndWriteback() 187 | 188 | inode.Links_count++ 189 | inode.UpdateCsumAndWriteback() 190 | 191 | bgd := fs.getBlockGroupDescriptor((newFile.inode.num-1) / int64(inode.fs.sb.InodePer_group)) 192 | bgd.Used_dirs_count_lo++ 193 | bgd.UpdateCsumAndWriteback() 194 | 195 | return nil 196 | } 197 | 198 | func (fs *fs) Close() error { 199 | err := fs.dev.Close() 200 | if err != nil { 201 | return err 202 | } 203 | fs.sb = nil 204 | fs.dev = nil 205 | return nil 206 | } 207 | 208 | // -------------------------- 209 | 210 | 211 | func (fs *fs) getInode(inodeAddress int64) *Inode { 212 | bgd := fs.getBlockGroupDescriptor((inodeAddress - 1) / int64(fs.sb.InodePer_group)) 213 | index := (inodeAddress - 1) % int64(fs.sb.InodePer_group) 214 | pos := bgd.GetInodeTableLoc() * fs.sb.GetBlockSize() + index * int64(fs.sb.Inode_size) 215 | //log.Printf("%d %d %d %d", bgd.GetInodeTableLoc(), fs.sb.GetBlockSize(), index, fs.sb.Inode_size) 216 | fs.dev.Seek(pos, 0) 217 | 218 | inode := &Inode{ 219 | fs: fs, 220 | address: pos, 221 | num: inodeAddress,} 222 | struc.Unpack(fs.dev, &inode) 223 | //log.Printf("Read inode %d, contents:\n%+v\n", inodeAddress, inode) 224 | return inode 225 | } 226 | 227 | func (fs *fs) getBlockGroupDescriptor(blockGroupNum int64) *GroupDescriptor { 228 | blockSize := fs.sb.GetBlockSize() 229 | bgdtLocation := 1024/blockSize + 1 230 | 231 | size := int64(32) 232 | if fs.sb.FeatureIncompat64bit() { 233 | size = int64(64) 234 | } 235 | addr := bgdtLocation*blockSize + size * blockGroupNum 236 | bgd := 
&GroupDescriptor{ 237 | fs:fs, 238 | address: addr, 239 | num: blockGroupNum, 240 | } 241 | fs.dev.Seek(addr, 0) 242 | struc.Unpack(io.LimitReader(fs.dev, size), &bgd) 243 | //log.Printf("Read block group %d, contents:\n%+v\n", blockGroupNum, bgd) 244 | return bgd 245 | } 246 | 247 | func (fs *fs) CreateNewFile(perm os.FileMode) *File { 248 | var inode *Inode 249 | for i := int64(0); i < fs.sb.numBlockGroups; i++ { 250 | bgd := fs.getBlockGroupDescriptor(i) 251 | inode = bgd.GetFreeInode() 252 | if inode != nil { 253 | break 254 | } 255 | } 256 | 257 | if inode == nil { 258 | log.Fatalln("Couldn't get free inode", fs.sb.numBlockGroups, fs.sb.Free_inodeCount) 259 | return nil 260 | } 261 | 262 | inode.Mode = uint16(perm & 0x1FF) 263 | inode.UpdateCsumAndWriteback() 264 | 265 | return &File{extFile{ 266 | fs: fs, 267 | inode: inode, 268 | }} 269 | } 270 | 271 | func (fs *fs) GetFreeBlocks(n int) (int64, int64) { 272 | for i := int64(0); i < fs.sb.numBlockGroups; i++ { 273 | bgd := fs.getBlockGroupDescriptor(i) 274 | blockNum, numBlocks := bgd.GetFreeBlocks(int64(n)) 275 | if blockNum > 0 { 276 | return blockNum + i * int64(fs.sb.BlockPer_group), numBlocks 277 | } 278 | } 279 | log.Fatalf("Failed to find free block") 280 | return 0, 0 281 | } -------------------------------------------------------------------------------- /gexto.go: -------------------------------------------------------------------------------- 1 | package gexto 2 | 3 | import ( 4 | "os" 5 | "github.com/lunixbochs/struc" 6 | "fmt" 7 | "syscall" 8 | ) 9 | 10 | type File struct { 11 | extFile 12 | } 13 | 14 | type FileSystem interface { 15 | Open(name string) (*File, error) 16 | Create(name string) (*File, error) 17 | Remove(name string) error 18 | Mkdir(name string, perm os.FileMode) error 19 | Close() error 20 | } 21 | 22 | func NewFileSystem(devicePath string) (FileSystem, error) { 23 | f, err := os.OpenFile(devicePath, syscall.O_RDWR, 0755) 24 | if err != nil { 25 | return nil, err 26 | } 27 | 28 | ret := fs{} 29 | 30 | f.Seek(1024, 0) 31 | 32 | ret.dev = f 33 | ret.sb = &Superblock{ 34 | address: 1024, 35 | fs: &ret, 36 | } 37 | err = struc.Unpack(f, ret.sb) 38 | if err != nil { 39 | return nil, err 40 | } 41 | 42 | //log.Printf("Super:\n%+v\n", *ret.sb) 43 | 44 | numBlockGroups := (ret.sb.GetBlockCount() + int64(ret.sb.BlockPer_group) - 1) / int64(ret.sb.BlockPer_group) 45 | numBlockGroups2 := (ret.sb.InodeCount + ret.sb.InodePer_group - 1) / ret.sb.InodePer_group 46 | if numBlockGroups != int64(numBlockGroups2) { 47 | return nil, fmt.Errorf("Block/inode mismatch: %d %d %d", ret.sb.GetBlockCount(), numBlockGroups, numBlockGroups2) 48 | } 49 | 50 | ret.sb.numBlockGroups = numBlockGroups 51 | 52 | return &ret, nil 53 | } 54 | 55 | 56 | -------------------------------------------------------------------------------- /group_descriptor.go: -------------------------------------------------------------------------------- 1 | package gexto 2 | 3 | import ( 4 | "github.com/lunixbochs/struc" 5 | "math/bits" 6 | ) 7 | 8 | type GroupDescriptor struct { 9 | Block_bitmap_lo uint32 `struc:"uint32,little"` 10 | Inode_bitmap_lo uint32 `struc:"uint32,little"` 11 | Inode_table_lo uint32 `struc:"uint32,little"` 12 | Free_blocks_count_lo uint16 `struc:"uint16,little"` 13 | Free_inodes_count_lo uint16 `struc:"uint16,little"` 14 | Used_dirs_count_lo uint16 `struc:"uint16,little"` 15 | Flags uint16 `struc:"uint16,little"` 16 | Exclude_bitmap_lo uint32 `struc:"uint32,little"` 17 | Block_bitmap_csum_lo uint16 `struc:"uint16,little"` 18 | 
Inode_bitmap_csum_lo uint16 `struc:"uint16,little"` 19 | Itable_unused_lo uint16 `struc:"uint16,little"` 20 | Checksum uint16 `struc:"uint16,little"` 21 | Block_bitmap_hi uint32 `struc:"uint32,little"` 22 | Inode_bitmap_hi uint32 `struc:"uint32,little"` 23 | Inode_table_hi uint32 `struc:"uint32,little"` 24 | Free_blocks_count_hi uint16 `struc:"uint16,little"` 25 | Free_inodes_count_hi uint16 `struc:"uint16,little"` 26 | Used_dirs_count_hi uint16 `struc:"uint16,little"` 27 | Itable_unused_hi uint16 `struc:"uint16,little"` 28 | Exclude_bitmap_hi uint32 `struc:"uint32,little"` 29 | Block_bitmap_csum_hi uint16 `struc:"uint16,little"` 30 | Inode_bitmap_csum_hi uint16 `struc:"uint16,little"` 31 | Reserved uint32 `struc:"uint32,little"` 32 | fs *fs 33 | num int64 34 | address int64 35 | }; 36 | 37 | func (bgd *GroupDescriptor) GetInodeBitmapLoc() int64 { 38 | if bgd.fs.sb.FeatureIncompat64bit() { 39 | return (int64(bgd.Inode_bitmap_hi) << 32) | int64(bgd.Inode_bitmap_lo) 40 | } else { 41 | return int64(bgd.Inode_bitmap_lo) 42 | } 43 | } 44 | 45 | func (bgd *GroupDescriptor) GetInodeTableLoc() int64 { 46 | if bgd.fs.sb.FeatureIncompat64bit() { 47 | return (int64(bgd.Inode_table_hi) << 32) | int64(bgd.Inode_table_lo) 48 | } else { 49 | return int64(bgd.Inode_table_lo) 50 | } 51 | } 52 | 53 | func (bgd *GroupDescriptor) GetBlockBitmapLoc() int64 { 54 | if bgd.fs.sb.FeatureIncompat64bit() { 55 | return (int64(bgd.Block_bitmap_hi) << 32) | int64(bgd.Block_bitmap_lo) 56 | } else { 57 | return int64(bgd.Block_bitmap_lo) 58 | } 59 | } 60 | 61 | func (bgd *GroupDescriptor) UpdateCsumAndWriteback() { 62 | cs := NewChecksummer(bgd.fs.sb) 63 | 64 | cs.Write(bgd.fs.sb.Uuid[:]) 65 | cs.WriteUint32(uint32(bgd.num)) 66 | bgd.Checksum = 0 67 | struc.Pack(cs, bgd) 68 | bgd.Checksum = uint16(cs.Get() & 0xFFFF) 69 | 70 | bgd.fs.dev.Seek(bgd.address, 0) 71 | struc.Pack(bgd.fs.dev, bgd) 72 | } 73 | 74 | func(bgd *GroupDescriptor) GetFreeInode() *Inode { 75 | start := bgd.GetInodeBitmapLoc() * bgd.fs.sb.GetBlockSize() 76 | bgd.fs.dev.Seek(start, 0) 77 | 78 | subInodeNum := int64(-1) 79 | 80 | if bgd.Flags & BG_INODE_UNINIT != 0 { 81 | b := make([]byte, bgd.fs.sb.InodePer_group/8) 82 | b[0] = 1 83 | bgd.fs.dev.Write(b) 84 | 85 | bgd.Flags &= 0xFFFF ^ BG_INODE_UNINIT 86 | bgd.UpdateCsumAndWriteback() 87 | 88 | subInodeNum = 0 89 | } else { 90 | // Find free inode in bitmap 91 | for i := 0; i < int(bgd.fs.sb.InodePer_group/8); i++ { 92 | b := make([]byte, 1) 93 | bgd.fs.dev.Read(b) 94 | if b[0] != 0xFF { 95 | //log.Println("free at ", bgd.num, start, i) 96 | bitNum := bits.TrailingZeros8(^b[0]) 97 | subInodeNum = int64(i)*8 + int64(bitNum) 98 | b[0] |= 1 << uint(bitNum) 99 | bgd.fs.dev.Seek(-1, 1) 100 | bgd.fs.dev.Write(b) 101 | break 102 | } 103 | } 104 | } 105 | 106 | if subInodeNum < 0 { 107 | //log.Println("!!!! 
bgd full !!!", bgd.num, bgd.Free_inodes_count_lo) 108 | return nil 109 | } 110 | 111 | if bgd.Flags & BG_INODE_ZEROED == 0 { 112 | bgd.fs.dev.Seek(bgd.GetInodeTableLoc() * bgd.fs.sb.GetBlockSize(), 0) 113 | bgd.fs.dev.Write(make([]byte, int64(bgd.fs.sb.InodePer_group) / int64(bgd.fs.sb.Inode_size))) 114 | bgd.Flags |= BG_INODE_ZEROED 115 | bgd.UpdateCsumAndWriteback() 116 | } 117 | 118 | // Update inode bitmap checksum 119 | checksummer := NewChecksummer(bgd.fs.sb) 120 | checksummer.Write(bgd.fs.sb.Uuid[:]) 121 | bgd.fs.dev.Seek(start, 0) 122 | b := make([]byte, int64(bgd.fs.sb.InodePer_group) / 8) 123 | bgd.fs.dev.Read(b) 124 | checksummer.Write(b) 125 | bgd.Inode_bitmap_csum_lo = uint16(checksummer.Get() & 0xFFFF) 126 | bgd.Inode_bitmap_csum_hi = uint16(checksummer.Get() >> 16) 127 | 128 | bgd.Free_inodes_count_lo-- 129 | bgd.Itable_unused_lo-- 130 | bgd.UpdateCsumAndWriteback() 131 | 132 | bgd.fs.sb.Free_inodeCount-- 133 | bgd.fs.sb.UpdateCsumAndWriteback() 134 | 135 | // Insert in Inode table 136 | inode := &Inode{ 137 | Mode: 0, 138 | Links_count: 1, 139 | Flags: 524288, //TODO: what 140 | BlockOrExtents: [60]byte{0x0a, 0xf3, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00}, 141 | fs: bgd.fs, 142 | address: bgd.GetInodeTableLoc() * bgd.fs.sb.GetBlockSize() + subInodeNum * int64(bgd.fs.sb.Inode_size), 143 | num: 1 + bgd.num * int64(bgd.fs.sb.InodePer_group) + subInodeNum, 144 | } 145 | inode.UpdateCsumAndWriteback() 146 | 147 | return inode 148 | } 149 | 150 | func (bgd *GroupDescriptor) setBitRange(offset int64, start int64, n int64) { 151 | if start < 0 || start > int64(bgd.fs.sb.BlockPer_group) { 152 | return 153 | } 154 | 155 | n = (n + bgd.fs.sb.GetBlockSize() - 1) / bgd.fs.sb.GetBlockSize() 156 | for i := start; i < start + n; i++ { 157 | b := make([]byte, 1) 158 | bgd.fs.dev.Seek(offset + i / 8, 0) 159 | bgd.fs.dev.Read(b) 160 | b[0] |= 1 << uint(i % 8) 161 | bgd.fs.dev.Seek(-1, 1) 162 | bgd.fs.dev.Write(b) 163 | } 164 | bgd.Free_blocks_count_lo-=uint16(n) 165 | } 166 | 167 | func (bgd *GroupDescriptor) GetFreeBlocks(n int64) (int64, int64) { 168 | // Find free block in bitmap 169 | start := bgd.GetBlockBitmapLoc() * bgd.fs.sb.GetBlockSize() 170 | 171 | if bgd.Flags & BG_BLOCK_UNINIT != 0 { 172 | bgd.fs.dev.Seek(start, 0) 173 | bgd.fs.dev.Write(make([]byte, bgd.fs.sb.BlockPer_group/8)) 174 | bgd.Free_blocks_count_lo = uint16(bgd.fs.sb.BlockPer_group) 175 | 176 | if !bgd.fs.sb.FeatureRoCompatSparse_super() || bgd.num <= 1 || bgd.num % 3 == 0 || bgd.num % 5 == 0 || bgd.num % 7 == 0 { 177 | bgd.setBitRange(start, 0, bgd.fs.sb.GetBlockSize()+(bgd.fs.sb.numBlockGroups*32)+int64(bgd.fs.sb.Reserved_gdt_blocks)*bgd.fs.sb.GetBlockSize()) 178 | } 179 | bgd.setBitRange(start, bgd.GetInodeBitmapLoc() - bgd.num * int64(bgd.fs.sb.BlockPer_group) * bgd.fs.sb.GetBlockSize(), int64(bgd.fs.sb.InodePer_group)/8) 180 | bgd.setBitRange(start, bgd.GetBlockBitmapLoc() - bgd.num * int64(bgd.fs.sb.BlockPer_group) * bgd.fs.sb.GetBlockSize(), int64(bgd.fs.sb.BlockPer_group)/8) 181 | bgd.setBitRange(start, bgd.GetInodeTableLoc() - bgd.num * int64(bgd.fs.sb.BlockPer_group) * bgd.fs.sb.GetBlockSize(), int64(bgd.fs.sb.Inode_size)*int64(bgd.fs.sb.InodePer_group)/8) 182 | bgd.UpdateCsumAndWriteback() 183 | 184 | blocksFree := int64(0) 185 | for i := int64(0); i < bgd.fs.sb.numBlockGroups; i++ { 186 | blocksFree += int64(bgd.fs.getBlockGroupDescriptor(i).Free_blocks_count_lo) 187 | } 188 | 189 | bgd.fs.sb.Free_blockCount_lo = uint32(blocksFree) 190 | bgd.fs.sb.UpdateCsumAndWriteback() 191 | 192 | bgd.Flags &= 0xFFFF ^ 
BG_BLOCK_UNINIT 193 | bgd.UpdateCsumAndWriteback() 194 | } 195 | 196 | subBlockNum := int64(-1) 197 | bgd.fs.dev.Seek(start, 0) 198 | for i := 0; i < int(bgd.fs.sb.BlockPer_group/8); i++ { 199 | b := make([]byte, 1) 200 | bgd.fs.dev.Read(b) 201 | if b[0] != 0xFF { 202 | bitNum := bits.TrailingZeros8(^b[0]) 203 | numFree := bits.TrailingZeros8(uint8((uint16(b[0]) | 0x100) >> uint(bitNum))) 204 | //log.Println(bgd.num, i, b[0], bitNum, numFree, n) 205 | if n > int64(numFree) { 206 | n = int64(numFree) 207 | } 208 | subBlockNum = int64(i)*8 + int64(bitNum) 209 | b[0] |= (1 << uint(bitNum+int(n))) - 1 210 | //log.Println("Found free blocks. GD", bgd.num, subBlockNum, bitNum, b[0], n) 211 | bgd.fs.dev.Seek(-1, 1) 212 | bgd.fs.dev.Write(b) 213 | break 214 | } 215 | } 216 | 217 | if subBlockNum < 0 { 218 | return 0, 0 219 | } 220 | 221 | // Update block bitmap checksum 222 | checksummer := NewChecksummer(bgd.fs.sb) 223 | checksummer.Write(bgd.fs.sb.Uuid[:]) 224 | bgd.fs.dev.Seek(start, 0) 225 | b := make([]byte, int64(bgd.fs.sb.ClusterPer_group) / 8) 226 | bgd.fs.dev.Read(b) 227 | checksummer.Write(b) 228 | bgd.Block_bitmap_csum_lo = uint16(checksummer.Get() & 0xFFFF) 229 | bgd.Block_bitmap_csum_hi = uint16(checksummer.Get() >> 16) 230 | 231 | newFreeBlocks := ((uint32(bgd.Free_blocks_count_hi) << 16) | uint32(bgd.Free_blocks_count_lo)) - uint32(n) 232 | bgd.Free_blocks_count_hi = uint16(newFreeBlocks >> 16) 233 | bgd.Free_blocks_count_lo = uint16(newFreeBlocks) 234 | bgd.UpdateCsumAndWriteback() 235 | 236 | bgd.fs.sb.Free_blockCount_lo-=uint32(n) 237 | bgd.fs.sb.UpdateCsumAndWriteback() 238 | 239 | return bgd.address / bgd.fs.sb.GetBlockSize() + subBlockNum - 1, n 240 | } -------------------------------------------------------------------------------- /inode.go: -------------------------------------------------------------------------------- 1 | package gexto 2 | 3 | import ( 4 | "github.com/lunixbochs/struc" 5 | "encoding/binary" 6 | "log" 7 | "io" 8 | "bytes" 9 | ) 10 | 11 | type MoveExtent struct { 12 | Reserved uint32 `struc:"uint32,little"` 13 | Donor_fd uint32 `struc:"uint32,little"` 14 | Orig_start uint64 `struc:"uint64,little"` 15 | Donor_start uint64 `struc:"uint64,little"` 16 | Len uint64 `struc:"uint64,little"` 17 | Moved_len uint64 `struc:"uint64,little"` 18 | }; 19 | 20 | type ExtentHeader struct { 21 | Magic uint16 `struc:"uint16,little"` 22 | Entries uint16 `struc:"uint16,little"` 23 | Max uint16 `struc:"uint16,little"` 24 | Depth uint16 `struc:"uint16,little"` 25 | Generation uint32 `struc:"uint32,little"` 26 | } 27 | 28 | type ExtentInternal struct { 29 | Block uint32 `struc:"uint32,little"` 30 | Leaf_low uint32 `struc:"uint32,little"` 31 | Leaf_high uint16 `struc:"uint16,little"` 32 | Unused uint16 `struc:"uint16,little"` 33 | } 34 | 35 | type Extent struct { 36 | Block uint32 `struc:"uint32,little"` 37 | Len uint16 `struc:"uint16,little"` 38 | Start_hi uint16 `struc:"uint16,little"` 39 | Start_lo uint32 `struc:"uint32,little"` 40 | } 41 | 42 | type DirectoryEntry2 struct { 43 | Inode uint32 `struc:"uint32,little"` 44 | Rec_len uint16 `struc:"uint16,little"` 45 | Name_len uint8 `struc:"uint8,sizeof=Name"` 46 | Flags uint8 `struc:"uint8"` 47 | Name string `struc:"[]byte"` 48 | } 49 | 50 | type DirectoryEntryCsum struct { 51 | FakeInodeZero uint32 `struc:"uint32,little"` 52 | Rec_len uint16 `struc:"uint16,little"` 53 | FakeName_len uint8 `struc:"uint8"` 54 | FakeFileType uint8 `struc:"uint8"` 55 | Checksum uint32 `struc:"uint32,little"` 56 | } 57 | 58 | type Inode struct { 59 | 
Mode uint16 `struc:"uint16,little"` 60 | Uid uint16 `struc:"uint16,little"` 61 | Size_lo uint32 `struc:"uint32,little"` 62 | Atime uint32 `struc:"uint32,little"` 63 | Ctime uint32 `struc:"uint32,little"` 64 | Mtime uint32 `struc:"uint32,little"` 65 | Dtime uint32 `struc:"uint32,little"` 66 | Gid uint16 `struc:"uint16,little"` 67 | Links_count uint16 `struc:"uint16,little"` 68 | Blocks_lo uint32 `struc:"uint32,little"` 69 | Flags uint32 `struc:"uint32,little"` 70 | Osd1 uint32 `struc:"uint32,little"` 71 | BlockOrExtents [60]byte `struc:"[60]byte,little"` 72 | Generation uint32 `struc:"uint32,little"` 73 | File_acl_lo uint32 `struc:"uint32,little"` 74 | Size_high uint32 `struc:"uint32,little"` 75 | Obso_faddr uint32 `struc:"uint32,little"` 76 | // OSD2 - linux only starts 77 | Blocks_high uint16 `struc:"uint16,little"` 78 | File_acl_high uint16 `struc:"uint16,little"` 79 | Uid_high uint16 `struc:"uint16,little"` 80 | Gid_high uint16 `struc:"uint16,little"` 81 | Checksum_low uint16 `struc:"uint16,little"` 82 | Unused uint16 `struc:"uint16,little"` 83 | // OSD2 - linux only ends 84 | Extra_isize uint16 `struc:"uint16,little"` 85 | Checksum_hi uint16 `struc:"uint16,little"` 86 | Ctime_extra uint32 `struc:"uint32,little"` 87 | Mtime_extra uint32 `struc:"uint32,little"` 88 | Atime_extra uint32 `struc:"uint32,little"` 89 | Crtime uint32 `struc:"uint32,little"` 90 | Crtime_extra uint32 `struc:"uint32,little"` 91 | Version_hi uint32 `struc:"uint32,little"` 92 | Projid uint32 `struc:"uint32,little"` 93 | fs *fs 94 | address int64 95 | num int64 96 | }; 97 | 98 | 99 | func (inode *Inode) UsesExtents() bool { 100 | return (inode.Flags & EXTENTS_FL) != 0 101 | } 102 | 103 | func (inode *Inode) UsesDirectoryHashTree() bool { 104 | return (inode.Flags & INDEX_FL) != 0 105 | } 106 | 107 | func (inode *Inode) ReadDirectory() []DirectoryEntry2 { 108 | if inode.UsesDirectoryHashTree() { 109 | log.Fatalf("Not implemented") 110 | } 111 | 112 | f := &File{extFile{ 113 | fs: inode.fs, 114 | inode: inode, 115 | pos: 0, 116 | }} 117 | 118 | ret := []DirectoryEntry2{} 119 | for { 120 | start, _ := f.Seek(0, 1) 121 | dirEntry := DirectoryEntry2{} 122 | err := struc.Unpack(f, &dirEntry) 123 | if err == io.EOF { 124 | break 125 | } else if err != nil { 126 | log.Fatalf(err.Error()) 127 | } 128 | //log.Printf("dirEntry %s: %+v", string(dirEntry.Name), dirEntry) 129 | f.Seek(int64(dirEntry.Rec_len) + start, 0) 130 | if dirEntry.Rec_len < 9 { 131 | log.Fatalf("corrupt direntry") 132 | } 133 | ret = append(ret, dirEntry) 134 | } 135 | return ret 136 | } 137 | 138 | func (inode *Inode) AddBlocks(n int64) (blockNum int64, contiguousBlocks int64) { 139 | if !inode.UsesExtents() { 140 | log.Fatalf("Not implemented") 141 | } 142 | 143 | r := inode.fs.dev 144 | r.Seek(inode.address + 40, 0) 145 | 146 | for { 147 | headerPos, _ := r.Seek(0,1) 148 | extentHeader := &ExtentHeader{} 149 | struc.Unpack(r, &extentHeader) 150 | //log.Printf("extent header: %+v", extentHeader) 151 | if extentHeader.Depth == 0 { // Leaf 152 | max := int64(0) 153 | for i := uint16(0); i < extentHeader.Entries; i++ { 154 | extent := &Extent{} 155 | struc.Unpack(r, &extent) 156 | upper := int64(extent.Block) + int64(extent.Len) 157 | if upper > max { 158 | max = upper 159 | } 160 | } 161 | if extentHeader.Entries < extentHeader.Max { 162 | savePos, _ := r.Seek(0, 1) 163 | blockNum, numBlocks := inode.fs.GetFreeBlocks(int(n)) 164 | newExtent := &Extent{ 165 | Block: uint32(max), 166 | Len: uint16(numBlocks), 167 | Start_hi: uint16(blockNum >> 32), 168 | 
Start_lo: uint32(blockNum & 0xFFFFFFFF), 169 | } 170 | r.Seek(savePos, 0) 171 | struc.Pack(r, &newExtent) 172 | extentHeader.Entries++ 173 | //log.Println("Extended to", extentHeader.Entries, headerPos) 174 | r.Seek(headerPos, 0) 175 | struc.Pack(r, extentHeader) 176 | r.Seek(inode.address, 0) 177 | struc.Unpack(r, inode) 178 | inode.Blocks_lo += uint32(numBlocks*inode.fs.sb.GetBlockSize()/512) 179 | inode.UpdateCsumAndWriteback() 180 | 181 | //log.Println("AddBlocks", n, numBlocks) 182 | 183 | return blockNum, numBlocks 184 | } else { 185 | log.Fatalf("Unable to extend no room") 186 | } 187 | } else { 188 | max := uint32(0) 189 | var best *ExtentInternal 190 | for i := uint16(0); i < extentHeader.Entries; i++ { 191 | extent := &ExtentInternal{} 192 | struc.Unpack(r, &extent) 193 | //log.Printf("extent internal: %+v", extent) 194 | if extent.Block > max { 195 | best = extent 196 | } 197 | } 198 | 199 | newBlock := int64(best.Leaf_high<<32) + int64(best.Leaf_low) 200 | r.Seek(newBlock*inode.fs.sb.GetBlockSize(), 0) 201 | } 202 | } 203 | 204 | //log.Println("AddBlocks", n, 0) 205 | return 0,0 206 | } 207 | 208 | func (inode *Inode) UpdateCsumAndWriteback() { 209 | if inode.fs.sb.Inode_size != 128 { 210 | log.Fatalln("Unsupported inode size", inode.fs.sb.Inode_size) 211 | } 212 | 213 | cs := NewChecksummer(inode.fs.sb) 214 | 215 | cs.Write(inode.fs.sb.Uuid[:]) 216 | cs.WriteUint32(uint32(inode.num)) 217 | cs.WriteUint32(uint32(inode.Generation)) 218 | inode.Checksum_low = 0 219 | struc.Pack(LimitWriter(cs, 128), inode) 220 | inode.Checksum_low = uint16(cs.Get() & 0xFFFF) 221 | 222 | inode.fs.dev.Seek(inode.address, 0) 223 | struc.Pack(LimitWriter(inode.fs.dev, 128), inode) 224 | } 225 | 226 | // Returns the blockId of the file block, and the number of contiguous blocks 227 | func (inode *Inode) GetBlockPtr(num int64) (int64, int64, bool) { 228 | if inode.UsesExtents() { 229 | //log.Println("Finding", num) 230 | r := io.Reader(bytes.NewReader(inode.BlockOrExtents[:])) 231 | 232 | for { 233 | extentHeader := &ExtentHeader{} 234 | struc.Unpack(r, &extentHeader) 235 | //log.Printf("extent header: %+v", extentHeader) 236 | if extentHeader.Depth == 0 { // Leaf 237 | for i := uint16(0); i < extentHeader.Entries; i++ { 238 | extent := &Extent{} 239 | struc.Unpack(r, &extent) 240 | //log.Printf("extent leaf: %+v", extent) 241 | if int64(extent.Block) <= num && int64(extent.Block)+int64(extent.Len) > num { 242 | //log.Println("Found") 243 | return int64(extent.Start_hi<<32) + int64(extent.Start_lo) + num - int64(extent.Block), int64(extent.Block) + int64(extent.Len) - num, true 244 | } 245 | } 246 | return 0, 0, false 247 | } else { 248 | found := false 249 | for i := uint16(0); i < extentHeader.Entries; i++ { 250 | extent := &ExtentInternal{} 251 | struc.Unpack(r, &extent) 252 | //log.Printf("extent internal: %+v", extent) 253 | if int64(extent.Block) <= num { 254 | newBlock := int64(extent.Leaf_high<<32) + int64(extent.Leaf_low) 255 | inode.fs.dev.Seek(newBlock * inode.fs.sb.GetBlockSize(), 0) 256 | r = inode.fs.dev 257 | found = true 258 | break 259 | } 260 | } 261 | if !found { 262 | return 0,0, false 263 | } 264 | } 265 | } 266 | 267 | } 268 | 269 | if num < 12 { 270 | return int64(binary.LittleEndian.Uint32(inode.BlockOrExtents[4*num:])), 1, true 271 | } 272 | 273 | num -= 12 274 | 275 | indirectsPerBlock := inode.fs.sb.GetBlockSize() / 4 276 | if num < indirectsPerBlock { 277 | ptr := int64(binary.LittleEndian.Uint32(inode.BlockOrExtents[4*12:])) 278 | return inode.getIndirectBlockPtr(ptr, 
num),1, true 279 | } 280 | num -= indirectsPerBlock 281 | 282 | if num < indirectsPerBlock * indirectsPerBlock { 283 | ptr := int64(binary.LittleEndian.Uint32(inode.BlockOrExtents[4*13:])) 284 | l1 := inode.getIndirectBlockPtr(ptr, num / indirectsPerBlock) 285 | return inode.getIndirectBlockPtr(l1, num % indirectsPerBlock),1, true 286 | } 287 | 288 | num -= indirectsPerBlock * indirectsPerBlock 289 | 290 | if num < indirectsPerBlock * indirectsPerBlock * indirectsPerBlock { 291 | log.Println("Triple indirection") 292 | 293 | ptr := int64(binary.LittleEndian.Uint32(inode.BlockOrExtents[4*14:])) 294 | l1 := inode.getIndirectBlockPtr(ptr, num / (indirectsPerBlock * indirectsPerBlock)) 295 | l2 := inode.getIndirectBlockPtr(l1, (num / indirectsPerBlock) % indirectsPerBlock) 296 | return inode.getIndirectBlockPtr(l2, num % (indirectsPerBlock * indirectsPerBlock)),1, true 297 | } 298 | 299 | log.Fatalf("Exceeded maximum possible block count") 300 | return 0,0,false 301 | } 302 | 303 | func (inode *Inode) getIndirectBlockPtr(blockNum int64, offset int64) int64 { 304 | inode.fs.dev.Seek(blockNum * inode.fs.sb.GetBlockSize() + offset * 4, 0) 305 | x := make([]byte, 4) 306 | inode.fs.dev.Read(x) 307 | return int64(binary.LittleEndian.Uint32(x)) 308 | } 309 | 310 | func (inode *Inode) GetSize() int64 { 311 | return (int64(inode.Size_high) << 32) | int64(inode.Size_lo) 312 | } 313 | 314 | func (inode *Inode) SetSize(i int64) { 315 | inode.Size_high = uint32(i >> 32) 316 | inode.Size_lo = uint32(i & 0xFFFFFFFF) 317 | inode.UpdateCsumAndWriteback() 318 | } -------------------------------------------------------------------------------- /integration_test.go: -------------------------------------------------------------------------------- 1 | package gexto_test 2 | 3 | import ( 4 | "testing" 5 | "os/exec" 6 | "io/ioutil" 7 | "log" 8 | "os" 9 | 10 | "github.com/stretchr/testify/require" 11 | "github.com/nerd2/gexto" 12 | "math/rand" 13 | ) 14 | 15 | type TestFs struct { 16 | devFile string 17 | mntPath string 18 | t *testing.T 19 | } 20 | 21 | func NewTestFs(t *testing.T, sizeMb int, fsType string) *TestFs { 22 | f, err := ioutil.TempFile("", "gextotest") 23 | require.Nil(t, err) 24 | blank := make([]byte, 1024*1024) 25 | for i := 0; i < sizeMb; i++ { 26 | _, err = f.Write(blank) 27 | require.Nil(t, err) 28 | } 29 | err = f.Close() 30 | require.Nil(t, err) 31 | 32 | err = exec.Command("mkfs." 
+ fsType, f.Name()).Run() 33 | require.Nil(t, err) 34 | 35 | tfs := &TestFs{f.Name(), "", t} 36 | return tfs 37 | } 38 | 39 | func (tfs *TestFs) Mount() { 40 | out, err := exec.Command("fsck", "-f", "-n", tfs.devFile).CombinedOutput() 41 | if err != nil { 42 | log.Println(string(out)) 43 | } 44 | require.Nil(tfs.t, err) 45 | 46 | td, err := ioutil.TempDir("", "gextotest") 47 | require.Nil(tfs.t, err) 48 | 49 | err = exec.Command("sudo", "mount", tfs.devFile, td).Run() 50 | require.Nil(tfs.t, err) 51 | 52 | err = exec.Command("sudo", "chmod", "-R", "777", td).Run() 53 | require.Nil(tfs.t, err) 54 | 55 | tfs.mntPath = td 56 | } 57 | 58 | func (tfs *TestFs) Unmount() { 59 | if tfs.mntPath != "" { 60 | exec.Command("sudo", "umount", tfs.mntPath).Run() 61 | exec.Command("sudo", "rm", "-rf", tfs.mntPath).Run() 62 | tfs.mntPath = "" 63 | } 64 | } 65 | 66 | func (tfs *TestFs) Close() { 67 | tfs.Unmount() 68 | if true { 69 | os.Remove(tfs.devFile) 70 | } else { 71 | log.Println(tfs.devFile) 72 | } 73 | } 74 | 75 | func (tfs *TestFs) WriteSmallFile(path string, file string, b []byte) { 76 | err := os.MkdirAll(tfs.mntPath + path, 0777) 77 | require.Nil(tfs.t, err) 78 | err = ioutil.WriteFile(tfs.mntPath + path + "/" + file, b, 0777) 79 | require.Nil(tfs.t, err) 80 | } 81 | 82 | func (tfs *TestFs) WriteLargeFile(path string, file string, size int) *os.File { 83 | largefile, _ := ioutil.TempFile("", "gexto") 84 | for size > 0 { 85 | dataLen := 512*1024 86 | if dataLen > size { 87 | dataLen = size 88 | } 89 | data := make([]byte, dataLen) 90 | n, err := rand.Read(data) 91 | require.Nil(tfs.t, err) 92 | m, err := largefile.Write(data[:n]) 93 | require.Nil(tfs.t, err) 94 | size -= m 95 | } 96 | err := largefile.Close() 97 | require.Nil(tfs.t, err) 98 | err = os.MkdirAll(tfs.mntPath + path, 0777) 99 | require.Nil(tfs.t, err) 100 | err = exec.Command("cp", largefile.Name(), tfs.mntPath + path + file).Run() 101 | require.Nil(tfs.t, err) 102 | return largefile 103 | } 104 | 105 | func doTestRead(t *testing.T, fsType string) { 106 | tfs := NewTestFs(t, 1100, fsType) 107 | tfs.Mount() 108 | defer func(){tfs.Close()}() 109 | 110 | text := []byte("hello world") 111 | tfs.WriteSmallFile("/", "smallfile", text) 112 | tfs.WriteSmallFile("/dir1", "smallfile", text) 113 | largefile := tfs.WriteLargeFile("/", "largefile", 987654321) 114 | defer os.Remove(largefile.Name()) 115 | tfs.Unmount() 116 | 117 | fs, err := gexto.NewFileSystem(tfs.devFile) 118 | require.Nil(t, err) 119 | 120 | { 121 | file, err := fs.Open("/smallfile") 122 | require.Nil(t, err) 123 | out, err := ioutil.ReadAll(file) 124 | require.Nil(t, err) 125 | require.Equal(t, text, out) 126 | } 127 | 128 | { 129 | file, err := fs.Open("/dir1/smallfile") 130 | require.Nil(t, err) 131 | out, err := ioutil.ReadAll(file) 132 | require.Nil(t, err) 133 | require.Equal(t, text, out) 134 | } 135 | 136 | { 137 | file, err := fs.Open("/largefile") 138 | require.Nil(t, err) 139 | comparefile, err := os.Open(largefile.Name()) 140 | for err == nil { 141 | a := make([]byte, 1024*1024) 142 | b := make([]byte, 1024*1024) 143 | var na int 144 | na, err = file.Read(a) 145 | nb, err2 := comparefile.Read(b) 146 | require.Equal(t, na, nb) 147 | log.Printf("Read %d (%d)", na, nb) 148 | require.Equal(t, a[:na], b[:nb]) 149 | require.Equal(t, na, nb) 150 | require.Equal(t, err, err2) 151 | } 152 | } 153 | } 154 | 155 | func TestIntegrationRead(t *testing.T) { 156 | log.SetFlags(log.LstdFlags | log.Lshortfile) 157 | doTestRead(t, "ext2") 158 | doTestRead(t, "ext4") 159 | } 160 | 
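// Illustrative sketch, not part of the original test file: the minimal
// read-only call sequence exercised by doTestRead above, collected into one
// hypothetical helper. It assumes only the calls already used in these tests
// (gexto.NewFileSystem, fs.Open, fs.Close) and an existing ext2/3/4 image at
// imagePath.
func readWholeFile(imagePath, innerPath string) ([]byte, error) {
	fs, err := gexto.NewFileSystem(imagePath)
	if err != nil {
		return nil, err
	}
	defer fs.Close()

	f, err := fs.Open(innerPath)
	if err != nil {
		return nil, err
	}
	// gexto file handles satisfy io.Reader, so the standard helpers apply.
	return ioutil.ReadAll(f)
}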
161 | func TestIntegrationWrite(t *testing.T) { 162 | log.SetFlags(log.LstdFlags | log.Lshortfile) 163 | 164 | tfs := NewTestFs(t, 100, "ext4") 165 | defer func(){tfs.Close()}() 166 | 167 | fs, err := gexto.NewFileSystem(tfs.devFile) 168 | require.Nil(t, err) 169 | recursiveFillDisk(t, fs, "", 5, rand.New(rand.NewSource(1))) 170 | err = fs.Mkdir("/newtestdir", 0777) 171 | require.Nil(t, err) 172 | err = fs.Mkdir("/newtestdir/newsubdir", 0777) 173 | require.Nil(t, err) 174 | f, err := fs.Create("/newtestdir/newsubdir/file") 175 | require.Nil(t, err) 176 | testcontents := make([]byte, 12345) 177 | rand.Read(testcontents) 178 | f.Write(testcontents) 179 | //f.Close() 180 | fs.Close() 181 | 182 | { 183 | fs, err := gexto.NewFileSystem(tfs.devFile) 184 | require.Nil(t, err) 185 | _, err = fs.Open("/newtestdir") 186 | require.Nil(t, err) 187 | _, err = fs.Open("/newtestdir/newsubdir") 188 | require.Nil(t, err) 189 | _, err = fs.Open("/newtestdir/newsubdir/file") 190 | require.Nil(t, err) 191 | fs.Close() 192 | } 193 | 194 | tfs.Mount() 195 | stat1, err := os.Stat(tfs.mntPath + "/newtestdir") 196 | require.Nil(t, err) 197 | require.Equal(t, os.FileMode(0777), stat1.Mode() & 0777) 198 | stat2, err := os.Stat(tfs.mntPath + "/newtestdir/newsubdir") 199 | require.Nil(t, err) 200 | require.Equal(t, os.FileMode(0777), stat2.Mode() & 0777) 201 | contents, err := ioutil.ReadFile(tfs.mntPath + "/newtestdir/newsubdir/file") 202 | require.Nil(t, err) 203 | require.Equal(t, testcontents, contents) 204 | } 205 | 206 | func randomName(len int, rand *rand.Rand) string { 207 | len++ 208 | name := make([]byte, len) 209 | for i := 0; i < len; i++ { 210 | name[i] = byte('a' + rand.Intn('z'-'a') + rand.Intn(1) * ('A'-'a')) 211 | } 212 | return string(name) 213 | } 214 | 215 | func recursiveFillDisk(t *testing.T, fs gexto.FileSystem, path string, depth int, rand *rand.Rand) { 216 | if depth < 0 { 217 | return 218 | } 219 | nSubDirs := 3 + rand.Intn(2) 220 | for i := 0; i < nSubDirs; i++ { 221 | name := randomName(12+i, rand) 222 | err := fs.Mkdir(path + "/" + string(name), 0777) 223 | require.Nil(t, err) 224 | recursiveFillDisk(t, fs, path + "/" + string(name), depth-1, rand) 225 | } 226 | nFiles := 3 + rand.Intn(4) 227 | for i := 0; i < nFiles; i++ { 228 | name := randomName(12+nSubDirs+i, rand) 229 | f, err := fs.Create(path + "/" + string(name)) 230 | require.Nil(t, err) 231 | b := make([]byte, rand.Intn(10000)) 232 | rand.Read(b) 233 | f.Write(b) 234 | } 235 | } -------------------------------------------------------------------------------- /superblock.go: -------------------------------------------------------------------------------- 1 | package gexto 2 | 3 | import ( 4 | "github.com/lunixbochs/struc" 5 | ) 6 | 7 | type Superblock struct { 8 | InodeCount uint32 `struc:"uint32,little"` 9 | BlockCount_lo uint32 `struc:"uint32,little"` 10 | R_blockCount_lo uint32 `struc:"uint32,little"` 11 | Free_blockCount_lo uint32 `struc:"uint32,little"` 12 | Free_inodeCount uint32 `struc:"uint32,little"` 13 | First_data_block uint32 `struc:"uint32,little"` 14 | Log_block_size uint32 `struc:"uint32,little"` 15 | Log_cluster_size uint32 `struc:"uint32,little"` 16 | BlockPer_group uint32 `struc:"uint32,little"` 17 | ClusterPer_group uint32 `struc:"uint32,little"` 18 | InodePer_group uint32 `struc:"uint32,little"` 19 | Mtime uint32 `struc:"uint32,little"` 20 | Wtime uint32 `struc:"uint32,little"` 21 | Mnt_count uint16 `struc:"uint16,little"` 22 | Max_mnt_count uint16 `struc:"uint16,little"` 23 | Magic uint16 
`struc:"uint16,little"` 24 | State uint16 `struc:"uint16,little"` 25 | Errors uint16 `struc:"uint16,little"` 26 | Minor_rev_level uint16 `struc:"uint16,little"` 27 | Lastcheck uint32 `struc:"uint32,little"` 28 | Checkinterval uint32 `struc:"uint32,little"` 29 | Creator_os uint32 `struc:"uint32,little"` 30 | Rev_level uint32 `struc:"uint32,little"` 31 | Def_resuid uint16 `struc:"uint16,little"` 32 | Def_resgid uint16 `struc:"uint16,little"` 33 | // Dynamic_rev superblocks only 34 | First_ino uint32 `struc:"uint32,little"` 35 | Inode_size uint16 `struc:"uint16,little"` 36 | Block_group_nr uint16 `struc:"uint16,little"` 37 | Feature_compat uint32 `struc:"uint32,little"` 38 | Feature_incompat uint32 `struc:"uint32,little"` 39 | Feature_ro_compat uint32 `struc:"uint32,little"` 40 | Uuid [16]byte `struc:"[16]byte"` 41 | Volume_name [16]byte `struc:"[16]byte"` 42 | Last_mounted [64]byte `struc:"[64]byte"` 43 | Algorithm_usage_bitmap uint32 `struc:"uint32,little"` 44 | // Performance hints 45 | Prealloc_blocks byte `struc:"byte"` 46 | Prealloc_dir_blocks byte `struc:"byte"` 47 | Reserved_gdt_blocks uint16 `struc:"uint16,little"` 48 | // Journal 49 | 50 | Journal_Uuid [16]byte `struc:"[16]byte"` 51 | Journal_inum uint32 `struc:"uint32,little"` 52 | Journal_dev uint32 `struc:"uint32,little"` 53 | Last_orphan uint32 `struc:"uint32,little"` 54 | Hash_seed [4]uint32 `struc:"[4]uint32,little"` 55 | Def_hash_version byte `struc:"byte"` 56 | Jnl_backup_type byte `struc:"byte"` 57 | Desc_size uint16 `struc:"uint16,little"` 58 | Default_mount_opts uint32 `struc:"uint32,little"` 59 | First_meta_bg uint32 `struc:"uint32,little"` 60 | MkfTime uint32 `struc:"uint32,little"` 61 | Jnl_blocks [17]uint32 `struc:"[17]uint32,little"` 62 | 63 | BlockCount_hi uint32 `struc:"uint32,little"` 64 | R_blockCount_hi uint32 `struc:"uint32,little"` 65 | Free_blockCount_hi uint32 `struc:"uint32,little"` 66 | Min_extra_isize uint16 `struc:"uint16,little"` 67 | Want_extra_isize uint16 `struc:"uint16,little"` 68 | Flags uint32 `struc:"uint32,little"` 69 | Raid_stride uint16 `struc:"uint16,little"` 70 | Mmp_update_interval uint16 `struc:"uint16,little"` 71 | Mmp_block uint64 `struc:"uint64,little"` 72 | Raid_stripe_width uint32 `struc:"uint32,little"` 73 | Log_groupPer_flex byte `struc:"byte"` 74 | Checksum_type byte `struc:"byte"` 75 | Encryption_level byte `struc:"byte"` 76 | Reserved_pad byte `struc:"byte"` 77 | KbyteWritten uint64 `struc:"uint64,little"` 78 | Snapshot_inum uint32 `struc:"uint32,little"` 79 | Snapshot_id uint32 `struc:"uint32,little"` 80 | Snapshot_r_blockCount uint64 `struc:"uint64,little"` 81 | Snapshot_list uint32 `struc:"uint32,little"` 82 | Error_count uint32 `struc:"uint32,little"` 83 | First_error_time uint32 `struc:"uint32,little"` 84 | First_error_ino uint32 `struc:"uint32,little"` 85 | First_error_block uint64 `struc:"uint64,little"` 86 | First_error_func [32]byte `struc:"[32]pad"` 87 | First_error_line uint32 `struc:"uint32,little"` 88 | Last_error_time uint32 `struc:"uint32,little"` 89 | Last_error_ino uint32 `struc:"uint32,little"` 90 | Last_error_line uint32 `struc:"uint32,little"` 91 | Last_error_block uint64 `struc:"uint64,little"` 92 | Last_error_func [32]byte `struc:"[32]pad"` 93 | Mount_opts [64]byte `struc:"[64]pad"` 94 | Usr_quota_inum uint32 `struc:"uint32,little"` 95 | Grp_quota_inum uint32 `struc:"uint32,little"` 96 | Overhead_clusters uint32 `struc:"uint32,little"` 97 | Backup_bgs [2]uint32 `struc:"[2]uint32,little"` 98 | Encrypt_algos [4]byte `struc:"[4]pad"` 99 | Encrypt_pw_salt 
[16]byte `struc:"[16]pad"` 100 | Lpf_ino uint32 `struc:"uint32,little"` 101 | Prj_quota_inum uint32 `struc:"uint32,little"` 102 | Checksum_seed uint32 `struc:"uint32,little"` 103 | Reserved [98]uint32 `struc:"[98]uint32,little"` 104 | Checksum uint32 `struc:"uint32,little"` 105 | address int64 106 | fs *fs 107 | numBlockGroups int64 108 | }; 109 | 110 | func (sb *Superblock) FeatureCompatDir_prealloc() bool { return (sb.Feature_compat&FEATURE_COMPAT_DIR_PREALLOC != 0) } 111 | func (sb *Superblock) FeatureCompatImagic_inodes() bool { return (sb.Feature_compat&FEATURE_COMPAT_IMAGIC_INODES != 0) } 112 | func (sb *Superblock) FeatureCompatHas_journal() bool { return (sb.Feature_compat&FEATURE_COMPAT_HAS_JOURNAL != 0) } 113 | func (sb *Superblock) FeatureCompatExt_attr() bool { return (sb.Feature_compat&FEATURE_COMPAT_EXT_ATTR != 0) } 114 | func (sb *Superblock) FeatureCompatResize_inode() bool { return (sb.Feature_compat&FEATURE_COMPAT_RESIZE_INODE != 0) } 115 | func (sb *Superblock) FeatureCompatDir_index() bool { return (sb.Feature_compat&FEATURE_COMPAT_DIR_INDEX != 0) } 116 | func (sb *Superblock) FeatureCompatSparse_super2() bool { return (sb.Feature_compat&FEATURE_COMPAT_SPARSE_SUPER2 != 0) } 117 | 118 | func (sb *Superblock) FeatureRoCompatSparse_super() bool { return (sb.Feature_ro_compat&FEATURE_RO_COMPAT_SPARSE_SUPER != 0) } 119 | func (sb *Superblock) FeatureRoCompatLarge_file() bool { return (sb.Feature_ro_compat&FEATURE_RO_COMPAT_LARGE_FILE != 0) } 120 | func (sb *Superblock) FeatureRoCompatBtree_dir() bool { return (sb.Feature_ro_compat&FEATURE_RO_COMPAT_BTREE_DIR != 0) } 121 | func (sb *Superblock) FeatureRoCompatHuge_file() bool { return (sb.Feature_ro_compat&FEATURE_RO_COMPAT_HUGE_FILE != 0) } 122 | func (sb *Superblock) FeatureRoCompatGdt_csum() bool { return (sb.Feature_ro_compat&FEATURE_RO_COMPAT_GDT_CSUM != 0) } 123 | func (sb *Superblock) FeatureRoCompatDir_nlink() bool { return (sb.Feature_ro_compat&FEATURE_RO_COMPAT_DIR_NLINK != 0) } 124 | func (sb *Superblock) FeatureRoCompatExtra_isize() bool { return (sb.Feature_ro_compat&FEATURE_RO_COMPAT_EXTRA_ISIZE != 0) } 125 | func (sb *Superblock) FeatureRoCompatQuota() bool { return (sb.Feature_ro_compat&FEATURE_RO_COMPAT_QUOTA != 0) } 126 | func (sb *Superblock) FeatureRoCompatBigalloc() bool { return (sb.Feature_ro_compat&FEATURE_RO_COMPAT_BIGALLOC != 0) } 127 | func (sb *Superblock) FeatureRoCompatMetadata_csum() bool { return (sb.Feature_ro_compat&FEATURE_RO_COMPAT_METADATA_CSUM != 0) } 128 | func (sb *Superblock) FeatureRoCompatReadonly() bool { return (sb.Feature_ro_compat&FEATURE_RO_COMPAT_READONLY != 0) } 129 | func (sb *Superblock) FeatureRoCompatProject() bool { return (sb.Feature_ro_compat&FEATURE_RO_COMPAT_PROJECT != 0) } 130 | 131 | func (sb *Superblock) FeatureIncompat64bit() bool { return (sb.Feature_incompat&FEATURE_INCOMPAT_64BIT != 0) } 132 | func (sb *Superblock) FeatureIncompatCompression() bool { return (sb.Feature_incompat&FEATURE_INCOMPAT_COMPRESSION != 0) } 133 | func (sb *Superblock) FeatureIncompatFiletype() bool { return (sb.Feature_incompat&FEATURE_INCOMPAT_FILETYPE != 0) } 134 | func (sb *Superblock) FeatureIncompatRecover() bool { return (sb.Feature_incompat&FEATURE_INCOMPAT_RECOVER != 0) } 135 | func (sb *Superblock) FeatureIncompatJournal_dev() bool { return (sb.Feature_incompat&FEATURE_INCOMPAT_JOURNAL_DEV != 0) } 136 | func (sb *Superblock) FeatureIncompatMeta_bg() bool { return (sb.Feature_incompat&FEATURE_INCOMPAT_META_BG != 0) } 137 | func (sb *Superblock) FeatureIncompatExtents() bool { 
return (sb.Feature_incompat&FEATURE_INCOMPAT_EXTENTS != 0) } 138 | func (sb *Superblock) FeatureIncompatMmp() bool { return (sb.Feature_incompat&FEATURE_INCOMPAT_MMP != 0) } 139 | func (sb *Superblock) FeatureIncompatFlex_bg() bool { return (sb.Feature_incompat&FEATURE_INCOMPAT_FLEX_BG != 0) } 140 | func (sb *Superblock) FeatureIncompatEa_inode() bool { return (sb.Feature_incompat&FEATURE_INCOMPAT_EA_INODE != 0) } 141 | func (sb *Superblock) FeatureIncompatDirdata() bool { return (sb.Feature_incompat&FEATURE_INCOMPAT_DIRDATA != 0) } 142 | func (sb *Superblock) FeatureIncompatCsum_seed() bool { return (sb.Feature_incompat&FEATURE_INCOMPAT_CSUM_SEED != 0) } 143 | func (sb *Superblock) FeatureIncompatLargedir() bool { return (sb.Feature_incompat&FEATURE_INCOMPAT_LARGEDIR != 0) } 144 | func (sb *Superblock) FeatureIncompatInline_data() bool { return (sb.Feature_incompat&FEATURE_INCOMPAT_INLINE_DATA != 0) } 145 | func (sb *Superblock) FeatureIncompatEncrypt() bool { return (sb.Feature_incompat&FEATURE_INCOMPAT_ENCRYPT != 0) } 146 | 147 | func (sb *Superblock) GetBlockCount() int64 { 148 | if sb.FeatureIncompat64bit() { 149 | return (int64(sb.BlockCount_hi) << 32) | int64(sb.BlockCount_lo) 150 | } else { 151 | return int64(sb.BlockCount_lo) 152 | } 153 | } 154 | 155 | func (sb *Superblock) GetBlockSize() int64 { 156 | return int64(1024 << uint(sb.Log_block_size)) 157 | } 158 | 159 | func (sb *Superblock) UpdateCsumAndWriteback() { 160 | cs := NewChecksummer(sb) 161 | 162 | size, _ := struc.Sizeof(sb) 163 | struc.Pack(LimitWriter(cs, int64(size) - 4), sb) 164 | sb.Checksum = cs.Get() 165 | 166 | sb.fs.dev.Seek(sb.address, 0) 167 | struc.Pack(sb.fs.dev, sb) 168 | } 169 | 170 | func (sb *Superblock) GetGroupsPerFlex() int64 { 171 | return 1 << sb.Log_groupPer_flex 172 | } -------------------------------------------------------------------------------- /util.go: -------------------------------------------------------------------------------- 1 | package gexto 2 | 3 | import "io" 4 | 5 | // A limitedWriter writes to W but limits the amount of 6 | // data written to just N bytes. Each call to Write 7 | // updates N to reflect the new amount remaining. 8 | // Write returns EOF when N <= 0 or when the underlying W returns EOF. 9 | type limitedWriter struct { 10 | W io.Writer 11 | N int64 12 | } 13 | 14 | func LimitWriter(w io.Writer, n int64) io.Writer { return &limitedWriter{w, n} } 15 | 16 | func (lw *limitedWriter) Write(p []byte) (n int, err error) { 17 | if lw.N <= 0 { 18 | return 0, io.EOF 19 | } 20 | if int64(len(p)) > lw.N { 21 | p = p[0:lw.N] 22 | } 23 | n, err = lw.W.Write(p) 24 | lw.N -= int64(n) 25 | return 26 | } --------------------------------------------------------------------------------
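A minimal usage sketch (not part of the repository) for the LimitWriter helper defined in util.go above, assuming only its exported signature; this is the mechanism gexto uses to cap struc.Pack output to fixed-size on-disk structures such as 128-byte inodes:

package main

import (
	"bytes"
	"fmt"
	"io"

	"github.com/nerd2/gexto"
)

func main() {
	var buf bytes.Buffer
	lw := gexto.LimitWriter(&buf, 5)

	// Only the first 5 bytes are forwarded to the underlying writer.
	n, err := lw.Write([]byte("hello world"))
	fmt.Println(n, err, buf.String()) // 5 <nil> hello

	// The budget is exhausted: nothing more is written and io.EOF is returned.
	n, err = lw.Write([]byte("more"))
	fmt.Println(n, err == io.EOF) // 0 true
}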