├── LICENSE ├── README.md ├── async_unix_bsd.go ├── async_unix_bsd_aio.go ├── async_unix_bsd_aio_test.go ├── async_unix_notbsd.go ├── async_unix_notbsd_aio.go ├── async_unix_notbsd_aio_test.go ├── async_unix_notbsd_io_uring.go ├── async_unix_notbsd_io_uring_test.go ├── async_windows.go ├── ctx.go ├── ctx_unix_bsd.go ├── ctx_unix_bsd_test.go ├── ctx_unix_notbsd.go ├── ctx_unix_notbsd_test.go ├── ctx_windows.go ├── ctx_windows_test.go ├── file.go ├── file_test.go ├── file_unix.go ├── file_windows.go ├── go.mod ├── go.sum ├── iovec_32.go └── iovec_64.go /LICENSE: -------------------------------------------------------------------------------- 1 | BSD 3-Clause License 2 | 3 | Copyright (c) 2021, Oja Multimedia Space 4 | All rights reserved. 5 | 6 | Redistribution and use in source and binary forms, with or without 7 | modification, are permitted provided that the following conditions are met: 8 | 9 | 1. Redistributions of source code must retain the above copyright notice, this 10 | list of conditions and the following disclaimer. 11 | 12 | 2. Redistributions in binary form must reproduce the above copyright notice, 13 | this list of conditions and the following disclaimer in the documentation 14 | and/or other materials provided with the distribution. 15 | 16 | 3. Neither the name of the copyright holder nor the names of its 17 | contributors may be used to endorse or promote products derived from 18 | this software without specific prior written permission. 19 | 20 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 21 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 23 | DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 24 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 25 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 26 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 27 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 28 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 29 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 30 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Asyncfs is a library for asynchronous file I/O 2 | # Description 3 | There is a problem in the Go runtime: since the goroutine scheduler works only in the context of the Go code itself, it needs to make a separate OS thread to call external functions such as cgo or syscalls. A typical example of syscalls is I/O operations with files. Unfortunately, epoll/kqueue can’t work with regular files, thus Go’s netpoller can’t help the Go's scheduler. The scheduler can’t switch a goroutine which execute the syscall and it is not clear how long it takes. Performing such operations in a separate OS thread is the only option the Go's scheduler has. If all the threads are busy, the new thread will be made. That is why if cgo is used frequently as well as with long syscalls (except for sockets, since there is a netpoller there) your Go program can create a lot of unnecessary OS threads. 4 | Asyncfs allows you to perform I/O operations with files using asynchronous interfaces: 5 | - **Linux, Android** - aio/io_uring; 6 | - **FreeBSD, MacOS** - aio; 7 | - **Windows** - OVERLAPPED; 8 | 9 | So syscalls are processed in a short time and unnecessary OS threads are not created. The work with the files occurs at the core OS level and the goroutines are not blocked. 
Also you can work using synchronous mode. In this case you have an ability to set a limit that regulates the need to switch to a separate thread for the I/O operation. 10 | 11 | # Getting the Source 12 | ```sh 13 | go get -u github.com/ojaai/asyncfs 14 | ``` 15 | 16 | # Restrictions 17 | - The library is incompatible with the race detector 18 | - Using aio on Linux requires 512-bytes alignment 19 | 20 | # Example 21 | ```go 22 | import( 23 | "fmt" 24 | "github.com/ojaai/asyncfs" 25 | "sync" 26 | ) 27 | 28 | func main() { 29 | pool := sync.Pool{ 30 | New: func() interface{} { 31 | b := make([]uint8, 0, 1024*128) 32 | return b 33 | }, 34 | } 35 | 36 | allocator := func(sz int) []uint8 { 37 | buf, ok := pool.Get().([]uint8) 38 | if !ok || cap(buf) != sz { 39 | return make([]uint8, 0, sz) 40 | } 41 | return buf 42 | } 43 | 44 | releaser := func(buf []uint8) { 45 | for i := range buf { 46 | buf[i] = 0x0 47 | } 48 | buf = buf[:0] 49 | pool.Put(buf) 50 | } 51 | 52 | // main ctx initialization 53 | // 1024 - asynchronous operations queue size; 54 | // 1024*4 - the byte limit that determines whether to switch to a new OS thread for synchronous I/O operation. 
	// If the buffer size for I/O does not exceed this limit, then the Go scheduler will not switch to a new OS thread;
	// allocator, releaser - memory operations for a buffer that can be used in an I/O operation;
	// allocator && releaser can be nil, if you don't use special allocators
	if err := asyncfs.NewCtx(1024, 1024*4, allocator, releaser); err != nil {
		panic(err.Error())
	}
	// the last argument is the mode of working with the file: synchronous or asynchronous
	f, err := asyncfs.Create("/tmp/test_asyncfs", asyncfs.ModeAsync)
	if err != nil {
		panic(err.Error())
	}

	// aio on Linux requires I/O to be 512-byte aligned
	// AllocBuf makes and aligns the byte array
	buf := asyncfs.AllocBuf(4096)
	defer asyncfs.ReleaseBuf(buf)

	// fill 'buf' with your payload...
	// async mode: first 'n' will always be 0
	_, err = f.Write(buf)
	if err != nil {
		panic(err.Error())
	}
	// do something ...
	for {
		n, ok, err := f.LastOp()
		if !ok {
			// do something ...
			continue
		}
		if err != nil {
			panic(err.Error())
		}
		fmt.Printf("%d bytes has been written\n", n)
		break
	}

	// ok, let's try to read
	f.Seek(0, 0)
	out := asyncfs.AllocBuf(4096)
	defer asyncfs.ReleaseBuf(out)
	_, err = f.Read(out)
	if err != nil {
		panic(err.Error())
	}
	// do something ...
	for {
		n, ok, err := f.LastOp()
		if !ok {
			// do something ...
105 | continue 106 | } 107 | if err != nil { 108 | panic(err.Error) 109 | } 110 | fmt.Printf("%d bytes has been read\n", n) 111 | break 112 | } 113 | // out has been filled 114 | } 115 | ``` 116 | -------------------------------------------------------------------------------- /async_unix_bsd.go: -------------------------------------------------------------------------------- 1 | //+build freebsd darwin 2 | 3 | package asyncfs 4 | 5 | import ( 6 | "os" 7 | ) 8 | 9 | func openAsync(path string, flag int, perm os.FileMode) (*os.File, error) { 10 | return os.OpenFile(path, flag, perm) 11 | } 12 | 13 | func (f *File) writeAsync(data []uint8) (int, error) { 14 | f.mtx.Lock() 15 | f.lastSyncSeek = true 16 | n, err := f.asyncRWAio(OpWrite, data) 17 | if err == nil { 18 | f.lastAsyncOpState.complete = false 19 | f.lastAsyncOpState.lastOp = OpWrite 20 | } 21 | f.mtx.Unlock() 22 | return n, err 23 | } 24 | 25 | func (f *File) readAsync(data []uint8) (int, error) { 26 | f.mtx.Lock() 27 | f.lastSyncSeek = true 28 | n, err := f.asyncRWAio(OpRead, data) 29 | if err == nil { 30 | f.lastAsyncOpState.complete = false 31 | f.lastAsyncOpState.lastOp = OpRead 32 | } 33 | f.mtx.Unlock() 34 | return n, err 35 | } 36 | 37 | func (f *File) fillOpState() error { 38 | return f.fillStateAio() 39 | } 40 | 41 | func (f *File) close() error { 42 | return nil 43 | } 44 | -------------------------------------------------------------------------------- /async_unix_bsd_aio.go: -------------------------------------------------------------------------------- 1 | //+build freebsd darwin 2 | 3 | package asyncfs 4 | 5 | //#include 6 | //#include 7 | //#include 8 | //#include 9 | //#cgo LDFLAGS: -lrt 10 | import "C" 11 | import ( 12 | "fmt" 13 | "reflect" 14 | "unsafe" 15 | ) 16 | 17 | // https://github.com/freebsd/freebsd-src/blob/098dbd7ff7f3da9dda03802cdb2d8755f816eada/tests/sys/aio/aio_test.c#L260 18 | func (f *File) asyncRWAio(op uint16, data []uint8) (int, error) { 19 | var cb *C.struct_aiocb 20 | 
cb = (*C.struct_aiocb)(C.malloc(C.size_t(C.sizeof_struct_aiocb))) 21 | C.bzero(unsafe.Pointer(cb), C.size_t(C.sizeof_struct_aiocb)) 22 | cb.aio_buf = unsafe.Pointer(&data[0]) 23 | cb.aio_nbytes = C.size_t(len(data)) 24 | cb.aio_fildes = C.int(f.fd.Fd()) 25 | cb.aio_offset = C.off_t(f.pos) 26 | 27 | var opRes C.int 28 | switch op { 29 | case OpRead: 30 | opRes = C.aio_read(cb) 31 | case OpWrite: 32 | opRes = C.aio_write(cb) 33 | default: 34 | return 0, fmt.Errorf("undefined operation '%d'", op) 35 | } 36 | 37 | if int(opRes) < 0 { 38 | return 0, ErrNotSubmittedAio 39 | } 40 | 41 | c.Lock() 42 | c.operationsFd[unsafe.Pointer(cb)] = opCap{ 43 | f: f, 44 | data: int64(cap(data)), 45 | } 46 | c.Unlock() 47 | 48 | return 0, nil 49 | } 50 | 51 | func (f *File) fillStateAio() error { 52 | c.Lock() 53 | defer c.Unlock() 54 | 55 | if len(c.operationsFd) == 0 { 56 | return nil 57 | } 58 | 59 | for k, fd := range c.operationsFd { 60 | errState := C.aio_error((*C.struct_aiocb)(k)) 61 | if (int)(errState) == -1 { 62 | return ErrAioError 63 | } 64 | if errState == C.EINPROGRESS { 65 | continue 66 | } 67 | 68 | cb := (*C.struct_aiocb)(k) 69 | 70 | aioResult := (int)(C.aio_return(cb)) 71 | if aioResult < 0 { 72 | return ErrAioError 73 | } 74 | 75 | fd.f.mtx.Lock() 76 | if fd.f.lastAsyncOpState.lastOp == OpRead { 77 | var b []uint8 78 | sh := (*reflect.SliceHeader)(unsafe.Pointer(&b)) 79 | sh.Data = uintptr(cb.aio_buf) 80 | sh.Len = int(cb.aio_nbytes) 81 | sh.Cap = int(fd.data) 82 | fd.f.lastAsyncOpState.data = b 83 | fd.f.lastAsyncOpState.eof = cb.aio_nbytes > 0 && aioResult == 0 84 | } 85 | fd.f.lastAsyncOpState.result = int64(aioResult) 86 | fd.f.pos = int64(cb.aio_offset) + int64(aioResult) 87 | fd.f.lastAsyncOpState.complete = true 88 | fd.f.mtx.Unlock() 89 | 90 | C.free(k) 91 | delete(c.operationsFd, k) 92 | } 93 | 94 | return nil 95 | } 96 | -------------------------------------------------------------------------------- /async_unix_bsd_aio_test.go: 
-------------------------------------------------------------------------------- 1 | // +build freebsd darwin 2 | 3 | package asyncfs 4 | 5 | import ( 6 | "github.com/stretchr/testify/assert" 7 | "io/ioutil" 8 | "os" 9 | "syscall" 10 | "testing" 11 | "time" 12 | ) 13 | 14 | func Test_bsd_asyncRWAio(t *testing.T) { 15 | prepare(t, 0) 16 | 17 | f, err := Open("/tmp/aio", syscall.O_RDWR|syscall.O_CREAT, 0644, ModeAsync) 18 | assert.NoError(t, err) 19 | defer os.Remove(f.path) 20 | 21 | buf := make([]uint8, 2000) 22 | buf[0] = 'a' 23 | n, err := f.asyncRWAio(OpWrite, buf) 24 | assert.Equal(t, 0, n) 25 | assert.NoError(t, err) 26 | assert.NotEqual(t, 0, len(c.operationsFd)) 27 | } 28 | 29 | func Test_bsd_asyncWrite(t *testing.T) { 30 | prepare(t, 0) 31 | 32 | f, err := Open("/tmp/aio", syscall.O_RDWR|syscall.O_CREAT, 0644, ModeAsync) 33 | assert.NoError(t, err) 34 | defer os.Remove(f.path) 35 | 36 | buf := make([]uint8, 2000) 37 | for i := range buf { 38 | buf[i] = uint8(i) 39 | } 40 | 41 | _, err = f.asyncRWAio(OpWrite, buf) 42 | assert.NoError(t, err) 43 | 44 | t1 := time.Now() 45 | for i := 0; ; i++ { 46 | n, ok, err := f.LastOp() 47 | if !ok { 48 | if time.Now().Sub(t1) > time.Second { 49 | t.Fatal("too long") 50 | } 51 | continue 52 | } 53 | assert.Equal(t, int64(0), n) 54 | assert.NoError(t, err) 55 | break 56 | } 57 | 58 | out, err := ioutil.ReadFile(f.path) 59 | assert.NoError(t, err) 60 | for i := range out { 61 | assert.Equal(t, uint8(i), buf[i]) 62 | } 63 | } 64 | 65 | func Test_bsd_asyncRead(t *testing.T) { 66 | prepare(t, 0) 67 | 68 | f, err := Open("/tmp/aio", syscall.O_RDWR|syscall.O_CREAT, 0644, ModeAsync) 69 | assert.NoError(t, err) 70 | defer os.Remove(f.path) 71 | 72 | buf := make([]uint8, 2000) 73 | for i := range buf { 74 | buf[i] = uint8(i) 75 | } 76 | 77 | _, err = f.asyncRWAio(OpWrite, buf) 78 | f.lastAsyncOpState.lastOp = OpWrite 79 | f.lastAsyncOpState.complete = false 80 | assert.NoError(t, err) 81 | t1 := time.Now() 82 | for i := 0; ; i++ { 
83 | n, ok, err := f.LastOp() 84 | if !ok { 85 | if time.Now().Sub(t1) > time.Second { 86 | t.Fatal("too long") 87 | } 88 | continue 89 | } 90 | assert.Equal(t, int64(2000), n) 91 | assert.NoError(t, err) 92 | break 93 | } 94 | 95 | buf = make([]uint8, 2000) 96 | f.pos = 0 97 | _, err = f.asyncRWAio(OpRead, buf) 98 | assert.NoError(t, err) 99 | f.lastAsyncOpState.complete = false 100 | f.lastAsyncOpState.lastOp = OpRead 101 | t1 = time.Now() 102 | for i := 0; ; i++ { 103 | err := f.fillOpState() 104 | assert.NoError(t, err) 105 | if !f.lastAsyncOpState.complete { 106 | if time.Now().Sub(t1) > time.Second { 107 | t.Fatal("too long") 108 | } 109 | continue 110 | } 111 | break 112 | } 113 | 114 | for i := range buf { 115 | assert.Equal(t, uint8(i), buf[i]) 116 | } 117 | } 118 | 119 | func Test_fillStatesAio(t *testing.T) { 120 | prepare(t, 0) 121 | 122 | f, err := Open("/tmp/aio", syscall.O_RDWR|syscall.O_CREAT, 0644, ModeAsync) 123 | assert.NoError(t, err) 124 | defer os.Remove(f.path) 125 | 126 | buf := make([]uint8, 4000) 127 | n, err := f.writeAsync(buf) 128 | assert.Equal(t, 0, n) 129 | assert.NoError(t, err) 130 | assert.False(t, f.lastAsyncOpState.complete) 131 | 132 | t1 := time.Now() 133 | for i := 0; ; i++ { 134 | err := f.fillOpState() 135 | assert.NoError(t, err) 136 | if !f.lastAsyncOpState.complete { 137 | if time.Now().Sub(t1) > time.Second { 138 | t.Fatal("too long") 139 | } 140 | continue 141 | } 142 | assert.Equal(t, int64(4000), f.pos) 143 | break 144 | } 145 | } 146 | -------------------------------------------------------------------------------- /async_unix_notbsd.go: -------------------------------------------------------------------------------- 1 | // +build linux android 2 | 3 | package asyncfs 4 | 5 | import ( 6 | "fmt" 7 | "os" 8 | "syscall" 9 | ) 10 | 11 | func openAsync(path string, flag int, perm os.FileMode) (*os.File, error) { 12 | if c.asyncMode == asyncAio { 13 | flag |= syscall.O_DIRECT 14 | } 15 | return os.OpenFile(path, flag, 
perm) 16 | } 17 | 18 | func (f *File) writeAsync(data []uint8) (int, error) { 19 | if c.busy() { 20 | return 0, ErrCtxBusy 21 | } 22 | 23 | f.lastSyncSeek = true 24 | 25 | f.mtx.Lock() 26 | defer f.mtx.Unlock() 27 | 28 | var err error 29 | switch c.asyncMode { 30 | case asyncIoUring: 31 | _, err = f.asyncWriteIoUring(data) 32 | case asyncAio: 33 | _, err = f.asyncWriteAio(data) 34 | default: 35 | return 0, fmt.Errorf("unknown async mode '%v'", c.asyncMode) 36 | } 37 | if err != nil { 38 | return 0, err 39 | } 40 | 41 | f.lastAsyncOpState.complete = false 42 | f.lastAsyncOpState.lastOp = OpWrite 43 | 44 | return 0, nil 45 | } 46 | 47 | func (f *File) readAsync(data []uint8) (int, error) { 48 | if c.busy() { 49 | return 0, ErrCtxBusy 50 | } 51 | 52 | f.mtx.Lock() 53 | defer f.mtx.Unlock() 54 | 55 | f.lastSyncSeek = true 56 | 57 | var err error 58 | switch c.asyncMode { 59 | case asyncIoUring: 60 | _, err = f.asyncReadIoUring(data) 61 | case asyncAio: 62 | _, err = f.asyncReadAio(data) 63 | default: 64 | return 0, fmt.Errorf("unknown async mode '%v'", c.asyncMode) 65 | } 66 | if err != nil { 67 | return 0, err 68 | } 69 | 70 | f.lastAsyncOpState.complete = false 71 | f.lastAsyncOpState.lastOp = OpRead 72 | 73 | return 0, nil 74 | } 75 | 76 | func (f *File) fillOpState() error { 77 | var err error 78 | switch c.asyncMode { 79 | case asyncIoUring: 80 | fillStatesIoUring(c) 81 | case asyncAio: 82 | err = fillStatesAio(c) 83 | default: 84 | err = fmt.Errorf("unknown async mode '%v'", c.asyncMode) 85 | } 86 | return err 87 | } 88 | 89 | func (f *File) close() error { 90 | switch c.asyncMode { 91 | case asyncIoUring, asyncAio: 92 | return f.fd.Close() 93 | default: 94 | return fmt.Errorf("unknown async mode '%v'", c.asyncMode) 95 | } 96 | } 97 | -------------------------------------------------------------------------------- /async_unix_notbsd_aio.go: -------------------------------------------------------------------------------- 1 | // +build linux android 2 | 3 | package 
asyncfs 4 | 5 | import ( 6 | "errors" 7 | "reflect" 8 | "syscall" 9 | "unsafe" 10 | ) 11 | 12 | const ( 13 | iocbCmdPread = uint16(0) 14 | iocbCmdPwrite = uint16(1) 15 | iocbCmdFsync = uint16(2) 16 | iocbCmdFdsync = uint16(3) 17 | ) 18 | 19 | var bigEndian = (*(*[2]uint8)(unsafe.Pointer(&[]uint16{1}[0])))[0] == 0 20 | var ( 21 | ErrUnalignedData = errors.New("data is unaligned") 22 | ) 23 | 24 | type ( 25 | // https://github.com/torvalds/linux/blob/fcadab740480e0e0e9fa9bd272acd409884d431a/include/uapi/linux/aio_abi.h#L73 26 | // https://android.googlesource.com/kernel/mediatek/+/android-mediatek-sprout-3.4-kitkat-mr2/include/linux/aio_abi.h 27 | aiocb struct { 28 | aioData uint64 // data to be returned in event's data 29 | 30 | // key consists of two fields: the key and the flags, both are 32bits ; order is depends of endianness 31 | // https://github.com/torvalds/linux/blob/fcadab740480e0e0e9fa9bd272acd409884d431a/include/uapi/linux/aio_abi.h#L77 32 | // we MUST serialize it before syscall 33 | aioKey uint64 // the kernel sets aio_key to the req 34 | 35 | aioOpcode uint16 // Operation to be performed 36 | aioReqPrio int16 // Request priority 37 | aioFields uint32 // File descriptor 38 | aioBuf uint64 // Location of buffer 39 | aioNbytes uint64 // Length of transfer 40 | aioOffset int64 // File offset 41 | aioReserved2 uint64 // reserved 42 | aioFlags int32 // flags for the "struct iocb" 43 | aioResfd uint32 // if the IOCB_FLAG_RESFD flag of "aio_flags" is set, this is an eventfd to signal AIO readiness to 44 | } 45 | 46 | aioEvent struct { 47 | data uint64 // the data field from the iocb 48 | obj uint64 // what iocb this event came from 49 | res int64 // result code for this event 50 | res2 int64 // secondary result 51 | } 52 | ) 53 | 54 | func (f *File) asyncRWAio(op uint16, data []uint8) (int, error) { 55 | sz := len(data) 56 | if sz == 0 { 57 | return 0, nil 58 | } 59 | 60 | if len(data)%c.align != 0 || uint64(uintptr(unsafe.Pointer(&data[0])))%uint64(c.align) 
!= 0 { 61 | return 0, ErrUnalignedData 62 | } 63 | 64 | cb := &aiocb{ 65 | aioFields: uint32(f.fd.Fd()), 66 | aioOpcode: op, 67 | aioBuf: uint64(uintptr(unsafe.Pointer(&data[0]))), 68 | aioNbytes: uint64(sz), 69 | aioOffset: f.pos, 70 | aioData: uint64(cap(data)), 71 | } 72 | c.Lock() 73 | c.operationsFd[unsafe.Pointer(cb)] = f 74 | c.Unlock() 75 | n, _, e1 := syscall.RawSyscall(syscall.SYS_IO_SUBMIT, uintptr(c.aio), 1, uintptr(unsafe.Pointer(&cb))) 76 | if e1 != 0 { 77 | return 0, e1 78 | } 79 | if n != 1 { 80 | return 0, ErrNotSubmittedAio 81 | } 82 | return 0, nil 83 | } 84 | 85 | func (f *File) asyncWriteAio(data []uint8) (int, error) { 86 | return f.asyncRWAio(iocbCmdPwrite, data) 87 | } 88 | 89 | func (f *File) asyncReadAio(data []uint8) (int, error) { 90 | return f.asyncRWAio(iocbCmdPread, data) 91 | } 92 | 93 | func fillStatesAio(c *ctx) error { 94 | var ts = unsafe.Pointer(&syscall.Timespec{ 95 | Sec: 0, 96 | Nsec: 0, 97 | }) 98 | for { 99 | sz := uintptr(c.sz) 100 | if c.sz > 64 { 101 | sz = uintptr(64) 102 | } 103 | e := make([]aioEvent, sz) 104 | n, _, e1 := syscall.Syscall6(syscall.SYS_IO_GETEVENTS, uintptr(c.aio), 1, sz, uintptr(unsafe.Pointer(&e[0])), uintptr(ts), 0) 105 | if e1 != 0 { 106 | return e1 107 | } 108 | if n > 0 { 109 | c.Lock() 110 | for i := 0; i < int(n); i++ { 111 | objId := e[i].obj 112 | aioCbPtr := unsafe.Pointer(uintptr(objId)) 113 | fd, ok := c.operationsFd[aioCbPtr] 114 | if !ok { 115 | continue 116 | } 117 | aioCb := (*aiocb)(aioCbPtr) 118 | 119 | fd.mtx.Lock() 120 | if fd.lastAsyncOpState.lastOp == OpRead { 121 | var b []uint8 122 | sh := (*reflect.SliceHeader)(unsafe.Pointer(&b)) 123 | sh.Data = uintptr(aioCb.aioBuf) 124 | sh.Len = int(aioCb.aioNbytes) 125 | sh.Cap = int(e[i].data) 126 | fd.lastAsyncOpState.data = b 127 | fd.lastAsyncOpState.eof = aioCb.aioNbytes > 0 && e[i].res == 0 128 | } 129 | fd.lastAsyncOpState.result = e[i].res 130 | fd.pos = aioCb.aioOffset + e[i].res 131 | fd.lastAsyncOpState.complete = true 132 | 
fd.mtx.Unlock() 133 | 134 | delete(c.operationsFd, aioCbPtr) 135 | } 136 | c.Unlock() 137 | if n >= sz { 138 | continue 139 | } 140 | } 141 | break 142 | } 143 | return nil 144 | } 145 | -------------------------------------------------------------------------------- /async_unix_notbsd_aio_test.go: -------------------------------------------------------------------------------- 1 | // +build linux android 2 | 3 | package asyncfs 4 | 5 | import ( 6 | "github.com/stretchr/testify/assert" 7 | "os" 8 | "runtime" 9 | "syscall" 10 | "testing" 11 | "time" 12 | "unsafe" 13 | ) 14 | 15 | func Test_asyncRWAio(t *testing.T) { 16 | if runtime.GOOS != "linux" && runtime.GOOS != "android" { 17 | t.Skip("skipping; linux-only test") 18 | } 19 | const sz = 8 20 | var c1 ctx 21 | err := c1.initAio(sz) 22 | assert.NoError(t, err) 23 | c1.sz = sz 24 | c1.align = 512 25 | c1.operationsFd = make(map[unsafe.Pointer]*File, sz) 26 | c1.alignedBuffers = make(map[unsafe.Pointer]slice) 27 | c = &c1 28 | 29 | f, err := Open("/tmp/aio", syscall.O_RDWR|syscall.O_CREAT, 0644, ModeAsync) 30 | assert.NoError(t, err) 31 | defer os.Remove(f.path) 32 | 33 | buf := make([]uint8, 4095) 34 | n, err := f.asyncRWAio(iocbCmdPwrite, buf) 35 | assert.Equal(t, 0, n) 36 | assert.EqualError(t, err, ErrUnalignedData.Error()) 37 | 38 | c.align = 11 39 | buf = make([]uint8, 4096) 40 | n, err = f.asyncRWAio(iocbCmdPwrite, buf) 41 | assert.Equal(t, 0, n) 42 | assert.EqualError(t, err, ErrUnalignedData.Error()) 43 | 44 | c.align = 512 45 | buf = AllocBuf(4096) 46 | n, err = f.asyncRWAio(iocbCmdPwrite, buf) 47 | assert.Equal(t, 0, n) 48 | assert.NoError(t, err) 49 | assert.Equal(t, 1, len(c.operationsFd)) 50 | } 51 | 52 | func Test_fillStatesAio(t *testing.T) { 53 | if runtime.GOOS != "linux" && runtime.GOOS != "android" { 54 | t.Skip("skipping; linux-only test") 55 | } 56 | 57 | const sz = 8 58 | var c1 ctx 59 | err := c1.initAio(sz) 60 | assert.NoError(t, err) 61 | c = &c1 62 | c.sz = sz 63 | c.align = 512 64 | 
c.operationsFd = make(map[unsafe.Pointer]*File, sz) 65 | c.alignedBuffers = make(map[unsafe.Pointer]slice) 66 | 67 | f, err := Open("/tmp/aio", syscall.O_RDWR|syscall.O_CREAT, 0644, ModeAsync) 68 | assert.NoError(t, err) 69 | defer os.Remove(f.path) 70 | 71 | buf := make([]uint8, 4096) 72 | n, err := f.writeAsync(buf) 73 | assert.Equal(t, 0, n) 74 | assert.NoError(t, err) 75 | assert.False(t, f.lastAsyncOpState.complete) 76 | 77 | time.Sleep(time.Millisecond * 10) 78 | 79 | err = fillStatesAio(c) 80 | assert.NoError(t, err) 81 | assert.True(t, f.lastAsyncOpState.complete) 82 | assert.Equal(t, int64(4096), f.pos) 83 | 84 | for i := 0; i < sz+1; i++ { 85 | n, err = f.writeAsync(buf) 86 | assert.Equal(t, 0, n) 87 | assert.NoError(t, err) 88 | f.pos += int64(len(buf)) 89 | } 90 | f.pos = 0 91 | time.Sleep(time.Millisecond * 10) 92 | err = fillStatesAio(c) 93 | assert.NoError(t, err) 94 | assert.True(t, f.lastAsyncOpState.complete) 95 | assert.Equal(t, int64(4096*(sz+1)+4096), f.pos) 96 | 97 | c.aio = 0 98 | err = fillStatesAio(c) 99 | assert.Error(t, err) 100 | assert.EqualError(t, syscall.EINVAL, err.Error()) 101 | } 102 | -------------------------------------------------------------------------------- /async_unix_notbsd_io_uring.go: -------------------------------------------------------------------------------- 1 | // +build linux android 2 | 3 | package asyncfs 4 | 5 | // #include 6 | // #include 7 | // 8 | // int nsig() { 9 | // return _NSIG; 10 | // } 11 | // 12 | // int syscall_nums(int *io_uring_setup, int *io_uring_enter) { 13 | // #if defined(__NR_io_uring_setup) && defined(__NR_io_uring_enter) 14 | // *io_uring_setup = __NR_io_uring_setup; 15 | // *io_uring_enter = __NR_io_uring_enter; 16 | // #else 17 | // *io_uring_setup = -1; 18 | // *io_uring_enter = -1; 19 | // #endif 20 | // } 21 | import "C" 22 | 23 | import ( 24 | "errors" 25 | "reflect" 26 | "syscall" 27 | "unsafe" 28 | ) 29 | 30 | const ( 31 | ioringOpNop = 0 32 | ioringOpReadv = 1 33 | 
ioringOpWritev = 2 34 | ) 35 | 36 | const ( 37 | ioringFeatSingleMmap = uint32(0x1) 38 | ) 39 | 40 | const ( 41 | ioringOffSqRing = uint64(0x0) 42 | ioringOffCqRing = uint64(0x8000000) 43 | ioringOffSqes = uint64(0x10000000) 44 | ) 45 | 46 | var ( 47 | nSig int 48 | ioUringSetupSys int 49 | ioUringEnterSys int 50 | ) 51 | 52 | var errFailedSq = errors.New("bad sync internal state with kernel ring state on the SQ side") 53 | var errBadSize = errors.New("bad size of the queue") 54 | 55 | func init() { 56 | nSig = (int)(C.nsig()) 57 | var ioSetup, ioEnter C.int 58 | C.syscall_nums(&ioSetup, &ioEnter) 59 | ioUringSetupSys, ioUringEnterSys = (int)(ioSetup), (int)(ioEnter) 60 | } 61 | 62 | type ( 63 | sqe struct { 64 | opcode uint8 /* type of operation for this sqe */ 65 | flags uint8 /* IOSQE_ flags */ 66 | ioprio uint16 /* ioprio for the request */ 67 | fd int32 /* file descriptor to do IO on */ 68 | off uint64 /* offset into file */ 69 | addr uint64 /* pointer to buffer or iovecs */ 70 | len uint32 /* buffer size or number of iovecs */ 71 | sqeFlags uint32 72 | userData uint64 /* data to be passed back at completion time */ 73 | pad [3]uint64 74 | } 75 | 76 | sQueue struct { 77 | khead *uint32 78 | ktail *uint32 79 | kringMask *uint32 80 | kringEntries *uint32 81 | kflags *uint32 82 | kdropped *uint32 83 | array []uint32 84 | sqes []sqe 85 | sqeHead uint32 86 | sqeTail uint32 87 | ringSz uint32 88 | sqRingFd unsafe.Pointer 89 | pad [4]uint32 90 | } 91 | 92 | // IO completion data structure (Completion Queue Entry) 93 | cqe struct { 94 | userData uint64 95 | res int32 96 | flags uint32 97 | } 98 | 99 | cQueue struct { 100 | khead *uint32 101 | ktail *uint32 102 | kringMask *uint32 103 | kringEntries *uint32 104 | kflags *uint32 105 | koverflow *uint32 106 | cqes []cqe 107 | ringSz uint32 108 | cqRingFd unsafe.Pointer 109 | pad [4]uint32 110 | } 111 | 112 | // offsets for mmap 113 | ioSqOffsets struct { 114 | head uint32 115 | tail uint32 116 | ringMask uint32 117 | 
ringEntries uint32 118 | flags uint32 119 | dropped uint32 120 | array uint32 121 | resv1 uint32 122 | resv2 uint64 123 | } 124 | 125 | ioCqOffsets struct { 126 | head uint32 127 | tail uint32 128 | ringMask uint32 129 | ringEntries uint32 130 | overflow uint32 131 | cqes uint32 132 | resv [2]uint64 133 | } 134 | 135 | ioParams struct { 136 | sqEntries uint32 137 | cqEntries uint32 138 | flags uint32 139 | sqThreadCpu uint32 140 | sqThreadIdle uint32 141 | features uint32 142 | resv [4]uint32 143 | sqOff ioSqOffsets 144 | cqOff ioCqOffsets 145 | } 146 | 147 | ring struct { 148 | sq sQueue 149 | cq cQueue 150 | flags uint32 151 | ringFd int 152 | features uint32 153 | } 154 | 155 | cqUserData struct { 156 | buf syscall.Iovec 157 | cap int 158 | } 159 | ) 160 | 161 | func unmap(sq *sQueue, cq *cQueue) { 162 | _, _, _ = syscall.RawSyscall(syscall.SYS_MUNMAP, uintptr(sq.sqRingFd), uintptr(sq.ringSz), 0) 163 | if cq.cqRingFd != nil && cq.cqRingFd != sq.sqRingFd { 164 | _, _, _ = syscall.RawSyscall(syscall.SYS_MUNMAP, uintptr(cq.cqRingFd), uintptr(cq.ringSz), 0) 165 | } 166 | } 167 | 168 | func setup(entries uint32, r *ring, flags uint32) error { 169 | if entries != 0 && (entries&(entries-1)) != 0 { 170 | return errBadSize 171 | } 172 | 173 | if ioUringSetupSys <= 0 || ioUringEnterSys <= 0 { 174 | return ErrNotSupported 175 | } 176 | 177 | var p ioParams 178 | r1, _, e := syscall.RawSyscall(uintptr(ioUringSetupSys), uintptr(entries), uintptr(unsafe.Pointer(&p)), 0) 179 | if e != 0 { 180 | if e == syscall.ENOSYS { 181 | return ErrNotSupported 182 | } 183 | return e 184 | } 185 | 186 | r.ringFd = int(r1) 187 | r.sq.ringSz = p.sqOff.array + p.sqEntries*uint32(unsafe.Sizeof(uint32(0))) 188 | r.cq.ringSz = p.cqOff.cqes + p.cqEntries*uint32(unsafe.Sizeof(cqe{})) 189 | 190 | sqPtr, _, e := syscall.RawSyscall6( 191 | syscall.SYS_MMAP, 192 | 0, 193 | uintptr(r.sq.ringSz), 194 | syscall.PROT_READ|syscall.PROT_WRITE, 195 | syscall.MAP_SHARED|syscall.MAP_POPULATE, 196 | 
uintptr(r.ringFd), 197 | uintptr(ioringOffSqRing)) 198 | if e != 0 { 199 | return e 200 | } 201 | r.sq.sqRingFd = unsafe.Pointer(sqPtr) 202 | 203 | if p.features&ioringFeatSingleMmap != 0 { 204 | r.cq.cqRingFd = r.sq.sqRingFd 205 | } else { 206 | cqPtr, _, e := syscall.RawSyscall6( 207 | syscall.SYS_MMAP, 208 | 0, 209 | uintptr(r.cq.ringSz), 210 | syscall.PROT_READ|syscall.PROT_WRITE, 211 | syscall.MAP_SHARED|syscall.MAP_POPULATE, 212 | uintptr(r.ringFd), 213 | uintptr(ioringOffCqRing)) 214 | if e != 0 { 215 | unmap(&r.sq, &r.cq) 216 | return e 217 | } 218 | r.cq.cqRingFd = unsafe.Pointer(cqPtr) 219 | } 220 | 221 | sq := &r.sq 222 | sq.khead = (*uint32)(unsafe.Pointer(uintptr(r.sq.sqRingFd) + uintptr(p.sqOff.head))) 223 | sq.ktail = (*uint32)(unsafe.Pointer(uintptr(r.sq.sqRingFd) + uintptr(p.sqOff.tail))) 224 | sq.kringMask = (*uint32)(unsafe.Pointer(uintptr(r.sq.sqRingFd) + uintptr(p.sqOff.ringMask))) 225 | sq.kringEntries = (*uint32)(unsafe.Pointer(uintptr(r.sq.sqRingFd) + uintptr(p.sqOff.ringEntries))) 226 | sq.kflags = (*uint32)(unsafe.Pointer(uintptr(r.sq.sqRingFd) + uintptr(p.sqOff.flags))) 227 | sq.kdropped = (*uint32)(unsafe.Pointer(uintptr(r.sq.sqRingFd) + uintptr(p.sqOff.dropped))) 228 | var arr []uint32 229 | arrHdr := (*reflect.SliceHeader)(unsafe.Pointer(&arr)) 230 | arrHdr.Data = uintptr(r.sq.sqRingFd) + uintptr(p.sqOff.array) 231 | arrHdr.Cap = int(entries) 232 | arrHdr.Len = int(entries) 233 | sq.array = arr 234 | 235 | sqes, _, e := syscall.RawSyscall6( 236 | syscall.SYS_MMAP, 237 | 0, 238 | uintptr(p.sqEntries*uint32(unsafe.Sizeof(sqe{}))), 239 | syscall.PROT_READ|syscall.PROT_WRITE, 240 | syscall.MAP_SHARED|syscall.MAP_POPULATE, 241 | uintptr(r.ringFd), 242 | uintptr(ioringOffSqes)) 243 | if e != 0 { 244 | unmap(&r.sq, &r.cq) 245 | return e 246 | } 247 | var b []sqe 248 | hdr := (*reflect.SliceHeader)(unsafe.Pointer(&b)) 249 | hdr.Data = sqes 250 | hdr.Cap = int(p.sqEntries) 251 | hdr.Len = int(p.sqEntries) 252 | sq.sqes = b 253 | 254 | cq := 
&r.cq 255 | cq.khead = (*uint32)(unsafe.Pointer(uintptr(r.cq.cqRingFd) + uintptr(p.cqOff.head))) 256 | cq.ktail = (*uint32)(unsafe.Pointer(uintptr(r.cq.cqRingFd) + uintptr(p.cqOff.tail))) 257 | cq.kringMask = (*uint32)(unsafe.Pointer(uintptr(r.cq.cqRingFd) + uintptr(p.cqOff.ringMask))) 258 | cq.kringEntries = (*uint32)(unsafe.Pointer(uintptr(r.cq.cqRingFd) + uintptr(p.cqOff.ringEntries))) 259 | cq.koverflow = (*uint32)(unsafe.Pointer(uintptr(r.cq.cqRingFd) + uintptr(p.cqOff.overflow))) 260 | var cqeSlice []cqe 261 | cqHdr := (*reflect.SliceHeader)(unsafe.Pointer(&cqeSlice)) 262 | cqHdr.Data = uintptr(r.cq.cqRingFd) + uintptr(p.cqOff.cqes) 263 | cqHdr.Len = int(p.cqEntries) 264 | cqHdr.Cap = int(p.cqEntries) 265 | cq.cqes = cqeSlice 266 | 267 | p.flags = flags 268 | r.features = p.features 269 | 270 | return nil 271 | } 272 | 273 | func getSqe(r *ring) *sqe { 274 | sq := &r.sq 275 | head := *sq.khead 276 | next := sq.sqeTail + 1 277 | var s *sqe 278 | if next-head <= *sq.kringEntries { 279 | s = &sq.sqes[sq.sqeTail&*sq.kringMask] 280 | sq.sqeTail = next 281 | } 282 | return s 283 | } 284 | 285 | func rw(op uint8, ioSqe *sqe, fd int, addr unsafe.Pointer, userData unsafe.Pointer, len uint32, offset int64) bool { 286 | if op != ioringOpReadv && op != ioringOpWritev { 287 | return false 288 | } 289 | if ioSqe == nil { 290 | return false 291 | } 292 | if addr == nil || userData == nil { 293 | return false 294 | } 295 | ioSqe.opcode = op 296 | ioSqe.flags = 0 297 | ioSqe.ioprio = 0 298 | ioSqe.fd = int32(fd) 299 | ioSqe.off = uint64(offset) 300 | ioSqe.addr = uint64(uintptr(addr)) 301 | ioSqe.len = len 302 | ioSqe.sqeFlags = 0 303 | ioSqe.userData = uint64(uintptr(userData)) 304 | ioSqe.pad[0] = 0 305 | ioSqe.pad[1] = 0 306 | ioSqe.pad[2] = 0 307 | return true 308 | } 309 | 310 | func (f *File) asyncRWIoUring(op uint8, data []uint8) (int, error) { 311 | userData, ok := c.ioUringUserDataPool.Get().(*cqUserData) 312 | if !ok || userData == nil { 313 | userData = 
&cqUserData{} 314 | } 315 | userData.buf = newIovec(&data[0], len(data)) 316 | userData.cap = cap(data) 317 | 318 | c.Lock() 319 | c.currentCnt++ 320 | if !rw(op, getSqe(c.r), int(f.fd.Fd()), unsafe.Pointer(&userData.buf), unsafe.Pointer(userData), 1, f.pos) { 321 | c.currentCnt-- 322 | c.Unlock() 323 | return 0, ErrNotSubmittedIoUring 324 | } 325 | c.operationsFd[unsafe.Pointer(userData)] = f 326 | submit := flushSq(c.r) 327 | c.Unlock() 328 | if submit == 0 { 329 | return 0, errFailedSq 330 | } 331 | _, _, e := syscall.RawSyscall6(uintptr(ioUringEnterSys), uintptr(c.r.ringFd), uintptr(submit), 0, 0, 0, uintptr(nSig/8)) 332 | if e != 0 { 333 | return 0, e 334 | } 335 | return 0, nil 336 | } 337 | 338 | func (f *File) asyncWriteIoUring(data []uint8) (int, error) { 339 | return f.asyncRWIoUring(ioringOpWritev, data) 340 | } 341 | 342 | func (f *File) asyncReadIoUring(data []uint8) (int, error) { 343 | return f.asyncRWIoUring(ioringOpReadv, data) 344 | } 345 | 346 | func flushSq(r *ring) int { 347 | sq := &r.sq 348 | mask := *sq.kringMask 349 | ktail := *sq.ktail 350 | toSubmit := sq.sqeTail - sq.sqeHead 351 | for ; toSubmit > 0; toSubmit-- { 352 | sq.array[ktail&mask] = sq.sqeHead & mask 353 | ktail++ 354 | sq.sqeHead++ 355 | } 356 | *sq.ktail = ktail 357 | return int(ktail - *sq.khead) 358 | } 359 | 360 | func fillStatesIoUring(c *ctx) { 361 | c.Lock() 362 | defer c.Unlock() 363 | 364 | r := c.r 365 | head := *r.cq.khead 366 | available := *r.cq.ktail - head 367 | 368 | for ; ; head++ { 369 | if *r.cq.ktail-head == 0 { 370 | break 371 | } 372 | cqe := r.cq.cqes[head&(*r.cq.kringMask)] 373 | ptrData := unsafe.Pointer(uintptr(cqe.userData)) 374 | fd, ok := c.operationsFd[ptrData] 375 | if !ok { 376 | continue 377 | } 378 | userData := (*cqUserData)(ptrData) 379 | 380 | fd.mtx.Lock() 381 | if fd.lastAsyncOpState.lastOp == OpRead { 382 | var b []uint8 383 | hdr := (*reflect.SliceHeader)(unsafe.Pointer(&b)) 384 | hdr.Data = uintptr(unsafe.Pointer(userData.buf.Base)) 385 
| hdr.Len = int(userData.buf.Len) 386 | hdr.Cap = userData.cap 387 | fd.lastAsyncOpState.data = b 388 | fd.lastAsyncOpState.eof = userData.buf.Len > 0 && cqe.res == 0 389 | } 390 | fd.lastAsyncOpState.result = int64(cqe.res) 391 | fd.pos += int64(cqe.res) 392 | fd.lastAsyncOpState.complete = true 393 | fd.mtx.Unlock() 394 | 395 | userData.cap = 0 396 | userData.buf.Len = 0 397 | userData.buf.Base = nil 398 | c.ioUringUserDataPool.Put(userData) 399 | 400 | delete(c.operationsFd, ptrData) 401 | } 402 | *r.cq.khead = *r.cq.khead + available 403 | 404 | c.currentCnt -= int(available) 405 | } 406 | -------------------------------------------------------------------------------- /async_unix_notbsd_io_uring_test.go: -------------------------------------------------------------------------------- 1 | // +build linux android 2 | 3 | package asyncfs 4 | 5 | import ( 6 | "github.com/stretchr/testify/assert" 7 | "os" 8 | "runtime" 9 | "sync" 10 | "syscall" 11 | "testing" 12 | "time" 13 | "unsafe" 14 | ) 15 | 16 | func TestFile_Setup(t *testing.T) { 17 | if runtime.GOOS != "linux" && runtime.GOOS != "android" { 18 | t.Skip("skipping; linux-only test") 19 | } 20 | if ioUringSetupSys <= 0 || ioUringEnterSys <= 0 { 21 | t.Skip("io_uring doesn't supported") 22 | } 23 | 24 | const sz = 16 25 | var r ring 26 | 27 | err := setup(sz, &r, 0) 28 | assert.NoError(t, err) 29 | assert.True(t, r.ringFd > 0) 30 | assert.True(t, r.sq.ringSz > 0) 31 | assert.True(t, r.cq.ringSz > 0) 32 | assert.NotNil(t, r.sq.sqRingFd) 33 | 34 | assert.NotNil(t, r.cq.cqRingFd) 35 | if r.features&ioringFeatSingleMmap != 0 { 36 | assert.Equal(t, r.sq.sqRingFd, r.cq.cqRingFd) 37 | } 38 | 39 | assert.Equal(t, sz, len(r.sq.array)) 40 | assert.Equal(t, sz, len(r.sq.sqes)) 41 | 42 | assert.Greater(t, len(r.cq.cqes), sz) 43 | assert.True(t, len(r.cq.cqes)%2 == 0) 44 | } 45 | 46 | func Test_rw(t *testing.T) { 47 | if runtime.GOOS != "linux" && runtime.GOOS != "android" { 48 | t.Skip("skipping; linux-only test") 49 | } 
50 | if ioUringSetupSys <= 0 || ioUringEnterSys <= 0 { 51 | t.Skip("io_uring doesn't supported") 52 | } 53 | 54 | assert.False(t, rw(0x0, nil, 0, nil, nil, 0, 0)) 55 | assert.False(t, rw(ioringOpReadv, nil, 0, nil, nil, 0, 0)) 56 | assert.False(t, rw(ioringOpReadv, &sqe{}, 0, nil, nil, 0, 0)) 57 | assert.False(t, rw(ioringOpReadv, &sqe{}, 0, unsafe.Pointer(uintptr(0x1)), nil, 0, 0)) 58 | assert.False(t, rw(ioringOpReadv, &sqe{}, 0, nil, unsafe.Pointer(uintptr(0x1)), 0, 0)) 59 | assert.True(t, rw(ioringOpReadv, &sqe{}, 0, unsafe.Pointer(uintptr(0x1)), unsafe.Pointer(uintptr(0x1)), 0, 0)) 60 | } 61 | 62 | func Test_asyncRWIoUring(t *testing.T) { 63 | if runtime.GOOS != "linux" && runtime.GOOS != "android" { 64 | t.Skip("skipping; linux-only test") 65 | } 66 | if ioUringSetupSys <= 0 || ioUringEnterSys <= 0 { 67 | t.Skip("io_uring doesn't supported") 68 | } 69 | 70 | const sz = 16 71 | var r ring 72 | 73 | err := setup(sz, &r, 0) 74 | assert.NoError(t, err) 75 | 76 | c = new(ctx) 77 | c.ioUringUserDataPool = sync.Pool{ 78 | New: func() interface{} { 79 | return &cqUserData{} 80 | }, 81 | } 82 | c.r = &r 83 | 84 | f := &File{ 85 | pos: 100, 86 | } 87 | c.operationsFd = make(map[unsafe.Pointer]*File) 88 | n, err := f.asyncRWIoUring(ioringOpReadv, []uint8{0x0, 0x0, 0x0}) 89 | assert.Equal(t, 0, n) 90 | assert.NoError(t, err) 91 | assert.Equal(t, 1, c.currentCnt) 92 | assert.GreaterOrEqual(t, len(c.operationsFd), 1) 93 | 94 | c.ioUringUserDataPool = sync.Pool{ 95 | New: func() interface{} { 96 | return cqUserData{} // not a pointer 97 | }, 98 | } 99 | n, err = f.asyncRWIoUring(ioringOpNop, []uint8{0x0, 0x0, 0x0}) 100 | assert.Equal(t, 0, n) 101 | assert.Equal(t, ErrNotSubmittedIoUring, err) 102 | assert.Equal(t, 1, c.currentCnt) 103 | 104 | // flush -> toSubmit == 0 105 | *(c.r.sq.ktail) = 0 106 | *(c.r.sq.khead) = 1 107 | c.r.sq.sqeTail = 0 108 | c.r.sq.sqeHead = 0 109 | n, err = f.asyncRWIoUring(ioringOpWritev, []uint8{0x0, 0x0, 0x0}) 110 | assert.Equal(t, 0, n) 111 | 
assert.Equal(t, errFailedSq, err) 112 | assert.Equal(t, 2, c.currentCnt) 113 | } 114 | 115 | func TestFile_fullQueue(t *testing.T) { 116 | if runtime.GOOS != "linux" && runtime.GOOS != "android" { 117 | t.Skip("skipping; linux-only test") 118 | } 119 | if ioUringSetupSys <= 0 || ioUringEnterSys <= 0 { 120 | t.Skip("io_uring doesn't supported") 121 | } 122 | const sz = 8 123 | var err error 124 | err = NewCtx(sz, sameThreadLim, nil, nil) 125 | if c.r == nil { 126 | t.Skip("io_uring doesn't supported") 127 | } 128 | assert.NoError(t, err) 129 | 130 | f, err := Open("/tmp/io_uring_write", syscall.O_RDWR|syscall.O_CREAT, 0644, ModeAsync) 131 | assert.NoError(t, err) 132 | defer os.Remove(f.path) 133 | 134 | buf := make([]uint8, 4096) 135 | for i := 0; i < sz; i++ { 136 | _, err = f.writeAsync(buf) 137 | assert.NoError(t, err) 138 | } 139 | _, err = f.Write(buf) 140 | assert.Error(t, err) 141 | assert.Equal(t, sz, c.currentCnt) 142 | 143 | time.Sleep(time.Millisecond * 100) 144 | 145 | err = f.checkAsyncResult() 146 | assert.NoError(t, err) 147 | assert.Equal(t, 0, c.currentCnt) 148 | 149 | _, err = f.writeAsync(buf) 150 | assert.NoError(t, err) 151 | assert.Equal(t, 1, c.currentCnt) 152 | } 153 | 154 | func TestFile_cap(t *testing.T) { 155 | if runtime.GOOS != "linux" && runtime.GOOS != "android" { 156 | t.Skip("skipping; linux-only test") 157 | } 158 | if ioUringSetupSys <= 0 || ioUringEnterSys <= 0 { 159 | t.Skip("io_uring doesn't supported") 160 | } 161 | const sz = 8 162 | var err error 163 | err = NewCtx(sz, sameThreadLim, nil, nil) 164 | if c.r == nil { 165 | t.Skip("io_uring doesn't supported") 166 | } 167 | assert.NoError(t, err) 168 | 169 | f, err := Open("/tmp/io_uring_read", syscall.O_RDWR|syscall.O_CREAT, 0644, ModeAsync) 170 | assert.NoError(t, err) 171 | defer os.Remove(f.path) 172 | 173 | n, err := f.WriteSync(make([]uint8, 4096)) 174 | assert.Equal(t, 4096, n) 175 | assert.NoError(t, err) 176 | assert.Equal(t, int64(4096), f.pos) 177 | 178 | buf := 
make([]uint8, 2048, 3000) 179 | m, err := f.Seek(0, 0) 180 | assert.Equal(t, int64(0), m) 181 | assert.NoError(t, err) 182 | assert.Equal(t, int64(0), f.pos) 183 | 184 | _, err = f.Read(buf) 185 | assert.NoError(t, err) 186 | time.Sleep(time.Millisecond * 100) 187 | err = f.checkAsyncResult() 188 | assert.NoError(t, err) 189 | } 190 | -------------------------------------------------------------------------------- /async_windows.go: -------------------------------------------------------------------------------- 1 | package asyncfs 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | "syscall" 7 | "unsafe" 8 | ) 9 | 10 | func openAsync(path string, flag int, perm os.FileMode) (*os.File, error) { 11 | p, err := syscall.UTF16PtrFromString(path) 12 | if err != nil { 13 | return nil, err 14 | } 15 | h, err := syscall.CreateFile(p, 16 | syscall.GENERIC_READ|syscall.GENERIC_WRITE, 17 | syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE, 18 | nil, 19 | syscall.OPEN_ALWAYS, 20 | syscall.FILE_ATTRIBUTE_NORMAL|syscall.FILE_FLAG_OVERLAPPED, 21 | 0) 22 | 23 | if err != nil { 24 | return nil, err 25 | } 26 | 27 | return os.NewFile(uintptr(h), path), nil 28 | } 29 | 30 | func (f *File) setOverlapped(r bool, w bool) { 31 | if r { 32 | f.ro = &syscall.Overlapped{} 33 | } 34 | if w { 35 | f.wo = &syscall.Overlapped{} 36 | } 37 | } 38 | 39 | func (f *File) rwAsync(op uint64, data []uint8) (int, error) { 40 | var n uint64 41 | var lastOp int 42 | var ov *syscall.Overlapped 43 | 44 | switch op { 45 | case c.nWriteFile: 46 | lastOp = OpWrite 47 | ov = f.wo 48 | case c.nReadFile: 49 | lastOp = OpRead 50 | ov = f.ro 51 | default: 52 | return 0, ErrUnknownOperation 53 | } 54 | 55 | f.mtx.Lock() 56 | defer f.mtx.Unlock() 57 | 58 | f.wait = false 59 | f.processedBytes = 0 60 | f.lastSyncSeek = true 61 | f.toRwBytes = uint64(len(data)) 62 | f.lastAsyncOpState.complete = false 63 | f.lastAsyncOpState.lastOp = lastOp 64 | 65 | ov.Internal = 0 66 | ov.InternalHigh = 0 67 | ov.HEvent = 0 68 | ov.Offset = 
uint32(f.pos) 69 | ov.OffsetHigh = uint32(f.pos >> 32) 70 | 71 | r1, _, e := syscall.Syscall6(uintptr(op), 5, f.fd.Fd(), uintptr(unsafe.Pointer(&data[0])), uintptr(len(data)), uintptr(unsafe.Pointer(&n)), uintptr(unsafe.Pointer(ov)), 0) 72 | if r1 == 0 { 73 | if e == syscall.ERROR_IO_PENDING { 74 | f.processedBytes = n 75 | return int(n), nil 76 | } else if e == syscall.ERROR_HANDLE_EOF { 77 | f.lastAsyncOpState.eof = true 78 | return 0, ErrEOF 79 | } else { 80 | return 0, fmt.Errorf("async error: '%s'", e.Error()) 81 | } 82 | } 83 | 84 | f.processedBytes = n 85 | f.lastAsyncOpState.complete = true 86 | f.lastAsyncOpState.result = int64(len(data)) 87 | f.lastAsyncOpState.eof = f.lastAsyncOpState.lastOp == OpRead && n == 0 && f.toRwBytes > 0 88 | 89 | return int(n), nil 90 | } 91 | 92 | func (f *File) writeAsync(data []uint8) (int, error) { 93 | return f.rwAsync(c.nWriteFile, data) 94 | } 95 | 96 | func (f *File) readAsync(data []uint8) (int, error) { 97 | return f.rwAsync(c.nReadFile, data) 98 | } 99 | -------------------------------------------------------------------------------- /ctx.go: -------------------------------------------------------------------------------- 1 | package asyncfs 2 | 3 | import ( 4 | "sync" 5 | "unsafe" 6 | ) 7 | 8 | type ( 9 | asyncOpState struct { 10 | data []uint8 11 | result int64 12 | lastCap int 13 | lastOp int 14 | complete bool 15 | eof bool 16 | } 17 | 18 | slice struct { 19 | b unsafe.Pointer 20 | len int 21 | cap int 22 | } 23 | 24 | baseCtx struct { 25 | bufPoller func(int) []uint8 26 | bufReleaser func([]uint8) 27 | newThread func(int) bool 28 | asyncMode int 29 | align int 30 | sz int 31 | currentCnt int 32 | sync.Mutex 33 | } 34 | ) 35 | 36 | const ( 37 | OpUnknown = 0x0 38 | OpRead = 0x1 39 | OpWrite = 0x2 40 | ) 41 | 42 | var c *ctx 43 | 44 | func NewCtx(sz int, sameThreadLim int, bufPoller func(int) []uint8, bufReleaser func([]uint8)) error { 45 | c = new(ctx) 46 | if err := c.initCtx(sz); err != nil { 47 | return err 48 
| } 49 | c.sz = sz 50 | c.newThread = func(sz int) bool { 51 | return sz > sameThreadLim 52 | } 53 | c.bufPoller = bufPoller 54 | c.bufReleaser = bufReleaser 55 | return nil 56 | } 57 | 58 | func AllocBuf(sz int) []uint8 { 59 | return c.allocBuf(sz) 60 | } 61 | 62 | func ReleaseBuf(b []uint8) { 63 | c.releaseBuf(b) 64 | } 65 | 66 | func Align() int { 67 | return c.align 68 | } 69 | -------------------------------------------------------------------------------- /ctx_unix_bsd.go: -------------------------------------------------------------------------------- 1 | // +build freebsd darwin 2 | 3 | package asyncfs 4 | 5 | import ( 6 | "unsafe" 7 | ) 8 | 9 | type ( 10 | opCap struct { 11 | f *File 12 | data int64 13 | } 14 | 15 | ctx struct { 16 | baseCtx 17 | operationsFd map[unsafe.Pointer]opCap 18 | } 19 | ) 20 | 21 | func (c *ctx) initCtx(sz int) error { 22 | c.asyncMode = asyncAio 23 | c.operationsFd = make(map[unsafe.Pointer]opCap, sz) 24 | return nil 25 | } 26 | 27 | func (c *ctx) allocBuf(sz int) []uint8 { 28 | var buf []uint8 29 | if c.bufPoller != nil { 30 | buf = c.bufPoller(sz) 31 | if len(buf) != sz { 32 | buf = append(buf, make([]uint8, sz-len(buf))...) 
33 | } 34 | } else { 35 | buf = make([]uint8, sz) 36 | } 37 | return buf 38 | } 39 | 40 | func (c *ctx) releaseBuf(b []uint8) { 41 | buf := b 42 | for i := range buf { 43 | buf[i] = 0x0 44 | } 45 | buf = buf[:0] 46 | if c.bufReleaser != nil { 47 | c.bufReleaser(buf) 48 | } 49 | } 50 | 51 | func (c *ctx) busy() bool { 52 | return false 53 | } 54 | -------------------------------------------------------------------------------- /ctx_unix_bsd_test.go: -------------------------------------------------------------------------------- 1 | // +build freebsd darwin 2 | 3 | package asyncfs 4 | 5 | import ( 6 | "github.com/stretchr/testify/assert" 7 | "testing" 8 | "unsafe" 9 | ) 10 | 11 | func steps() []int { 12 | return []int{asyncAio} 13 | } 14 | 15 | func prepare(t *testing.T, mode int) { 16 | var sz int 17 | 18 | if mode == 0 { // windows/bsd 19 | sz = 8 20 | err := NewCtx(sz, sameThreadLim, nil, nil) 21 | assert.NoError(t, err) 22 | return 23 | } 24 | 25 | sz = 8 26 | c = new(ctx) 27 | c.asyncMode = asyncAio 28 | c.operationsFd = make(map[unsafe.Pointer]opCap, sz) 29 | c.sz = sz 30 | c.currentCnt = 0 31 | c.align = 512 32 | c.asyncMode = mode 33 | c.newThread = func(int) bool { 34 | return false 35 | } 36 | } 37 | -------------------------------------------------------------------------------- /ctx_unix_notbsd.go: -------------------------------------------------------------------------------- 1 | // +build linux android 2 | 3 | package asyncfs 4 | 5 | import ( 6 | "reflect" 7 | "sync" 8 | "syscall" 9 | "unsafe" 10 | ) 11 | 12 | type ( 13 | ctx struct { 14 | baseCtx 15 | operationsFd map[unsafe.Pointer]*File 16 | alignedBuffers map[unsafe.Pointer]slice 17 | aio uint64 18 | r *ring 19 | ioUringUserDataPool sync.Pool 20 | } 21 | ) 22 | 23 | func (c *ctx) initCtx(sz int) error { 24 | c.operationsFd = make(map[unsafe.Pointer]*File, sz) 25 | 26 | // check io_uring 27 | if err := c.initIoUring(sz); err == nil { 28 | c.ioUringUserDataPool = sync.Pool{ 29 | New: func() 
interface{} { 30 | return &cqUserData{} 31 | }, 32 | } 33 | return nil 34 | } 35 | 36 | // use aio 37 | return c.initAio(sz) 38 | } 39 | 40 | func (c *ctx) initIoUring(sz int) error { 41 | if ioUringSetupSys == -1 || ioUringEnterSys == -1 { 42 | return ErrNotSupported 43 | } 44 | var r ring 45 | if err := setup(uint32(sz), &r, 0); err != nil { 46 | return err 47 | } 48 | c.r = &r 49 | c.asyncMode = asyncIoUring 50 | return nil 51 | } 52 | 53 | func (c *ctx) initAio(sz int) error { 54 | var aio uint64 55 | _, _, e1 := syscall.RawSyscall(syscall.SYS_IO_SETUP, uintptr(sz), uintptr(unsafe.Pointer(&aio)), 0) 56 | if e1 != 0 { 57 | return e1 58 | } 59 | c.aio = aio 60 | c.asyncMode = asyncAio 61 | c.align = 512 62 | c.alignedBuffers = make(map[unsafe.Pointer]slice) 63 | return nil 64 | } 65 | 66 | func (c *ctx) allocBuf(sz int) []uint8 { 67 | alloc := func(fullSz int) []uint8 { 68 | var buf []uint8 69 | if c.bufPoller != nil { 70 | buf = c.bufPoller(fullSz) 71 | if len(buf) != sz { 72 | buf = append(buf, make([]uint8, sz-len(buf))...) 
73 | } 74 | } else { 75 | buf = make([]uint8, fullSz) 76 | } 77 | return buf 78 | } 79 | if c.asyncMode == asyncIoUring { 80 | return alloc(sz) 81 | } 82 | buf := alloc(sz + c.align - 1) 83 | bufLen := len(buf) 84 | bufCap := cap(buf) 85 | rawBuf := unsafe.Pointer(&buf[0]) 86 | hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) 87 | hdr.Data = uintptr((uint64(hdr.Data) + uint64(c.align-1)) & ^(uint64(c.align) - 1)) 88 | hdr.Cap -= c.align - 1 89 | hdr.Len -= c.align - 1 90 | c.alignedBuffers[unsafe.Pointer(&buf[0])] = slice{ 91 | b: rawBuf, 92 | len: bufLen, 93 | cap: bufCap, 94 | } 95 | return buf 96 | } 97 | 98 | func (c *ctx) releaseBuf(b []uint8) { 99 | buf := b 100 | if c.alignedBuffers != nil { 101 | toRelease, ok := c.alignedBuffers[unsafe.Pointer(&b[0])] 102 | if ok { 103 | hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) 104 | hdr.Data = uintptr(toRelease.b) 105 | hdr.Len = toRelease.len 106 | hdr.Cap = toRelease.cap 107 | delete(c.alignedBuffers, unsafe.Pointer(&b[0])) 108 | } 109 | } 110 | for i := range buf { 111 | buf[i] = 0x0 112 | } 113 | buf = buf[:0] 114 | if c.bufReleaser != nil { 115 | c.bufReleaser(buf) 116 | } 117 | } 118 | 119 | func (c *ctx) busy() bool { 120 | if c.asyncMode == asyncIoUring { 121 | c.Lock() 122 | res := c.currentCnt >= c.sz 123 | c.Unlock() 124 | return res 125 | } 126 | return false 127 | } 128 | -------------------------------------------------------------------------------- /ctx_unix_notbsd_test.go: -------------------------------------------------------------------------------- 1 | // +build linux android 2 | 3 | package asyncfs 4 | 5 | import ( 6 | "github.com/stretchr/testify/assert" 7 | "runtime" 8 | "sync" 9 | "testing" 10 | "unsafe" 11 | ) 12 | 13 | func steps() []int { 14 | return []int{asyncAio, asyncIoUring} 15 | } 16 | 17 | func prepare(t *testing.T, mode int) { 18 | var sz int 19 | 20 | if mode == 0 { // windows/bsd 21 | sz = 8 22 | err := NewCtx(sz, sameThreadLim, nil, nil) 23 | assert.NoError(t, err) 24 | 
return 25 | } 26 | 27 | if mode == asyncAio { 28 | sz = 8 29 | c = new(ctx) 30 | err := c.initAio(sz) 31 | assert.NoError(t, err) 32 | } else { 33 | if ioUringSetupSys <= 0 || ioUringEnterSys <= 0 { 34 | t.Skip("io_uring doesn't supported") 35 | } 36 | sz = 128 37 | var r ring 38 | err := setup(uint32(sz), &r, 0) 39 | assert.NoError(t, err) 40 | c = new(ctx) 41 | c.ioUringUserDataPool = sync.Pool{ 42 | New: func() interface{} { 43 | return &cqUserData{} 44 | }, 45 | } 46 | c.r = &r 47 | } 48 | 49 | c.sz = sz 50 | c.currentCnt = 0 51 | c.align = 512 52 | c.asyncMode = mode 53 | c.operationsFd = make(map[unsafe.Pointer]*File, sz) 54 | c.alignedBuffers = make(map[unsafe.Pointer]slice) 55 | c.newThread = func(int) bool { 56 | return false 57 | } 58 | } 59 | 60 | func TestCtx_initIoUring(t *testing.T) { 61 | if runtime.GOOS != "linux" && runtime.GOOS != "android" { 62 | t.Skip("skipping; linux-only test") 63 | } 64 | var c ctx 65 | err := c.initIoUring(7) 66 | assert.EqualError(t, err, errBadSize.Error()) 67 | 68 | err = c.initIoUring(8) 69 | assert.NoError(t, err) 70 | assert.NotNil(t, c.r) 71 | assert.Equal(t, asyncIoUring, c.asyncMode) 72 | } 73 | 74 | func TestCtx_initAio(t *testing.T) { 75 | if runtime.GOOS != "linux" && runtime.GOOS != "android" { 76 | t.Skip("skipping; linux-only test") 77 | } 78 | var c ctx 79 | err := c.initAio(8) 80 | assert.NoError(t, err) 81 | assert.NotEqual(t, 0, c.aio) 82 | assert.Equal(t, asyncAio, c.asyncMode) 83 | } 84 | -------------------------------------------------------------------------------- /ctx_windows.go: -------------------------------------------------------------------------------- 1 | // +build windows 2 | 3 | package asyncfs 4 | 5 | import ( 6 | "fmt" 7 | "syscall" 8 | "unsafe" 9 | ) 10 | 11 | type ( 12 | ctx struct { 13 | baseCtx 14 | nCreateEvent uint64 15 | nWriteFile uint64 16 | nReadFile uint64 17 | nGetOverlappedResult uint64 18 | operationsFd map[unsafe.Pointer]*File 19 | } 20 | ) 21 | 22 | func (c *ctx) 
initCtx(sz int) error { 23 | k32, err := syscall.LoadLibrary("kernel32.dll") 24 | if err != nil { 25 | return err 26 | } 27 | 28 | ce, err := getProcAddr(k32, "CreateEventW") 29 | if err != nil { 30 | return fmt.Errorf("CreateEvent: '%s'", err) 31 | } 32 | c.nCreateEvent = ce 33 | 34 | wf, err := getProcAddr(k32, "WriteFile") 35 | if err != nil { 36 | return fmt.Errorf("WriteFile: '%s'", err) 37 | } 38 | c.nWriteFile = wf 39 | 40 | rf, err := getProcAddr(k32, "ReadFile") 41 | if err != nil { 42 | return fmt.Errorf("ReadFile: '%s'", err) 43 | } 44 | c.nReadFile = rf 45 | 46 | gor, err := getProcAddr(k32, "GetOverlappedResult") 47 | if err != nil { 48 | return fmt.Errorf("GetOverlappedResult: '%s'", err) 49 | } 50 | c.nGetOverlappedResult = gor 51 | 52 | c.operationsFd = make(map[unsafe.Pointer]*File, sz) 53 | 54 | return nil 55 | } 56 | 57 | func (c *ctx) haveToAlign() bool { 58 | return false 59 | } 60 | 61 | func (c *ctx) busy() bool { 62 | return false 63 | } 64 | 65 | func (c *ctx) allocBuf(sz int) []uint8 { 66 | var buf []uint8 67 | if c.bufPoller != nil { 68 | buf = c.bufPoller(sz) 69 | if len(buf) != sz { 70 | buf = append(buf, make([]uint8, sz-len(buf))...) 
71 | } 72 | } else { 73 | buf = make([]uint8, sz) 74 | } 75 | return buf 76 | } 77 | 78 | func (c *ctx) releaseBuf(b []uint8) { 79 | buf := b 80 | for i := range buf { 81 | buf[i] = 0x0 82 | } 83 | buf = buf[:0] 84 | if c.bufReleaser != nil { 85 | c.bufReleaser(buf) 86 | } 87 | } 88 | 89 | func getProcAddr(lib syscall.Handle, name string) (uint64, error) { 90 | addr, err := syscall.GetProcAddress(lib, name) 91 | if err != nil { 92 | return 0, err 93 | } 94 | return uint64(addr), nil 95 | } 96 | -------------------------------------------------------------------------------- /ctx_windows_test.go: -------------------------------------------------------------------------------- 1 | // +build windows 2 | 3 | package asyncfs 4 | 5 | import ( 6 | "github.com/stretchr/testify/assert" 7 | "testing" 8 | ) 9 | 10 | func steps() []int { 11 | return []int{0x0} 12 | } 13 | 14 | func prepare(t *testing.T, mode int) { 15 | var sz int 16 | 17 | if mode == 0 { // windows/bsd 18 | sz = 8 19 | err := NewCtx(sz, sameThreadLim, nil, nil) 20 | assert.NoError(t, err) 21 | return 22 | } 23 | 24 | sz = 8 25 | c = new(ctx) 26 | if err := c.initCtx(sz); err != nil { 27 | t.Fatal(err.Error()) 28 | } 29 | c.sz = sz 30 | c.currentCnt = 0 31 | c.align = 512 32 | c.asyncMode = mode 33 | c.newThread = func(int) bool { 34 | return false 35 | } 36 | } 37 | -------------------------------------------------------------------------------- /file.go: -------------------------------------------------------------------------------- 1 | package asyncfs 2 | 3 | import ( 4 | "errors" 5 | "io" 6 | "os" 7 | "runtime" 8 | "sync" 9 | "syscall" 10 | ) 11 | 12 | const ( 13 | ModeSync = 0x1 14 | ModeAsync = 0x2 15 | ) 16 | 17 | type ( 18 | File struct { 19 | fileCtx 20 | fd *os.File 21 | mtx sync.Mutex 22 | lastSyncSeek bool 23 | mode uint8 24 | pos int64 25 | path string 26 | lastAsyncOpState asyncOpState 27 | } 28 | ) 29 | 30 | var ErrNotCompleted = errors.New("operation isn't completed") 31 | var ErrCtxBusy = 
errors.New("ctx is busy") 32 | var ErrUnknownOperation = errors.New("unknown operation") 33 | var ErrNotSubmittedAio = errors.New("failed aio submit") 34 | var ErrAioError = errors.New("aio error") 35 | var ErrNotSubmittedIoUring = errors.New("failed io_uring submit") 36 | var ErrNotSupported = errors.New("not supported") 37 | var ErrNotImplemented = errors.New("not implemented") 38 | var ErrEOF = io.EOF 39 | 40 | func Open(path string, mode int, perm os.FileMode, openMode uint8) (*File, error) { 41 | return open(path, mode, perm, openMode) 42 | } 43 | 44 | func Create(path string, fsMode uint8) (*File, error) { 45 | return Open(path, syscall.O_RDWR|syscall.O_CREAT|syscall.O_TRUNC, 0666, fsMode) 46 | } 47 | 48 | func (f *File) Mode() uint8 { 49 | return f.mode 50 | } 51 | 52 | func (f *File) Path() string { 53 | return f.path 54 | } 55 | 56 | func (f *File) Write(data []uint8) (int, error) { 57 | var n int 58 | var err error 59 | switch f.mode { 60 | case ModeAsync: 61 | if err := f.checkAsyncResult(); err != nil { 62 | return 0, err 63 | } 64 | n, err = f.writeAsync(data) 65 | case ModeSync: 66 | n, err = f.writeSync(data) 67 | } 68 | runtime.KeepAlive(f) 69 | return n, err 70 | } 71 | 72 | func (f *File) WriteSync(data []uint8) (int, error) { 73 | return f.writeSync(data) 74 | } 75 | 76 | func (f *File) Read(data []uint8) (int, error) { 77 | var n int 78 | var err error 79 | switch f.mode { 80 | case ModeAsync: 81 | if err := f.checkAsyncResult(); err != nil { 82 | return 0, err 83 | } 84 | n, err = f.readAsync(data) 85 | case ModeSync: 86 | n, err = f.readSync(data) 87 | } 88 | runtime.KeepAlive(f) 89 | return n, err 90 | } 91 | 92 | func (f *File) ReadSync(data []uint8) (int, error) { 93 | return f.readSync(data) 94 | } 95 | 96 | func (f *File) LastOp() (int, bool, error) { 97 | if f.Mode() == ModeSync { 98 | return 0, true, nil 99 | } 100 | err := f.checkAsyncResult() 101 | if err != nil { 102 | if err == ErrNotCompleted { 103 | return 0, false, nil 104 | } 
105 | return 0, false, err 106 | } 107 | f.mtx.Lock() 108 | complete := f.lastAsyncOpState.complete 109 | res := f.lastAsyncOpState.result 110 | eof := f.lastAsyncOpState.eof 111 | f.mtx.Unlock() 112 | if res < 0 { 113 | return 0, complete, syscall.Errno(res) 114 | } 115 | if eof { 116 | return 0, complete, ErrEOF 117 | } 118 | return int(res), complete, nil 119 | } 120 | 121 | func (f *File) Pos() int64 { 122 | if f.Mode() == ModeSync { 123 | return f.pos 124 | } 125 | _ = f.checkAsyncResult() 126 | return f.pos 127 | } 128 | 129 | func (f *File) Stat() (os.FileInfo, error) { 130 | if err := f.checkAsyncResult(); err != nil { 131 | return nil, err 132 | } 133 | 134 | f.mtx.Lock() 135 | defer f.mtx.Unlock() 136 | 137 | if !f.lastAsyncOpState.complete { 138 | return nil, ErrNotCompleted 139 | } 140 | 141 | return f.fd.Stat() 142 | } 143 | 144 | func (f *File) Seek(pos int64, whence int) (int64, error) { 145 | if f.Mode() == ModeSync { 146 | n, err := f.fd.Seek(pos, whence) 147 | if err != nil { 148 | return n, err 149 | } 150 | switch whence { 151 | case 0: 152 | f.pos = pos 153 | case 1: 154 | f.pos += pos 155 | case 2: 156 | f.pos = n 157 | } 158 | return f.pos, nil 159 | } 160 | 161 | if err := f.checkAsyncResult(); err != nil { 162 | return 0, err 163 | } 164 | 165 | f.mtx.Lock() 166 | defer f.mtx.Unlock() 167 | 168 | if !f.lastAsyncOpState.complete { 169 | return f.pos, nil 170 | } 171 | 172 | n, err := f.fd.Seek(pos, whence) 173 | if err != nil { 174 | return f.pos, err 175 | } 176 | 177 | switch whence { 178 | case 0: 179 | f.pos = pos 180 | case 1: 181 | f.pos += pos 182 | case 2: 183 | f.pos = n 184 | default: 185 | return 0, ErrNotImplemented 186 | } 187 | 188 | f.lastAsyncOpState.lastOp = OpUnknown 189 | return f.pos, nil 190 | } 191 | 192 | func (f *File) Close() error { 193 | return f.close() 194 | } 195 | 196 | func (f *File) checkAsyncSeek() (int, error) { 197 | if err := f.checkAsyncResult(); err == ErrNotCompleted { 198 | return 0, err 199 | } 200 | 
if f.lastSyncSeek { 201 | if _, err := f.fd.Seek(f.pos, 0); err != nil { 202 | return 0, err 203 | } 204 | f.lastSyncSeek = false 205 | } 206 | return 0, nil 207 | } 208 | -------------------------------------------------------------------------------- /file_test.go: -------------------------------------------------------------------------------- 1 | package asyncfs 2 | 3 | import ( 4 | "bytes" 5 | "fmt" 6 | "github.com/stretchr/testify/assert" 7 | "io" 8 | "os" 9 | "strconv" 10 | "sync" 11 | "syscall" 12 | "testing" 13 | "time" 14 | ) 15 | 16 | const sameThreadLim = 1024 * 1024 17 | 18 | func TestFile_Write_Async(t *testing.T) { 19 | for _, x := range steps() { 20 | prepare(t, x) 21 | func() { 22 | path := "./asyncWrite" 23 | f, err := Open(path, syscall.O_RDWR|syscall.O_CREAT, 0644, ModeAsync) 24 | assert.NoError(t, err) 25 | defer func() { 26 | f.Close() 27 | os.Remove(f.path) 28 | }() 29 | 30 | buf := AllocBuf(2048) 31 | for i := range buf { 32 | buf[i] = uint8(i) 33 | } 34 | n, err := f.Write(buf) 35 | assert.Equal(t, 0, n) 36 | assert.NoError(t, err) 37 | assert.False(t, f.lastAsyncOpState.complete) 38 | assert.Equal(t, OpWrite, f.lastAsyncOpState.lastOp) 39 | assert.Equal(t, int64(0), f.pos) 40 | 41 | t1 := time.Now() 42 | for i := 0; ; i++ { 43 | n, ok, err := f.LastOp() 44 | assert.NoError(t, err) 45 | if !ok { 46 | if time.Now().Sub(t1) > time.Second { 47 | t.Fatal("too long") 48 | } 49 | continue 50 | } 51 | assert.Equal(t, len(buf), n) 52 | break 53 | } 54 | 55 | assert.True(t, f.lastAsyncOpState.complete) 56 | assert.Equal(t, OpWrite, f.lastAsyncOpState.lastOp) 57 | assert.Equal(t, int64(len(buf)), f.pos) 58 | 59 | f.pos = 0 60 | _, err = f.Seek(0, 0) 61 | assert.NoError(t, err) 62 | out := AllocBuf(2048) 63 | n, err = f.ReadSync(out) 64 | assert.NoError(t, err) 65 | assert.Equal(t, 2048, n) 66 | for i := range out { 67 | assert.Equal(t, buf[i], out[i]) 68 | } 69 | }() 70 | } 71 | } 72 | 73 | func TestFile_Write_Async_Concurrent(t *testing.T) { 74 | 
for _, x := range steps() { 75 | prepare(t, x) 76 | func() { 77 | sz := 100 78 | fds := make([]*File, 0, sz) 79 | path := "./asyncWrite_concurrent_" 80 | 81 | for i := 0; i < sz; i++ { 82 | f, err := Open(path+strconv.Itoa(i), syscall.O_RDWR|syscall.O_CREAT, 0644, ModeAsync) 83 | if err != nil { 84 | t.Error(err.Error()) 85 | continue 86 | } 87 | fds = append(fds, f) 88 | } 89 | defer func() { 90 | for i := range fds { 91 | fds[i].Close() 92 | assert.NoError(t, os.Remove(fds[i].path)) 93 | } 94 | }() 95 | 96 | buf := AllocBuf(2048) 97 | for i := range buf { 98 | buf[i] = uint8(i) 99 | } 100 | m := sync.Mutex{} 101 | cond := sync.NewCond(&m) 102 | 103 | wg := &sync.WaitGroup{} 104 | fn := func(f *File) { 105 | defer wg.Done() 106 | cond.L.Lock() 107 | cond.Wait() 108 | cond.L.Unlock() 109 | n, err := f.Write(buf) 110 | assert.NoError(t, err) 111 | assert.Equal(t, 0, n) 112 | for { 113 | n, ok, err := f.LastOp() 114 | assert.NoError(t, err) 115 | if !ok { 116 | continue 117 | } 118 | assert.Equal(t, len(buf), n) 119 | break 120 | } 121 | } 122 | 123 | wg.Add(sz) 124 | for i := 0; i < sz; i++ { 125 | go fn(fds[i]) 126 | } 127 | time.Sleep(time.Millisecond * 100) 128 | cond.L.Lock() 129 | cond.Broadcast() 130 | cond.L.Unlock() 131 | 132 | wg.Wait() 133 | 134 | for i := 0; i < sz; i++ { 135 | assert.True(t, fds[i].lastAsyncOpState.complete) 136 | assert.Equal(t, OpWrite, fds[i].lastAsyncOpState.lastOp) 137 | assert.Equal(t, int64(len(buf)), fds[i].pos) 138 | 139 | _, err := fds[i].Seek(0, 0) 140 | assert.NoError(t, err) 141 | out := AllocBuf(2048) 142 | n, err := fds[i].ReadSync(out) 143 | assert.NoError(t, err) 144 | assert.Equal(t, 2048, n) 145 | for i := range out { 146 | assert.Equal(t, buf[i], out[i]) 147 | } 148 | } 149 | }() 150 | } 151 | } 152 | 153 | func TestFile_Read_Async(t *testing.T) { 154 | for _, x := range steps() { 155 | prepare(t, x) 156 | func() { 157 | path := "./asyncRead" 158 | 159 | f, err := Open(path, syscall.O_RDWR|syscall.O_CREAT, 0644, 
ModeAsync) 160 | assert.NoError(t, err) 161 | defer func() { 162 | f.Close() 163 | os.Remove(f.path) 164 | }() 165 | 166 | buf := AllocBuf(2048) 167 | for i := range buf { 168 | buf[i] = uint8(i) 169 | } 170 | n, err := f.WriteSync(buf) 171 | assert.Equal(t, len(buf), n) 172 | assert.NoError(t, err) 173 | assert.Equal(t, int64(len(buf)), f.Pos()) 174 | assert.Equal(t, OpUnknown, f.lastAsyncOpState.lastOp) 175 | out := AllocBuf(len(buf)) 176 | f.pos = 0 177 | n, err = f.Read(out) 178 | assert.NoError(t, err) 179 | assert.Equal(t, 0, n) 180 | assert.Equal(t, OpRead, f.lastAsyncOpState.lastOp) 181 | assert.Equal(t, int64(0), f.pos) 182 | assert.False(t, f.lastAsyncOpState.complete) 183 | t1 := time.Now() 184 | for i := 0; ; i++ { 185 | n, ok, err := f.LastOp() 186 | assert.NoError(t, err) 187 | if !ok { 188 | if time.Now().Sub(t1) > time.Second { 189 | t.Fatal("too long") 190 | } 191 | continue 192 | } 193 | assert.Equal(t, len(buf), n) 194 | break 195 | } 196 | assert.True(t, f.lastAsyncOpState.complete) 197 | assert.Equal(t, OpRead, f.lastAsyncOpState.lastOp) 198 | assert.Equal(t, int64(len(out)), f.pos) 199 | for i := range out { 200 | assert.Equal(t, buf[i], out[i]) 201 | } 202 | 203 | out = AllocBuf(len(buf)) 204 | newPos := int64(len(buf) - 512) 205 | f.pos = newPos 206 | n, err = f.Read(out) 207 | assert.NoError(t, err) 208 | assert.Equal(t, 0, n) 209 | assert.Equal(t, OpRead, f.lastAsyncOpState.lastOp) 210 | assert.Equal(t, newPos, f.pos) 211 | assert.False(t, f.lastAsyncOpState.complete) 212 | t1 = time.Now() 213 | for i := 0; ; i++ { 214 | n, ok, err := f.LastOp() 215 | assert.NoError(t, err) 216 | if !ok { 217 | if time.Now().Sub(t1) > time.Second { 218 | t.Fatal("too long") 219 | } 220 | continue 221 | } 222 | assert.Equal(t, 512, n) 223 | break 224 | } 225 | assert.True(t, f.lastAsyncOpState.complete) 226 | assert.Equal(t, OpRead, f.lastAsyncOpState.lastOp) 227 | 228 | out = AllocBuf(len(buf)) 229 | n, err = f.Read(out) 230 | assert.NoError(t, err) 231 | 
assert.Equal(t, 0, n) 232 | assert.Equal(t, OpRead, f.lastAsyncOpState.lastOp) 233 | assert.False(t, f.lastAsyncOpState.complete) 234 | t1 = time.Now() 235 | for i := 0; ; i++ { 236 | n, ok, err := f.LastOp() 237 | if !ok { 238 | if time.Now().Sub(t1) > time.Second { 239 | t.Fatal("too long") 240 | } 241 | continue 242 | } 243 | assert.EqualError(t, err, io.EOF.Error()) 244 | assert.Equal(t, 0, n) 245 | break 246 | } 247 | }() 248 | } 249 | } 250 | 251 | func TestFile_Read_Sync_Async(t *testing.T) { 252 | for _, x := range steps() { 253 | prepare(t, x) 254 | func() { 255 | path := "./syncAsyncRead" 256 | fd, err := os.OpenFile(path, syscall.O_RDWR|syscall.O_CREAT, 0644) 257 | assert.NoError(t, err) 258 | defer func() { 259 | fd.Close() 260 | os.Remove(path) 261 | }() 262 | 263 | buf := make([]uint8, 2048) 264 | for i := range buf { 265 | buf[i] = uint8(i) 266 | } 267 | n, err := fd.Write(buf) 268 | assert.NoError(t, err) 269 | assert.Equal(t, 2048, n) 270 | 271 | f, err := Open(path, syscall.O_RDONLY, 0644, ModeAsync) 272 | defer f.Close() 273 | assert.NoError(t, err) 274 | 275 | out := AllocBuf(512) 276 | n, err = f.Read(out) 277 | assert.NoError(t, err) 278 | assert.Equal(t, 0, n) 279 | assert.Equal(t, OpRead, f.lastAsyncOpState.lastOp) 280 | assert.Equal(t, int64(0), f.pos) 281 | assert.False(t, f.lastAsyncOpState.complete) 282 | t1 := time.Now() 283 | for i := 0; ; i++ { 284 | n, ok, err := f.LastOp() 285 | assert.NoError(t, err) 286 | if !ok { 287 | if time.Now().Sub(t1) > time.Second { 288 | t.Fatal("too long") 289 | } 290 | continue 291 | } 292 | assert.Equal(t, len(out), n) 293 | break 294 | } 295 | assert.True(t, f.lastAsyncOpState.complete) 296 | assert.Equal(t, OpRead, f.lastAsyncOpState.lastOp) 297 | assert.Equal(t, int64(512), f.pos) 298 | for i := range out { 299 | assert.Equal(t, buf[i], out[i]) 300 | } 301 | 302 | n, err = f.ReadSync(out) 303 | assert.NoError(t, err) 304 | assert.Equal(t, 512, n) 305 | assert.Equal(t, int64(1024), f.pos) 306 | for i 
:= range out { 307 | assert.Equal(t, buf[i+512], out[i]) 308 | } 309 | 310 | out = AllocBuf(1536) 311 | n, err = f.Read(out) 312 | assert.NoError(t, err) 313 | assert.Equal(t, 0, n) 314 | assert.Equal(t, OpRead, f.lastAsyncOpState.lastOp) 315 | assert.Equal(t, int64(1024), f.pos) 316 | assert.False(t, f.lastAsyncOpState.complete) 317 | t1 = time.Now() 318 | for i := 0; ; i++ { 319 | n, ok, err := f.LastOp() 320 | assert.NoError(t, err) 321 | if !ok { 322 | if time.Now().Sub(t1) > time.Second { 323 | t.Fatal("too long") 324 | } 325 | continue 326 | } 327 | assert.Equal(t, 1024, n) 328 | break 329 | } 330 | assert.True(t, f.lastAsyncOpState.complete) 331 | assert.Equal(t, OpRead, f.lastAsyncOpState.lastOp) 332 | assert.Equal(t, int64(2048), f.pos) 333 | for i := range out[:1024] { 334 | assert.Equal(t, buf[i+1024], out[i]) 335 | } 336 | 337 | n, err = f.ReadSync(out) 338 | assert.EqualError(t, err, ErrEOF.Error()) 339 | assert.Equal(t, 0, n) 340 | assert.Equal(t, int64(2048), f.pos) 341 | 342 | out = AllocBuf(len(buf)) 343 | n, err = f.Read(out) 344 | assert.NoError(t, err) 345 | assert.Equal(t, 0, n) 346 | assert.Equal(t, OpRead, f.lastAsyncOpState.lastOp) 347 | assert.False(t, f.lastAsyncOpState.complete) 348 | t1 = time.Now() 349 | for i := 0; ; i++ { 350 | n, ok, err := f.LastOp() 351 | if !ok { 352 | if time.Now().Sub(t1) > time.Second { 353 | t.Fatal("too long") 354 | } 355 | continue 356 | } 357 | assert.EqualError(t, err, io.EOF.Error()) 358 | assert.Equal(t, 0, n) 359 | break 360 | } 361 | }() 362 | } 363 | } 364 | 365 | func TestFile_Big(t *testing.T) { 366 | for _, x := range steps() { 367 | prepare(t, x) 368 | // 1mb, 128mb, 512mb, 1gb, 2gb 369 | for _, sz := range []int{1024 * 1024, 1024 * 1024 * 128, 1024 * 1024 * 512 /* 1024*1024*1024, /* 1024*1024*1024*2 */} { // check ur free RAM 370 | func() { 371 | defer func() { 372 | if e := recover(); e != nil { 373 | t.Skip(fmt.Sprintf("recovered: %+v", e)) 374 | } 375 | }() 376 | path := "./fileBig" 377 | f, 
err := Open(path, syscall.O_RDWR|syscall.O_CREAT, 0644, ModeAsync) 378 | assert.NoError(t, err) 379 | defer func() { 380 | f.Close() 381 | os.Remove(path) 382 | }() 383 | 384 | buf := AllocBuf(sz) 385 | for i := range buf { 386 | buf[i] = uint8(i) 387 | } 388 | n, err := f.Write(buf) 389 | assert.Equal(t, 0, n) 390 | assert.NoError(t, err) 391 | 392 | t1 := time.Now() 393 | for { 394 | n, ok, err := f.LastOp() 395 | assert.NoError(t, err) 396 | if !ok { 397 | if time.Now().Sub(t1) > time.Second*5 { 398 | t.Fatal("too long") 399 | } 400 | continue 401 | } 402 | assert.Equal(t, sz, n) 403 | break 404 | } 405 | _, err = f.Seek(0, 0) 406 | assert.NoError(t, err) 407 | out := AllocBuf(sz) 408 | n, err = f.ReadSync(out) 409 | assert.NoError(t, err) 410 | assert.Equal(t, sz, n) 411 | if !bytes.Equal(buf, out) { 412 | t.Fatal("buf isn't equal out") 413 | } 414 | }() 415 | } 416 | } 417 | } 418 | 419 | func TestFile_Sync(t *testing.T) { 420 | for _, x := range steps() { 421 | prepare(t, x) 422 | func() { 423 | path := "./sync" 424 | f, err := Open(path, syscall.O_RDWR|syscall.O_CREAT, 0644, ModeSync) 425 | assert.NoError(t, err) 426 | defer func() { 427 | f.Close() 428 | os.Remove(f.path) 429 | }() 430 | 431 | out := AllocBuf(1024) 432 | n, err := f.Read(out) 433 | assert.Equal(t, 0, n) 434 | assert.EqualError(t, err, ErrEOF.Error()) 435 | 436 | in := AllocBuf(1024) 437 | for i := 0; i < len(in); i++ { 438 | in[i] = uint8(i) 439 | } 440 | n, err = f.Write(in) 441 | assert.Equal(t, 1024, n) 442 | assert.NoError(t, err) 443 | 444 | n, err = f.Read(out) 445 | assert.Equal(t, 0, n) 446 | assert.EqualError(t, err, ErrEOF.Error()) 447 | assert.Equal(t, int64(1024), f.pos) 448 | 449 | pos, err := f.Seek(512, 0) 450 | assert.Equal(t, int64(512), pos) 451 | assert.Equal(t, int64(512), f.pos) 452 | assert.NoError(t, err) 453 | 454 | n, err = f.Read(out) 455 | assert.Equal(t, 512, n) 456 | assert.NoError(t, err) 457 | assert.Equal(t, int64(1024), f.pos) 458 | 459 | n, err = 
f.Read(out) 460 | assert.Equal(t, 0, n) 461 | assert.EqualError(t, err, ErrEOF.Error()) 462 | assert.Equal(t, int64(1024), f.pos) 463 | }() 464 | } 465 | } 466 | 467 | func TestFile_Stat(t *testing.T) { 468 | for _, x := range steps() { 469 | prepare(t, x) 470 | func() { 471 | path := "./stat" 472 | f, err := Open(path, syscall.O_RDWR|syscall.O_CREAT, 0644, ModeAsync) 473 | assert.NoError(t, err) 474 | defer func() { 475 | f.Close() 476 | os.Remove(f.path) 477 | }() 478 | 479 | buf := AllocBuf(1048576) 480 | for i := range buf { 481 | buf[i] = uint8(i) 482 | } 483 | n, err := f.writeSync(buf) 484 | assert.Equal(t, 1048576, n) 485 | assert.NoError(t, err) 486 | assert.Equal(t, int64(len(buf)), f.Pos()) 487 | assert.Equal(t, OpUnknown, f.lastAsyncOpState.lastOp) 488 | 489 | info, err := f.Stat() 490 | assert.NoError(t, err) 491 | assert.Equal(t, "stat", info.Name()) 492 | assert.Equal(t, int64(1048576), info.Size()) 493 | assert.Equal(t, false, info.IsDir()) 494 | 495 | _, _ = f.Seek(0, 0) 496 | 497 | out := AllocBuf(1048576) 498 | _, _ = f.Read(out) 499 | info, err = f.Stat() 500 | if err != nil { 501 | assert.EqualError(t, err, ErrNotCompleted.Error()) 502 | } 503 | }() 504 | } 505 | } 506 | 507 | func TestFile_Seek(t *testing.T) { 508 | for _, x := range steps() { 509 | prepare(t, x) 510 | func() { 511 | path := "./seek" 512 | 513 | f, err := Open(path, syscall.O_RDWR|syscall.O_CREAT, 0644, ModeAsync) 514 | assert.NoError(t, err) 515 | defer func() { 516 | f.Close() 517 | os.Remove(f.path) 518 | }() 519 | 520 | buf := AllocBuf(2048) 521 | for i := range buf { 522 | buf[i] = uint8(i) 523 | } 524 | n, err := f.Write(buf) 525 | assert.Equal(t, 0, n) 526 | assert.NoError(t, err) 527 | assert.False(t, f.lastAsyncOpState.complete) 528 | assert.Equal(t, OpWrite, f.lastAsyncOpState.lastOp) 529 | assert.Equal(t, int64(0), f.pos) 530 | 531 | t1 := time.Now() 532 | for i := 0; ; i++ { 533 | n, ok, err := f.LastOp() 534 | assert.NoError(t, err) 535 | if !ok { 536 | if 
time.Now().Sub(t1) > time.Second { 537 | t.Fatal("too long") 538 | } 539 | continue 540 | } 541 | assert.Equal(t, len(buf), n) 542 | break 543 | } 544 | 545 | assert.Equal(t, int64(len(buf)), f.pos) 546 | 547 | newPos := int64(512) 548 | pos, err := f.Seek(newPos, 0) 549 | assert.NoError(t, err) 550 | assert.Equal(t, newPos, pos) 551 | assert.Equal(t, OpUnknown, f.lastAsyncOpState.lastOp) 552 | 553 | buf1 := AllocBuf(512) 554 | for i := range buf1 { 555 | buf1[i] = 'f' 556 | } 557 | n, err = f.Write(buf1) 558 | assert.Equal(t, 0, n) 559 | assert.NoError(t, err) 560 | assert.False(t, f.lastAsyncOpState.complete) 561 | assert.Equal(t, OpWrite, f.lastAsyncOpState.lastOp) 562 | assert.Equal(t, newPos, f.pos) 563 | 564 | t1 = time.Now() 565 | for i := 0; ; i++ { 566 | n, ok, err := f.LastOp() 567 | assert.NoError(t, err) 568 | if !ok { 569 | if time.Now().Sub(t1) > time.Second { 570 | t.Fatal("too long") 571 | } 572 | continue 573 | } 574 | assert.Equal(t, len(buf1), n) 575 | break 576 | } 577 | time.Sleep(time.Millisecond * 100) 578 | 579 | f.pos = 0 580 | _, err = f.Seek(0, 0) 581 | assert.NoError(t, err) 582 | out := AllocBuf(2048) 583 | n, err = f.ReadSync(out) 584 | assert.NoError(t, err) 585 | assert.Equal(t, 2048, n) 586 | assert.Equal(t, len(buf), len(out)) 587 | 588 | for i := range out[:newPos] { 589 | assert.Equal(t, buf[i], out[i]) 590 | } 591 | for i, x := range out[newPos : int(newPos)+len(buf1)] { 592 | assert.Equal(t, buf1[i], x) 593 | } 594 | for i, x := range out[int(newPos)+len(buf1):] { 595 | assert.Equal(t, buf[int64(i)+newPos], x) 596 | } 597 | }() 598 | } 599 | } 600 | -------------------------------------------------------------------------------- /file_unix.go: -------------------------------------------------------------------------------- 1 | // +build linux android freebsd darwin 2 | 3 | package asyncfs 4 | 5 | import ( 6 | "fmt" 7 | "io" 8 | "os" 9 | "syscall" 10 | "unsafe" 11 | ) 12 | 13 | const ( 14 | asyncAio = 0x1 15 | asyncIoUring = 0x2 
16 | ) 17 | 18 | type ( 19 | fileCtx struct{} 20 | ) 21 | 22 | func open(path string, flag int, perm os.FileMode, fType uint8) (*File, error) { 23 | var resFile File 24 | var fd *os.File 25 | var mode uint8 26 | 27 | switch fType { 28 | case ModeAsync: 29 | f, err := openAsync(path, flag, perm) 30 | if err != nil { 31 | return nil, err 32 | } 33 | fd = f 34 | mode = ModeAsync 35 | resFile.lastAsyncOpState.complete = true 36 | case ModeSync: 37 | f, err := os.OpenFile(path, flag, perm) 38 | if err != nil { 39 | return nil, err 40 | } 41 | fd = f 42 | mode = ModeSync 43 | default: 44 | return nil, fmt.Errorf("unknown type '%v'", fType) 45 | } 46 | 47 | resFile.path = path 48 | resFile.fd = fd 49 | resFile.mode = mode 50 | 51 | return &resFile, nil 52 | } 53 | 54 | func (f *File) writeSync(data []uint8) (int, error) { 55 | if f.mode == ModeAsync { 56 | if n, err := f.checkAsyncSeek(); err != nil { 57 | return n, err 58 | } 59 | } 60 | 61 | if !c.newThread(len(data)) { 62 | nn, _, e := syscall.RawSyscall(syscall.SYS_WRITE, f.fd.Fd(), uintptr(unsafe.Pointer(&data[0])), uintptr(len(data))) 63 | if e != 0 { 64 | return int(nn), e 65 | } 66 | if int(nn) != len(data) { 67 | return int(nn), io.ErrShortWrite 68 | } 69 | f.mtx.Lock() 70 | f.pos += int64(nn) 71 | f.mtx.Unlock() 72 | return int(nn), nil 73 | } 74 | 75 | n, err := f.fd.Write(data) 76 | if err != nil { 77 | return n, err 78 | } 79 | f.mtx.Lock() 80 | f.pos += int64(n) 81 | f.lastAsyncOpState.lastOp = OpUnknown 82 | f.mtx.Unlock() 83 | return n, nil 84 | } 85 | 86 | func (f *File) readSync(data []uint8) (int, error) { 87 | if f.mode == ModeAsync { 88 | if n, err := f.checkAsyncSeek(); err != nil { 89 | return n, err 90 | } 91 | } 92 | 93 | if !c.newThread(len(data)) { 94 | nn, _, e := syscall.RawSyscall(syscall.SYS_READ, f.fd.Fd(), uintptr(unsafe.Pointer(&data[0])), uintptr(len(data))) 95 | if e != 0 { 96 | return int(nn), e 97 | } 98 | if nn == 0 && len(data) > 0 { 99 | return 0, ErrEOF 100 | } 101 | f.mtx.Lock() 
// checkAsyncResult reports the outcome of the most recent asynchronous
// operation on f. It returns nil when there is nothing to check (sync
// mode, or no async op has been issued since the last reset),
// ErrNotCompleted when the operation is still in flight, and an error
// wrapping the negative kernel result on failure.
//
// Locking: the mutex is deliberately released around fillOpState —
// which polls the kernel and presumably takes f.mtx itself (TODO
// confirm) — and re-acquired afterwards, so each unlock/return pair
// below is explicit rather than deferred.
func (f *File) checkAsyncResult() error {
	if f.Mode() == ModeSync {
		return nil
	}
	f.mtx.Lock()
	// No async operation pending since the last reset: nothing to report.
	if f.lastAsyncOpState.lastOp == OpUnknown {
		f.mtx.Unlock()
		return nil
	}
	if !f.lastAsyncOpState.complete {
		// Drop the lock while querying the kernel for completion state.
		f.mtx.Unlock()
		if err := f.fillOpState(); err != nil {
			return err
		}
		f.mtx.Lock()
		// Re-check under the lock: the poll may not have completed it yet.
		if !f.lastAsyncOpState.complete {
			f.mtx.Unlock()
			return ErrNotCompleted
		}
	}
	// A negative result indicates the async operation itself failed.
	if f.lastAsyncOpState.result < 0 {
		f.mtx.Unlock()
		return fmt.Errorf("async error: %d", f.lastAsyncOpState.result)
	}
	f.mtx.Unlock()
	return nil
}
44 | mode = ModeSync 45 | default: 46 | return nil, fmt.Errorf("unknown type '%v'", fType) 47 | } 48 | 49 | resFile.path = path 50 | resFile.fd = fd 51 | resFile.mode = mode 52 | 53 | return &resFile, nil 54 | } 55 | 56 | func (f *File) writeSync(data []uint8) (int, error) { 57 | if f.mode == ModeAsync { 58 | if n, err := f.checkAsyncSeek(); err != nil { 59 | return n, err 60 | } 61 | return f.rwSync(c.nWriteFile, data) 62 | } 63 | n, err := f.fd.Write(data) 64 | if err != nil { 65 | return n, err 66 | } 67 | f.mtx.Lock() 68 | f.pos += int64(n) 69 | f.lastAsyncOpState.lastOp = OpUnknown 70 | f.mtx.Unlock() 71 | return n, err 72 | } 73 | 74 | func (f *File) readSync(data []uint8) (int, error) { 75 | if f.mode == ModeAsync { 76 | if n, err := f.checkAsyncSeek(); err != nil { 77 | return n, err 78 | } 79 | return f.rwSync(c.nReadFile, data) 80 | } 81 | n, err := f.fd.Read(data) 82 | if err != nil { 83 | return n, err 84 | } 85 | f.mtx.Lock() 86 | f.pos += int64(n) 87 | f.lastAsyncOpState.lastOp = OpUnknown 88 | f.mtx.Unlock() 89 | return n, err 90 | } 91 | 92 | func (f *File) rwSync(nOp uint64, data []uint8) (int, error) { 93 | if err := f.checkAsyncResult(); err != nil { 94 | return 0, err 95 | } 96 | n, err := f.rwAsync(nOp, data) 97 | if err != nil { 98 | return 0, err 99 | } 100 | f.mtx.Lock() 101 | f.wait = true 102 | f.mtx.Unlock() 103 | for { 104 | _, ok, err := f.LastOp() 105 | if err != nil { 106 | return 0, err 107 | } 108 | if ok { 109 | break 110 | } 111 | runtime.Gosched() 112 | } 113 | f.mtx.Lock() 114 | f.pos += int64(n) 115 | f.lastAsyncOpState.lastOp = OpUnknown 116 | f.mtx.Unlock() 117 | return int(f.processedBytes), nil 118 | } 119 | 120 | func (f *File) checkAsyncResult() error { 121 | if f.Mode() == ModeSync { 122 | return nil 123 | } 124 | 125 | f.mtx.Lock() 126 | defer f.mtx.Unlock() 127 | 128 | if f.lastAsyncOpState.complete { 129 | f.processedBytes = 0 130 | return nil 131 | } 132 | 133 | var n uint64 134 | var ov *syscall.Overlapped 135 | 136 
| switch f.lastAsyncOpState.lastOp { 137 | case OpRead: 138 | ov = f.ro 139 | case OpWrite: 140 | ov = f.wo 141 | default: 142 | return nil 143 | } 144 | 145 | var wait int32 146 | if f.wait { 147 | wait = 1 148 | } 149 | 150 | r1, _, e := syscall.Syscall6(uintptr(c.nGetOverlappedResult), 4, f.fd.Fd(), uintptr(unsafe.Pointer(ov)), uintptr(unsafe.Pointer(&n)), uintptr(wait), 0, 0) 151 | f.processedBytes += n 152 | if r1 == 0 { 153 | if e == syscall.ERROR_IO_PENDING || e == 996 { // WSA_IO_INCOMPLETE(996) 154 | if f.wait { 155 | return e 156 | } 157 | return ErrNotCompleted 158 | } 159 | 160 | f.lastAsyncOpState.complete = true 161 | 162 | if e == syscall.ERROR_HANDLE_EOF { 163 | f.lastAsyncOpState.eof = true 164 | return ErrEOF 165 | } 166 | 167 | return e 168 | } 169 | 170 | f.lastAsyncOpState.complete = true 171 | f.pos += int64(f.processedBytes) 172 | f.lastAsyncOpState.result = int64(n) 173 | f.lastAsyncOpState.eof = f.lastAsyncOpState.lastOp == OpRead && n == 0 && f.toRwBytes > 0 174 | 175 | return nil 176 | } 177 | 178 | func (f *File) close() error { 179 | if f.mode == ModeAsync { 180 | f.ro = nil 181 | f.wo = nil 182 | } 183 | return f.fd.Close() 184 | } 185 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/ojaai/asyncfs 2 | 3 | go 1.16 4 | 5 | require github.com/stretchr/testify v1.7.0 6 | -------------------------------------------------------------------------------- /go.sum: -------------------------------------------------------------------------------- 1 | github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= 2 | github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 3 | github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= 4 | github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 
// newIovec builds a syscall.Iovec pointing at b with length l, widening
// the length to the 64-bit Len field used on these architectures.
func newIovec(b *uint8, l int) syscall.Iovec {
	var iov syscall.Iovec
	iov.Base = b
	iov.Len = uint64(l)
	return iov
}