├── go.mod ├── LICENSE ├── monitor_test.go ├── README.md ├── event_buffer.go ├── event_buffer_test.go ├── common.go └── monitor.go /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/0xef53/go-qmp/v2 2 | 3 | go 1.14 4 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2014 Sergey Zhuravlev 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /monitor_test.go: -------------------------------------------------------------------------------- 1 | package qmp 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "log" 7 | "time" 8 | ) 9 | 10 | // This example shows how to use the Monitor to communicate with a QEMU instance via QMP. 11 | func ExampleMonitor() { 12 | mon, err := NewMonitor("/var/run/qemu/alice.qmp", 60*time.Second) 13 | if err != nil { 14 | log.Fatalln(err) 15 | } 16 | defer mon.Close() 17 | 18 | done := make(chan struct{}) 19 | go func() { 20 | ts := time.Now() 21 | ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) 22 | defer cancel() 23 | got, err := mon.GetEvents(ctx, "SHUTDOWN", uint64(ts.Unix())) 24 | if err != nil { 25 | log.Printf("Timeout error (type=%T): %s\n", err, err) 26 | } else { 27 | log.Printf("OK, got a SHUTDOWN event: %#v\n", got) 28 | } 29 | close(done) 30 | }() 31 | 32 | log.Println("Sleeping for three seconds ...") 33 | 34 | time.Sleep(3 * time.Second) 35 | 36 | log.Println("... and sending a 'system_powerdown' command.") 37 | 38 | if err := mon.Run(Command{"system_powerdown", nil}, nil); err != nil { 39 | log.Fatalln(err) 40 | } 41 | 42 | <-done 43 | } 44 | 45 | // An example of executing a command via human monitor. 46 | func ExampleMonitor_Run() { 47 | mon, err := NewMonitor("/var/run/qemu/alice.qmp", 60*time.Second) 48 | if err != nil { 49 | log.Fatalln(err) 50 | } 51 | 52 | var out string 53 | 54 | if err := mon.Run(Command{"human-monitor-command", &HumanCommand{"info vnc"}}, &out); err != nil { 55 | log.Fatalln(err) 56 | } 57 | 58 | fmt.Println(out) 59 | } 60 | 61 | // An example of removing a device from a guest. 62 | // Completion of the process is signaled with a DEVICE_DELETED event. 
63 | func ExampleMonitor_WaitDeviceDeletedEvent() {
64 | 	mon, err := NewMonitor("/var/run/qemu/alice.qmp", 60*time.Second)
65 | 	if err != nil {
66 | 		log.Fatalln(err)
67 | 	}
68 | 
69 | 	deviceID := struct {
70 | 		Id string `json:"id"`
71 | 	}{
72 | 		"blk_alice",
73 | 	}
74 | 
75 | 	ts := time.Now()
76 | 	if err := mon.Run(Command{"device_del", &deviceID}, nil); err != nil {
77 | 		log.Fatalln("device_del error:", err)
78 | 	}
79 | 
80 | 	// ... and wait until the operation is completed
81 | 	ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
82 | 	defer cancel()
83 | 
84 | 	switch _, err := mon.WaitDeviceDeletedEvent(ctx, "blk_alice", uint64(ts.Unix())); {
85 | 	case err == nil:
86 | 	case err == context.DeadlineExceeded:
87 | 		log.Fatalln("device_del timeout error: failed to complete within 60 seconds")
88 | 	default:
89 | 		log.Fatalln(err)
90 | 	}
91 | }
92 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | go-qmp
2 | -----------
3 | 
4 | [![GoDoc](https://godoc.org/github.com/0xef53/go-qmp?status.svg)](https://godoc.org/github.com/0xef53/go-qmp)
5 | 
6 | Package go-qmp implements the [QEMU Machine Protocol](http://wiki.qemu.org/QMP) for the Go language.
7 | 
8 | ### Installation
9 | 
10 |     go get github.com/0xef53/go-qmp/v2
11 | 
12 | ### Example
13 | 
14 | #### Waiting for a virtual machine to shut down
15 | 
16 | ```go
17 | mon, err := NewMonitor("/var/run/qemu/alice.qmp", 60*time.Second)
18 | if err != nil {
19 | 	log.Fatalln(err)
20 | }
21 | defer mon.Close()
22 | 
23 | done := make(chan struct{})
24 | go func() {
25 | 	ts := time.Now()
26 | 	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
27 | 	defer cancel()
28 | 	got, err := mon.GetEvents(ctx, "SHUTDOWN", uint64(ts.Unix()))
29 | 	if err != nil {
30 | 		log.Printf("Timeout error (type=%T): %s\n", err, err)
31 | 	} else {
32 | 		log.Printf("OK, got a SHUTDOWN event: %#v\n", got)
33 | 	}
34 | 	close(done)
35 | }()
36 | 
37 | log.Println("Sleeping for three seconds ...")
38 | 
39 | time.Sleep(3 * time.Second)
40 | 
41 | log.Println("... and sending a 'system_powerdown' command.")
42 | 
43 | if err := mon.Run(Command{"system_powerdown", nil}, nil); err != nil {
44 | 	log.Fatalln(err)
45 | }
46 | 
47 | <-done
48 | ```
49 | 
50 | #### Executing a command via the human monitor
51 | 
52 | ```go
53 | mon, err := NewMonitor("/var/run/qemu/alice.qmp", 60*time.Second)
54 | if err != nil {
55 | 	log.Fatalln(err)
56 | }
57 | 
58 | var out string
59 | 
60 | if err := mon.Run(Command{"human-monitor-command", &HumanCommand{"info vnc"}}, &out); err != nil {
61 | 	log.Fatalln(err)
62 | }
63 | 
64 | fmt.Println(out)
65 | 
66 | ```
67 | 
68 | #### Removing a device from a guest
69 | 
70 | Completion of the process is signaled with a `DEVICE_DELETED` event.
71 | 
72 | ```go
73 | mon, err := NewMonitor("/var/run/qemu/alice.qmp", 60*time.Second)
74 | if err != nil {
75 | 	log.Fatalln(err)
76 | }
77 | 
78 | deviceID := struct {
79 | 	Id string `json:"id"`
80 | }{
81 | 	"blk_alice",
82 | }
83 | 
84 | ts := time.Now()
85 | if err := mon.Run(Command{"device_del", &deviceID}, nil); err != nil {
86 | 	log.Fatalln("device_del error:", err)
87 | }
88 | 
89 | // ... and wait until the operation is completed
90 | ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
91 | defer cancel()
92 | 
93 | switch _, err := mon.WaitDeviceDeletedEvent(ctx, "blk_alice", uint64(ts.Unix())); {
94 | case err == nil:
95 | case err == context.DeadlineExceeded:
96 | 	log.Fatalln("device_del timeout error: failed to complete within 60 seconds")
97 | default:
98 | 	log.Fatalln(err)
99 | }
100 | ```
101 | 
102 | ### Documentation
103 | 
104 | See the [Godoc documentation](https://godoc.org/github.com/0xef53/qmp-monitor) for reference and usage.
105 | 
--------------------------------------------------------------------------------
/event_buffer.go:
--------------------------------------------------------------------------------
1 | package qmp
2 | 
3 | import (
4 | 	"context"
5 | 	"sort"
6 | 	"sync"
7 | )
8 | 
9 | // This is a ring buffer with a given size where all occurred QMP events are stored.
10 | type eventBuffer struct {
11 | 	mu      sync.Mutex
12 | 	events  []Event
13 | 	size    int
14 | 	cur     int
15 | 	waiters map[string][]chan Event
16 | 	ctx     context.Context
17 | }
18 | 
19 | func (eb *eventBuffer) find(t string, after uint64) ([]Event, bool) {
20 | 	i := sort.Search(len(eb.events), func(i int) bool {
21 | 		offset := (i + eb.cur) % len(eb.events)
22 | 		return eb.events[offset].Timestamp.Seconds >= after
23 | 	})
24 | 
25 | 	if i < len(eb.events) {
26 | 		offset := (i + eb.cur) % len(eb.events)
27 | 
28 | 		out := make([]Event, 0)
29 | 
30 | 		right := len(eb.events)
31 | 		if i+eb.cur >= right {
32 | 			// The buffer boundary is exceeded
33 | 			right = offset
34 | 		}
35 | 
36 | 		for _, e := range eb.events[offset:right] {
37 | 			if e.Type == t || t == "" {
38 | 				out = append(out, e)
39 | 			}
40 | 		}
41 | 
42 | 		left := offset
43 | 		if left >= eb.cur {
44 | 			left = 0
45 | 		}
46 | 
47 | 		for _, e := range eb.events[left:eb.cur] {
48 | 			if e.Type == t || t == "" {
49 | 				out = append(out, e)
50 | 			}
51 | 		}
52 | 
53 | 		if len(out) > 0 {
54 | 			return out, true
55 | 		}
56 | 	}
57 | 
58 | 	// No matches found
59 | 	return nil, false
60 | }
61 | 
62 | // Find tries to find at least one event of the specified type
63 | // that occurred after the specified Unix time (in seconds).
64 | // If no matches are found, the second return value will be false.
65 | func (eb *eventBuffer) Find(t string, after uint64) ([]Event, bool) {
66 | 	eb.mu.Lock()
67 | 	defer eb.mu.Unlock()
68 | 
69 | 	return eb.find(t, after)
70 | }
71 | 
72 | // Get returns an event list of the specified type from the buffer.
73 | // If no events are found, the function subscribes and waits for the first new event
74 | // until the context is done (cancelled manually or using context.WithTimeout).
75 | func (eb *eventBuffer) Get(ctx context.Context, t string, after uint64) ([]Event, error) {
76 | 	eb.mu.Lock()
77 | 
78 | 	// Check existing events
79 | 	if ee, found := eb.find(t, after); found {
80 | 		eb.mu.Unlock()
81 | 		return ee, nil
82 | 	}
83 | 
84 | 	// No matches found, subscribe and wait
85 | 	if eb.waiters == nil {
86 | 		eb.waiters = make(map[string][]chan Event)
87 | 	}
88 | 	ch := make(chan Event, 1)
89 | 	eb.waiters[t] = append(eb.waiters[t], ch)
90 | 
91 | 	eb.mu.Unlock()
92 | 
93 | 	defer func() {
94 | 		eb.mu.Lock()
95 | 		defer eb.mu.Unlock()
96 | 		if w := eb.waiters[t]; len(w) == 1 && w[0] == ch { // shortcut for 1-element slice
97 | 			delete(eb.waiters, t)
98 | 			return
99 | 		}
100 | 		for i, c := range eb.waiters[t] {
101 | 			if c != ch {
102 | 				continue
103 | 			}
104 | 			// remove eb.waiters[t][i]
105 | 			w := eb.waiters[t]
106 | 			w = append(w[:i], w[i+1:]...)
107 | eb.waiters[t] = w 108 | return 109 | } 110 | }() 111 | 112 | for { 113 | select { 114 | case <-ctx.Done(): 115 | // timeout 116 | return nil, ctx.Err() 117 | case <-eb.ctx.Done(): 118 | // global context 119 | return nil, ErrOperationCanceled 120 | case ev := <-ch: 121 | if ev.Timestamp.Seconds >= after { 122 | return []Event{ev}, nil 123 | } 124 | } 125 | } 126 | } 127 | 128 | // Put appends events to the buffer, and also sends them to all subscribers. 129 | // 130 | // Put assumes that events are added with non-decreasing timestamps (each next 131 | // has timestamp larger or equal to previous) 132 | func (eb *eventBuffer) Put(ee ...Event) { 133 | eb.mu.Lock() 134 | defer eb.mu.Unlock() 135 | 136 | append := func(e *Event) { 137 | if len(eb.events) < eb.size { 138 | eb.events = append(eb.events, *e) 139 | } else { 140 | eb.events[eb.cur] = *e 141 | } 142 | eb.cur++ 143 | if eb.cur == eb.size { 144 | eb.cur = 0 145 | } 146 | } 147 | 148 | for _, e := range ee { 149 | append(&e) 150 | } 151 | 152 | for _, e := range ee { 153 | for _, ch := range eb.waiters[e.Type] { 154 | select { 155 | case ch <- e: 156 | default: 157 | } 158 | } 159 | } 160 | } 161 | -------------------------------------------------------------------------------- /event_buffer_test.go: -------------------------------------------------------------------------------- 1 | package qmp 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "strings" 7 | "sync" 8 | "testing" 9 | "time" 10 | ) 11 | 12 | func toStr(ee []Event) string { 13 | s := []string{} 14 | for _, e := range ee { 15 | s = append(s, fmt.Sprintf("{%s %d}", e.Type, e.Timestamp.Seconds)) 16 | } 17 | return strings.Join(s, " ") 18 | } 19 | 20 | func TestBasic(t *testing.T) { 21 | names := []string{ 22 | "TEST_EVENT_A", // 0 23 | "TEST_EVENT_B", // 1 24 | "TEST_EVENT_C", // 2 25 | "TEST_EVENT_D", // 3 26 | "TEST_EVENT_E", // 4 27 | "TEST_EVENT_F", // 5 28 | "TEST_EVENT_G", // 6 29 | "TEST_EVENT_H", // 7 30 | "TEST_EVENT_I", // 8 31 | "TEST_EVENT_J", // 9 32 | "TEST_EVENT_K", // 10 33 | "TEST_EVENT_L", // 11 34 | "TEST_EVENT_X", // 12 35 | "TEST_EVENT_N", // 13 36 | "TEST_EVENT_O", // 0 37 | "TEST_EVENT_X", // 1 38 | "TEST_EVENT_Q", // 2 39 | "TEST_EVENT_R", // 3 40 | "TEST_EVENT_S", // 4 41 | } 42 | 43 | eb := &eventBuffer{size: 14} 44 | 45 | for i := 0; i < len(names); i++ { 46 | ev := Event{Type: names[i]} 47 | ev.Timestamp.Seconds = uint64(i) 48 | eb.Put(ev) 49 | } 50 | 51 | // [1] Looking for TEST_EVENT_X with ts >= 13 52 | if got, err := eb.Get(context.Background(), "TEST_EVENT_X", 13); err == nil { 53 | if l := len(got); l != 1 { 54 | t.Fatalf("[1] got %d records instead of 1: %+v", l, got) 55 | } 56 | want := "{TEST_EVENT_X 15}" 57 | if s := toStr(got); s != want { 58 | t.Fatalf("[1] got invalid record:\n\twant:\t%s\n\tgot:\t%s", want, s) 59 | } 60 | } else { 61 | t.Fatal(err) 62 | } 63 | 64 | // [2] Looking for TEST_EVENT_X with any ts value 65 | if got, err := eb.Get(context.Background(), "TEST_EVENT_X", 0); err == nil { 66 | if l := len(got); l != 2 { 67 | t.Fatalf("[2] got %d records instead of 2: %+v", l, got) 68 | } 69 | want := "{TEST_EVENT_X 12} {TEST_EVENT_X 15}" 70 | if s := toStr(got); s != want { 71 | t.Fatalf("[2] got invalid record:\n\twant:\t%s\n\tgot:\t%s", want, s) 72 | } 73 | } else { 74 | t.Fatal(err) 75 | } 76 | 77 | // [3] Looking for TEST_EVENT_O with ts == 14 78 | if got, err := eb.Get(context.Background(), "TEST_EVENT_O", 14); err == nil { 79 | if l := len(got); l != 1 { 80 | t.Fatalf("[3] got %d records instead of 1: %+v", l, got) 81 | 
} 82 | want := "{TEST_EVENT_O 14}" 83 | if s := toStr(got); s != want { 84 | t.Fatalf("[3] got invalid record:\n\twant:\t%s\n\tgot:\t%s", want, s) 85 | } 86 | } else { 87 | t.Fatal(err) 88 | } 89 | 90 | // [4] Looking for TEST_EVENT_J with ts == 9 91 | if got, err := eb.Get(context.Background(), "TEST_EVENT_J", 9); err == nil { 92 | if l := len(got); l != 1 { 93 | t.Fatalf("[4] got %d records instead of 1: %+v", l, got) 94 | } 95 | want := "{TEST_EVENT_J 9}" 96 | if s := toStr(got); s != want { 97 | t.Fatalf("[4] got invalid record:\n\twant:\t%s\n\tgot:\t%s", want, s) 98 | } 99 | } else { 100 | t.Fatal(err) 101 | } 102 | 103 | // [5] Looking for any events with ts >= 12 104 | if got, err := eb.Get(context.Background(), "", 12); err == nil { 105 | if l := len(got); l != 7 { 106 | t.Fatalf("[5] got %d records instead of 7: %+v", l, got) 107 | } 108 | want := "{TEST_EVENT_X 12} {TEST_EVENT_N 13} {TEST_EVENT_O 14} {TEST_EVENT_X 15} {TEST_EVENT_Q 16} {TEST_EVENT_R 17} {TEST_EVENT_S 18}" 109 | if s := toStr(got); s != want { 110 | t.Fatalf("[5] got invalid record:\n\twant:\t%s\n\tgot:\t%s", want, s) 111 | } 112 | } else { 113 | t.Fatal(err) 114 | } 115 | } 116 | 117 | func TestWaiting(t *testing.T) { 118 | eb := &eventBuffer{size: 10, ctx: context.Background()} 119 | 120 | var wg sync.WaitGroup 121 | 122 | ready := make(chan struct{}) 123 | 124 | wg.Add(1) 125 | 126 | go func() { 127 | defer wg.Done() 128 | 129 | ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second) 130 | defer cancel() 131 | 132 | close(ready) 133 | 134 | got, err := eb.Get(ctx, "TEST_EVENT", 1) // will wait for single Event{Type:"TEST", Timestamp:1} 135 | if err != nil { 136 | t.Error(err) 137 | return 138 | } 139 | 140 | t.Logf("got %+v", got) 141 | 142 | if l := len(got); l != 1 { 143 | t.Errorf("got %d records instead of 1: %+v", l, got) 144 | } 145 | 146 | if got[0].Timestamp.Seconds != 1 { 147 | t.Errorf("got record with wrong timestamp (want TS=1): %+v", got[0]) 148 | } 149 | }() 150 | 151 | <-ready 152 | 153 | for i := 0; i < 3; i++ { 154 | ev := Event{Type: "TEST_EVENT"} 155 | ev.Timestamp.Seconds = uint64(i) 156 | eb.Put(ev) 157 | } 158 | 159 | wg.Wait() 160 | } 161 | -------------------------------------------------------------------------------- /common.go: -------------------------------------------------------------------------------- 1 | package qmp 2 | 3 | import ( 4 | "encoding/json" 5 | "fmt" 6 | ) 7 | 8 | // Command represents a QMP command. See https://wiki.qemu.org/QMP 9 | // and https://github.com/qemu/qemu/blob/master/docs/interop/qmp-spec.txt 10 | type Command struct { 11 | Name string `json:"execute"` 12 | Arguments interface{} `json:"arguments,omitempty"` 13 | } 14 | 15 | // Response represents a common structure of QMP response. 16 | type Response struct { 17 | // Contains the data returned by the command. 18 | Return *json.RawMessage `json:"return"` 19 | 20 | // Contains details about an error that occurred. 21 | Error *GenericError `json:"error"` 22 | 23 | // A status change notification message 24 | // that can be sent unilaterally by the QMP server. 25 | Event *json.RawMessage `json:"event"` 26 | 27 | // A greeting message that is sent once when 28 | // a new QMP connection is established. 29 | Greeting *json.RawMessage `json:"QMP"` 30 | } 31 | 32 | // Event represents a QMP asynchronous event. 33 | type Event struct { 34 | // Type or name of event. E.g., BLOCK_JOB_COMPLETE. 35 | Type string `json:"event"` 36 | 37 | // Arbitrary event data. 
38 | 	Data json.RawMessage `json:"data"`
39 | 
40 | 	// Event timestamp, provided by QEMU.
41 | 	Timestamp struct {
42 | 		Seconds      uint64 `json:"seconds"`
43 | 		Microseconds uint64 `json:"microseconds"`
44 | 	} `json:"timestamp"`
45 | }
46 | 
47 | // Version represents a QEMU version structure returned when a QMP connection is initiated.
48 | type Version struct {
49 | 	Package string `json:"package"`
50 | 	QEMU    struct {
51 | 		Major int `json:"major"`
52 | 		Micro int `json:"micro"`
53 | 		Minor int `json:"minor"`
54 | 	} `json:"qemu"`
55 | }
56 | 
57 | // HumanCommand represents a query struct to execute a command
58 | // over the human monitor.
59 | type HumanCommand struct {
60 | 	Cmd string `json:"command-line"`
61 | }
62 | 
63 | // TransactionAction is a common structure of a QAPI command
64 | // that can be executed as a part of a transaction.
65 | type TransactionAction struct {
66 | 	Type string      `json:"type"`
67 | 	Data interface{} `json:"data"`
68 | }
69 | 
70 | // TransactionProperties is a set of additional options
71 | // to control the execution of a transaction.
72 | type TransactionProperties struct {
73 | 	CompletionMode string `json:"completion-mode"`
74 | }
75 | 
76 | // AllowedTransactionActions is the list of QAPI commands
77 | // that can be performed within a transaction.
78 | var AllowedTransactionActions = map[string]struct{}{
79 | 	"abort":                           struct{}{},
80 | 	"block-dirty-bitmap-add":          struct{}{},
81 | 	"block-dirty-bitmap-clear":        struct{}{},
82 | 	"x-block-dirty-bitmap-enable":     struct{}{},
83 | 	"x-block-dirty-bitmap-disable":    struct{}{},
84 | 	"x-block-dirty-bitmap-merge":      struct{}{},
85 | 	"blockdev-backup":                 struct{}{},
86 | 	"blockdev-snapshot":               struct{}{},
87 | 	"blockdev-snapshot-internal-sync": struct{}{},
88 | 	"blockdev-snapshot-sync":          struct{}{},
89 | 	"drive-backup":                    struct{}{},
90 | }
91 | 
92 | // DeviceDeletedEventData describes the properties of the DEVICE_DELETED event.
93 | //
94 | // Emitted whenever the device removal completion is acknowledged by the guest.
95 | type DeviceDeletedEventData struct {
96 | 	Device string `json:"device"`
97 | 	Path   string `json:"path"`
98 | }
99 | 
100 | // BlockJobErrorEventData describes the properties of the BLOCK_JOB_ERROR event.
101 | //
102 | // Emitted when a block job encounters an error.
103 | type BlockJobErrorEventData struct {
104 | 	Device    string `json:"device"`
105 | 	Operation string `json:"operation"`
106 | 	Action    string `json:"action"`
107 | }
108 | 
109 | // BlockJobCompletedEventData describes the properties of the BLOCK_JOB_COMPLETED event.
110 | //
111 | // Emitted when a block job has completed.
112 | type BlockJobCompletedEventData struct {
113 | 	Device     string `json:"device"`
114 | 	Type       string `json:"type"`
115 | 	ErrMessage string `json:"error"`
116 | }
117 | 
118 | // JobStatusChangeEventData describes the properties of the JOB_STATUS_CHANGE event.
119 | //
120 | // Emitted when a job transitions to a different status.
121 | type JobStatusChangeEventData struct {
122 | 	JobID  string `json:"id"`
123 | 	Status string `json:"status"`
124 | }
125 | 
126 | // DeviceTrayMovedEventData describes the properties of the DEVICE_TRAY_MOVED event.
127 | //
128 | // Emitted whenever the tray of a removable device is moved.
129 | type DeviceTrayMovedEventData struct {
130 | 	Device string `json:"device"`
131 | 	Open   bool   `json:"tray-open"`
132 | 	QdevID string `json:"id"`
133 | }
134 | 
135 | // GenericError represents a common structure for the QMP errors
136 | // that could occur. This type is also used for errors that don't have
137 | // a specific class (which is most of them, in fact).
138 | type GenericError struct {
139 | 	Class string `json:"class"`
140 | 	Desc  string `json:"desc"`
141 | }
142 | 
143 | func (err *GenericError) Error() string {
144 | 	return fmt.Sprintf("%s error: %s", err.Class, err.Desc)
145 | }
146 | 
147 | // CommandNotFound occurs when a requested command has not been found.
148 | type CommandNotFound struct {
149 | 	*GenericError
150 | }
151 | 
152 | // DeviceNotActive occurs when a device has failed to become active.
153 | type DeviceNotActive struct {
154 | 	*GenericError
155 | }
156 | 
157 | // DeviceNotFound occurs when a requested device has not been found.
158 | type DeviceNotFound struct {
159 | 	*GenericError
160 | }
161 | 
162 | // KVMMissingCap occurs when a requested operation can't be
163 | // fulfilled because a required KVM capability is missing.
164 | type KVMMissingCap struct {
165 | 	*GenericError
166 | }
167 | 
--------------------------------------------------------------------------------
/monitor.go:
--------------------------------------------------------------------------------
1 | package qmp
2 | 
3 | import (
4 | 	"bufio"
5 | 	"context"
6 | 	"encoding/json"
7 | 	"errors"
8 | 	"fmt"
9 | 	"io"
10 | 	"net"
11 | 	"os"
12 | 	"sync"
13 | 	"syscall"
14 | 	"time"
15 | )
16 | 
17 | var (
18 | 	noDeadline = time.Time{}
19 | 
20 | 	ErrHandshake   = errors.New("QMP Handshake error: invalid greeting")
21 | 	ErrNegotiation = errors.New("QMP Handshake error: negotiations failed")
22 | 
23 | 	ErrOperationCanceled = errors.New("Operation canceled: channel was closed")
24 | )
25 | 
26 | // Monitor represents a connection to communicate with the QMP interface using a UNIX socket.
27 | type Monitor struct {
28 | 	conn net.Conn
29 | 
30 | 	reader *bufio.Reader
31 | 	writer *bufio.Writer
32 | 
33 | 	resp  chan []byte
34 | 	evbuf *eventBuffer
35 | 
36 | 	mu       sync.Mutex
37 | 	cancel   context.CancelFunc
38 | 	released chan struct{}
39 | 	closed   bool
40 | 	err      error
41 | }
42 | 
43 | // NewMonitor creates and configures a connection to the QEMU monitor using a UNIX socket.
44 | // An error is returned if the socket cannot be successfully dialed, or the dial attempt times out.
45 | //
46 | // Multiple connections to the same QMP socket are not permitted,
47 | // and will result in the monitor blocking until the existing connection is closed.
48 | func NewMonitor(path string, timeout time.Duration) (*Monitor, error) {
49 | 	conn, err := net.DialTimeout("unix", path, timeout)
50 | 	if err != nil {
51 | 		return nil, err
52 | 	}
53 | 
54 | 	mon := Monitor{
55 | 		conn:   conn,
56 | 		reader: bufio.NewReader(conn),
57 | 		writer: bufio.NewWriter(conn),
58 | 		evbuf:  &eventBuffer{size: 100},
59 | 		resp:   make(chan []byte),
60 | 	}
61 | 
62 | 	if err := mon.handshake(); err != nil {
63 | 		conn.Close()
64 | 		return nil, err
65 | 	}
66 | 
67 | 	ctx, cancel := context.WithCancel(context.Background())
68 | 
69 | 	mon.cancel = cancel
70 | 	mon.evbuf.ctx = ctx
71 | 	mon.released = make(chan struct{})
72 | 
73 | 	// collect() reads data line by line from the connection
74 | 	// and can be manually interrupted only using ctx.
75 | 	go func() {
76 | 		var err error
77 | 		defer func() {
78 | 			conn.Close()
79 | 		}()
80 | 
81 | 		switch err = mon.collect(ctx); {
82 | 		case err == nil, err == io.EOF:
83 | 			// The monitor has been closed using the Close() method.
84 | // In this case we just make the error, 85 | // indicating that the connection was closed: 86 | // ESHUTDOWN: Cannot send after transport endpoint shutdown 87 | mon.err = &net.OpError{Op: "read", Net: "unix", Err: &os.SyscallError{"syscall", syscall.ESHUTDOWN}} 88 | default: 89 | mon.err = err 90 | cancel() 91 | } 92 | mon.closed = true 93 | 94 | close(mon.resp) 95 | close(mon.released) 96 | // At this stage: 97 | // - connection is closed 98 | // - mon.resp is closed 99 | // - all evbuf.Get() instances are interrupted by mon.cancel() 100 | // - map evbuf.waiters is cleared at the end of each evbuf.Get() respectively 101 | // - other evbuf variables will be deleted by GC 102 | }() 103 | 104 | return &mon, nil 105 | } 106 | 107 | func (m *Monitor) handshake() error { 108 | var r Response 109 | 110 | // Handshake 111 | b, err := m.reader.ReadBytes('\n') 112 | if err != nil { 113 | return err 114 | } 115 | if err := json.Unmarshal(b, &r); err != nil { 116 | return err 117 | } 118 | if r.Greeting == nil { 119 | return ErrHandshake 120 | } 121 | 122 | // Negotiation 123 | if err := m.write([]byte(`{"execute":"qmp_capabilities"}`)); err != nil { 124 | return err 125 | } 126 | res, err := m.reader.ReadBytes('\n') 127 | if err != nil { 128 | return err 129 | } 130 | if err := json.Unmarshal(res, &r); err != nil { 131 | return err 132 | } 133 | if r.Return == nil { 134 | return ErrNegotiation 135 | } 136 | 137 | return nil 138 | } 139 | 140 | // Close closes the QMP connection and releases all resources. 141 | // 142 | // After this call any interaction with the monitor 143 | // will generate an error of type net.OpError. 144 | func (m *Monitor) Close() error { 145 | m.mu.Lock() 146 | defer m.mu.Unlock() 147 | 148 | if m.closed { 149 | return nil 150 | } 151 | 152 | // Stop the background socket reading 153 | m.cancel() 154 | // This needs to "wake up" the socket 155 | m.write([]byte(`{"execute":"query-name"}`)) 156 | // And wait... 157 | <-m.resp 158 | <-m.released 159 | 160 | return m.conn.Close() 161 | } 162 | 163 | func (m *Monitor) write(b []byte) error { 164 | if _, err := m.writer.Write(append(b, '\x0a')); err != nil { 165 | return err 166 | } 167 | return m.writer.Flush() 168 | } 169 | 170 | func (m *Monitor) collect(ctx context.Context) error { 171 | loop: 172 | for { 173 | select { 174 | case <-ctx.Done(): 175 | break loop 176 | default: 177 | } 178 | 179 | data, err := m.reader.ReadBytes('\n') 180 | if err != nil { 181 | return err 182 | } 183 | 184 | var r Response 185 | if err := json.Unmarshal(data, &r); err != nil { 186 | return err 187 | } 188 | 189 | if r.Event != nil { 190 | var event Event 191 | if err := json.Unmarshal(data, &event); err != nil { 192 | return err 193 | } 194 | m.evbuf.Put(event) 195 | continue 196 | } 197 | 198 | m.resp <- data 199 | } 200 | 201 | return nil 202 | } 203 | 204 | // Run executes the given QAPI command. 205 | func (m *Monitor) Run(cmd interface{}, res interface{}) error { 206 | m.mu.Lock() 207 | defer m.mu.Unlock() 208 | 209 | if m.closed { 210 | //panic("unable to work with closed monitor") 211 | return m.err 212 | } 213 | 214 | b, err := json.Marshal(cmd) 215 | if err != nil { 216 | return err 217 | } 218 | if err := m.write(b); err != nil { 219 | return err 220 | } 221 | 222 | var data []byte 223 | select { 224 | case b, ok := <-m.resp: 225 | if !ok { 226 | // we can be here for two reasons: 227 | // - collect() ended with an error. 
228 | 			// In this case m.err will contain the corresponding error
229 | 			// (ECONNREFUSED or EPIPE or something else)
230 | 			// - close() was called.
231 | 			// In this case m.err will be equal to our special error ESHUTDOWN,
232 | 			// which means "transport is closed"
233 | 			return m.err
234 | 		}
235 | 		data = b
236 | 	}
237 | 
238 | 	var r Response
239 | 	if err := json.Unmarshal(data, &r); err != nil {
240 | 		return err
241 | 	}
242 | 	if r.Error != nil {
243 | 		return NewQMPError(r.Error)
244 | 	}
245 | 
246 | 	if res == nil {
247 | 		return nil
248 | 	}
249 | 
250 | 	if err := json.Unmarshal(*r.Return, res); err != nil {
251 | 		return err
252 | 	}
253 | 
254 | 	return nil
255 | }
256 | 
257 | // RunHuman executes a command using "human-monitor-command".
258 | func (m *Monitor) RunHuman(cmdline string) (string, error) {
259 | 	var out string
260 | 
261 | 	if err := m.Run(Command{"human-monitor-command", &HumanCommand{Cmd: cmdline}}, &out); err != nil {
262 | 		return "", err
263 | 	}
264 | 
265 | 	return out, nil
266 | }
267 | 
268 | // RunTransaction executes a number of transactionable QAPI commands atomically.
269 | func (m *Monitor) RunTransaction(cmds []Command, res interface{}, properties *TransactionProperties) error {
270 | 	args := struct {
271 | 		Actions    []TransactionAction    `json:"actions"`
272 | 		Properties *TransactionProperties `json:"properties,omitempty"`
273 | 	}{
274 | 		Actions:    make([]TransactionAction, 0, len(cmds)),
275 | 		Properties: properties,
276 | 	}
277 | 
278 | 	for _, cmd := range cmds {
279 | 		if _, ok := AllowedTransactionActions[cmd.Name]; !ok {
280 | 			return fmt.Errorf("unknown transaction command: %s", cmd.Name)
281 | 		}
282 | 		action := TransactionAction{
283 | 			Type: cmd.Name,
284 | 			Data: cmd.Arguments,
285 | 		}
286 | 		args.Actions = append(args.Actions, action)
287 | 	}
288 | 
289 | 	return m.Run(Command{"transaction", &args}, &res)
290 | }
291 | 
292 | // GetEvents returns an event list of the specified type
293 | // that occurred after the specified Unix time (in seconds).
294 | // If there are events in the buffer, then GetEvents will return them.
295 | // Otherwise, the function will wait for the first event until the context is done
296 | // (cancelled manually or using context.WithTimeout).
297 | func (m *Monitor) GetEvents(ctx context.Context, t string, after uint64) ([]Event, error) {
298 | 	if m.closed {
299 | 		//panic("unable to work with closed monitor")
300 | 		return nil, m.err
301 | 	}
302 | 
303 | 	// m.evbuf.Get() can be interrupted by the monitor's global context,
304 | 	// which is stored in m.evbuf.
305 | 	ee, err := m.evbuf.Get(ctx, t, after)
306 | 	switch err {
307 | 	case nil:
308 | 	case ErrOperationCanceled:
309 | 		// This means that m.evbuf.Get() was interrupted by the monitor's global context.
310 | 		// The reason for that is stored in the m.err variable.
311 | 		return nil, m.err
312 | 	default:
313 | 		return nil, err
314 | 	}
315 | 
316 | 	return ee, nil
317 | }
318 | 
319 | // FindEvents tries to find in the buffer at least one event
320 | // of the specified type that occurred after the specified Unix time (in seconds).
321 | // If no matches are found, the second return value will be false.
322 | func (m *Monitor) FindEvents(t string, after uint64) ([]Event, bool) {
323 | 	if m.closed {
324 | 		//panic("unable to work with closed monitor")
325 | 		return nil, false
326 | 	}
327 | 
328 | 	return m.evbuf.Find(t, after)
329 | }
330 | 
331 | // WaitMachineResumeStateEvent waits for a RESUME event.
332 | func (m *Monitor) WaitMachineResumeStateEvent(ctx context.Context, after uint64) (*Event, error) { 333 | var event *Event 334 | 335 | loop: 336 | for { 337 | events, err := m.GetEvents(ctx, "RESUME", after) 338 | if err != nil { 339 | return nil, err 340 | } 341 | for _, e := range events { 342 | event = &e 343 | break loop 344 | } 345 | } 346 | 347 | return event, nil 348 | } 349 | 350 | // WaitDeviceDeletedEvent waits a DEVICE_DELETED event for the specified device. 351 | func (m *Monitor) WaitDeviceDeletedEvent(ctx context.Context, device string, after uint64) (*Event, error) { 352 | var event *Event 353 | 354 | loop: 355 | for { 356 | events, err := m.GetEvents(ctx, "DEVICE_DELETED", after) 357 | if err != nil { 358 | return nil, err 359 | } 360 | for _, e := range events { 361 | var data DeviceDeletedEventData 362 | if err := json.Unmarshal(e.Data, &data); err != nil { 363 | return nil, err 364 | } 365 | if data.Device == device || data.Path == device { 366 | event = &e 367 | break loop 368 | } 369 | after = e.Timestamp.Seconds 370 | } 371 | } 372 | 373 | return event, nil 374 | } 375 | 376 | // WaitJobStatusChangeEvent waits a JOB_STATUS_CHANGE event for the specified job ID. 377 | func (m *Monitor) WaitJobStatusChangeEvent(ctx context.Context, jobID, status string, after uint64) (*Event, error) { 378 | var event *Event 379 | 380 | loop: 381 | for { 382 | events, err := m.GetEvents(ctx, "JOB_STATUS_CHANGE", after) 383 | if err != nil { 384 | return nil, err 385 | } 386 | for _, e := range events { 387 | var data JobStatusChangeEventData 388 | if err := json.Unmarshal(e.Data, &data); err != nil { 389 | return nil, err 390 | } 391 | if data.JobID == jobID && data.Status == status { 392 | event = &e 393 | break loop 394 | } 395 | after = e.Timestamp.Seconds 396 | } 397 | } 398 | 399 | return event, nil 400 | } 401 | 402 | // waitDeviceTrayMovedEvent waits a DEVICE_TRAY_MOVED event 403 | // with a specified state for the specified device. 404 | // 405 | // The state variable can be -1 (any states), 0 (closed) and 1 (opened). 406 | func (m *Monitor) waitDeviceTrayMovedEvent(ctx context.Context, device string, state int16, after uint64) (*Event, error) { 407 | if state < -1 || state > 1 { 408 | panic("incorrect state value") 409 | } 410 | 411 | var event *Event 412 | 413 | loop: 414 | for { 415 | events, err := m.GetEvents(ctx, "DEVICE_TRAY_MOVED", after) 416 | if err != nil { 417 | return nil, err 418 | } 419 | for _, e := range events { 420 | var data DeviceTrayMovedEventData 421 | if err := json.Unmarshal(e.Data, &data); err != nil { 422 | return nil, err 423 | } 424 | if data.Device == device || data.QdevID == device { 425 | switch state { 426 | case -1: 427 | event = &e 428 | case 0: 429 | if !data.Open { 430 | event = &e 431 | } 432 | case 1: 433 | if data.Open { 434 | event = &e 435 | } 436 | } 437 | if event != nil { 438 | break loop 439 | } 440 | } 441 | after = e.Timestamp.Seconds 442 | } 443 | } 444 | 445 | return event, nil 446 | } 447 | 448 | // WaitDeviceTrayMovedEvent waits a DEVICE_TRAY_MOVED event with any state for the specified device. 449 | func (m *Monitor) WaitDeviceTrayMovedEvent(ctx context.Context, device string, after uint64) (*Event, error) { 450 | return m.waitDeviceTrayMovedEvent(ctx, device, -1, after) 451 | } 452 | 453 | // WaitDeviceTrayClosedEvent waits a DEVICE_TRAY_MOVED event with state == "close" for the specified device. 
454 | func (m *Monitor) WaitDeviceTrayClosedEvent(ctx context.Context, device string, after uint64) (*Event, error) { 455 | return m.waitDeviceTrayMovedEvent(ctx, device, 0, after) 456 | } 457 | 458 | // WaitDeviceTrayOpenedEvent waits a DEVICE_TRAY_MOVED event with state == "open" for the specified device. 459 | func (m *Monitor) WaitDeviceTrayOpenedEvent(ctx context.Context, device string, after uint64) (*Event, error) { 460 | return m.waitDeviceTrayMovedEvent(ctx, device, 1, after) 461 | } 462 | 463 | // FindBlockJobErrorEvent tries to find a BLOCK_JOB_ERROR for the specified device. 464 | func (m *Monitor) FindBlockJobErrorEvent(device string, after uint64) (*Event, bool, error) { 465 | events, found := m.FindEvents("BLOCK_JOB_ERROR", after) 466 | if found { 467 | for _, e := range events { 468 | var data BlockJobErrorEventData 469 | if err := json.Unmarshal(e.Data, &data); err != nil { 470 | return nil, false, err 471 | } 472 | if data.Device == device { 473 | return &e, true, nil 474 | } 475 | } 476 | } 477 | 478 | return nil, false, nil 479 | } 480 | 481 | // FindBlockJobCompletedEvent tries to find a BLOCK_JOB_COMPLETED event for the specified device. 482 | func (m *Monitor) FindBlockJobCompletedEvent(device string, after uint64) (*Event, bool, error) { 483 | events, found := m.FindEvents("BLOCK_JOB_COMPLETED", after) 484 | if found { 485 | for _, e := range events { 486 | var data BlockJobCompletedEventData 487 | if err := json.Unmarshal(e.Data, &data); err != nil { 488 | return nil, false, err 489 | } 490 | if data.Device == device { 491 | return &e, true, nil 492 | } 493 | } 494 | } 495 | 496 | return nil, false, nil 497 | } 498 | 499 | func NewQMPError(err *GenericError) error { 500 | switch err.Class { 501 | case "CommandNotFound": 502 | return &CommandNotFound{err} 503 | case "DeviceNotActive": 504 | return &DeviceNotActive{err} 505 | case "DeviceNotFound": 506 | return &DeviceNotFound{err} 507 | case "KVMMissingCap": 508 | return &KVMMissingCap{err} 509 | } 510 | return err 511 | } 512 | 513 | func IsSocketNotAvailable(err error) bool { 514 | switch err.(type) { 515 | case *net.OpError: 516 | err := err.(*net.OpError).Err 517 | switch err.(type) { 518 | case *os.SyscallError: 519 | if errno, ok := err.(*os.SyscallError).Err.(syscall.Errno); ok { 520 | return errno == syscall.ENOENT || errno == syscall.ECONNREFUSED || errno == syscall.ECONNRESET || errno == syscall.EPIPE 521 | } 522 | } 523 | } 524 | return false 525 | } 526 | 527 | func IsSocketClosed(err error) bool { 528 | switch err.(type) { 529 | case *net.OpError: 530 | err := err.(*net.OpError).Err 531 | switch err.(type) { 532 | case *os.SyscallError: 533 | if errno, ok := err.(*os.SyscallError).Err.(syscall.Errno); ok { 534 | return errno == syscall.ESHUTDOWN 535 | } 536 | } 537 | } 538 | return false 539 | } 540 | --------------------------------------------------------------------------------
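
Note: the package also exposes `Monitor.RunTransaction` (see monitor.go above), which executes several QAPI commands from `AllowedTransactionActions` as a single atomic transaction, but none of the examples demonstrate it. The sketch below is illustrative only and is not part of the repository: the socket path, node names and bitmap name are placeholders, and the `block-dirty-bitmap-add` argument fields ("node", "name") are assumed to match the QMP schema.

```go
package main

import (
	"log"
	"time"

	qmp "github.com/0xef53/go-qmp/v2"
)

func main() {
	mon, err := qmp.NewMonitor("/var/run/qemu/alice.qmp", 60*time.Second)
	if err != nil {
		log.Fatalln(err)
	}
	defer mon.Close()

	// Arguments for "block-dirty-bitmap-add"; the JSON field names are
	// assumed to follow the QMP schema ("node", "name").
	type bitmapAddArgs struct {
		Node string `json:"node"`
		Name string `json:"name"`
	}

	cmds := []qmp.Command{
		{Name: "block-dirty-bitmap-add", Arguments: &bitmapAddArgs{Node: "drive0", Name: "bitmap0"}},
		{Name: "block-dirty-bitmap-add", Arguments: &bitmapAddArgs{Node: "drive1", Name: "bitmap0"}},
	}

	// Both bitmaps are created in a single "transaction" command:
	// either every action succeeds or none of them is applied.
	if err := mon.RunTransaction(cmds, nil, nil); err != nil {
		log.Fatalln("transaction error:", err)
	}
}
```

RunTransaction rejects any command whose name is not listed in `AllowedTransactionActions`, so supporting additional transactionable commands means extending that map.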