├── Makefile
├── README
├── include
│   ├── asm
│   │   └── hweight.h
│   ├── linux
│   │   ├── bitops.h
│   │   ├── byteorder.h
│   │   ├── init.h
│   │   ├── kernel.h
│   │   ├── list.h
│   │   ├── module.h
│   │   ├── poison.h
│   │   ├── prefetch.h
│   │   └── types.h
│   ├── virtio.h
│   ├── virtio_client.h
│   └── virtio_server.h
├── main.c
├── virtio_client.c
└── virtio_server.c

/Makefile:
--------------------------------------------------------------------------------
 1 | CC = gcc
 2 | CFLAGS = -Iinclude
 3 | LDLIBS = -lrt
 4 | OBJ = virtio_server.o virtio_client.o main.o
 5 | 
 6 | %.o: %.c
 7 | 	$(CC) -c -o $@ $< $(CFLAGS)
 8 | 
 9 | uvirtio: $(OBJ)
10 | 	$(CC) -o $@ $^ $(LDLIBS)
11 | 
--------------------------------------------------------------------------------
/README:
--------------------------------------------------------------------------------
 1 | Userspace Virtio (uvirtio)
 2 | ----------------------------
 3 | 
 4 | This project is intended to run virtio in userspace.
 5 | 
 6 | The userspace client parts are borrowed from the KVM tool (https://github.com/penberg/linux-kvm).
 7 | 
 8 | The userspace server is kernel code, lightly modified to work in userspace.
 9 | 
10 | Still very broken.
11 | 
--------------------------------------------------------------------------------
/include/asm/hweight.h:
--------------------------------------------------------------------------------
1 | #ifndef _KVM_ASM_HWEIGHT_H_
2 | #define _KVM_ASM_HWEIGHT_H_
3 | 
4 | #include <linux/types.h>
5 | unsigned int hweight32(unsigned int w);
6 | unsigned long hweight64(__u64 w);
7 | 
8 | #endif /* _KVM_ASM_HWEIGHT_H_ */
9 | 
--------------------------------------------------------------------------------
/include/linux/bitops.h:
--------------------------------------------------------------------------------
 1 | #ifndef _KVM_LINUX_BITOPS_H_
 2 | #define _KVM_LINUX_BITOPS_H_
 3 | 
 4 | #include <linux/kernel.h>
 5 | #include <linux/types.h>
 6 | #include <asm/hweight.h>
 7 | 
 8 | #define BITS_PER_LONG	__WORDSIZE
 9 | #define BITS_PER_BYTE	8
10 | #define BITS_TO_LONGS(nr)	DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long))
11 | 
12 | static inline void set_bit(int nr, unsigned long *addr)
13 | {
14 | 	addr[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG);
15 | }
16 | 
17 | static inline void clear_bit(int nr, unsigned long *addr)
18 | {
19 | 	addr[nr / BITS_PER_LONG] &= ~(1UL << (nr % BITS_PER_LONG));
20 | }
21 | 
22 | static __always_inline int test_bit(unsigned int nr, const unsigned long *addr)
23 | {
24 | 	return ((1UL << (nr % BITS_PER_LONG)) &
25 | 		(((unsigned long *)addr)[nr / BITS_PER_LONG])) != 0;
26 | }
27 | 
28 | static inline unsigned long hweight_long(unsigned long w)
29 | {
30 | 	return sizeof(w) == 4 ? hweight32(w) : hweight64(w);
31 | }
32 | 
33 | #endif
34 | 
--------------------------------------------------------------------------------
/include/linux/byteorder.h:
--------------------------------------------------------------------------------
1 | #ifndef __BYTE_ORDER_H__
2 | #define __BYTE_ORDER_H__
3 | 
4 | #include
5 | #include
6 | 
7 | #endif
8 | 
--------------------------------------------------------------------------------
/include/linux/init.h:
--------------------------------------------------------------------------------
 1 | #ifndef _LINUX_INIT_H
 2 | #define _LINUX_INIT_H
 3 | 
 4 | #include
 5 | 
 6 | #ifndef notrace
 7 | #define notrace
 8 | #endif
 9 | 
10 | /* These macros are used to mark some functions or 
11 |  * initialized data (doesn't apply to uninitialized data)
12 |  * as `initialization' functions.
The kernel can take this 13 | * as hint that the function is used only during the initialization 14 | * phase and free up used memory resources after 15 | * 16 | * Usage: 17 | * For functions: 18 | * 19 | * You should add __init immediately before the function name, like: 20 | * 21 | * static void __init initme(int x, int y) 22 | * { 23 | * extern int z; z = x * y; 24 | * } 25 | * 26 | * If the function has a prototype somewhere, you can also add 27 | * __init between closing brace of the prototype and semicolon: 28 | * 29 | * extern int initialize_foobar_device(int, int, int) __init; 30 | * 31 | * For initialized data: 32 | * You should insert __initdata between the variable name and equal 33 | * sign followed by value, e.g.: 34 | * 35 | * static int init_variable __initdata = 0; 36 | * static const char linux_logo[] __initconst = { 0x32, 0x36, ... }; 37 | * 38 | * Don't forget to initialize data not at file scope, i.e. within a function, 39 | * as gcc otherwise puts the data into the bss section and not into the init 40 | * section. 41 | * 42 | * Also note, that this data cannot be "const". 43 | */ 44 | 45 | /* These are for everybody (although not all archs will actually 46 | discard it in modules) */ 47 | #define __init __section(.init.text) __cold notrace 48 | #define __initdata __section(.init.data) 49 | #define __initconst __section(.init.rodata) 50 | #define __exitdata __section(.exit.data) 51 | #define __exit_call __used __section(.exitcall.exit) 52 | 53 | /* 54 | * modpost check for section mismatches during the kernel build. 55 | * A section mismatch happens when there are references from a 56 | * code or data section to an init section (both code or data). 57 | * The init sections are (for most archs) discarded by the kernel 58 | * when early init has completed so all such references are potential bugs. 59 | * For exit sections the same issue exists. 60 | * 61 | * The following markers are used for the cases where the reference to 62 | * the *init / *exit section (code or data) is valid and will teach 63 | * modpost not to issue a warning. Intended semantics is that a code or 64 | * data tagged __ref* can reference code or data from init section without 65 | * producing a warning (of course, no warning does not mean code is 66 | * correct, so optimally document why the __ref is needed and why it's OK). 67 | * 68 | * The markers follow same syntax rules as __init / __initdata. 
69 | */ 70 | #define __ref __section(.ref.text) noinline 71 | #define __refdata __section(.ref.data) 72 | #define __refconst __section(.ref.rodata) 73 | 74 | /* compatibility defines */ 75 | #define __init_refok __ref 76 | #define __initdata_refok __refdata 77 | #define __exit_refok __ref 78 | 79 | 80 | #ifdef MODULE 81 | #define __exitused 82 | #else 83 | #define __exitused __used 84 | #endif 85 | 86 | #define __exit __section(.exit.text) __exitused __cold notrace 87 | 88 | /* Used for HOTPLUG */ 89 | #define __devinit __section(.devinit.text) __cold notrace 90 | #define __devinitdata __section(.devinit.data) 91 | #define __devinitconst __section(.devinit.rodata) 92 | #define __devexit __section(.devexit.text) __exitused __cold notrace 93 | #define __devexitdata __section(.devexit.data) 94 | #define __devexitconst __section(.devexit.rodata) 95 | 96 | /* Used for HOTPLUG_CPU */ 97 | #define __cpuinit __section(.cpuinit.text) __cold notrace 98 | #define __cpuinitdata __section(.cpuinit.data) 99 | #define __cpuinitconst __section(.cpuinit.rodata) 100 | #define __cpuexit __section(.cpuexit.text) __exitused __cold notrace 101 | #define __cpuexitdata __section(.cpuexit.data) 102 | #define __cpuexitconst __section(.cpuexit.rodata) 103 | 104 | /* Used for MEMORY_HOTPLUG */ 105 | #define __meminit __section(.meminit.text) __cold notrace 106 | #define __meminitdata __section(.meminit.data) 107 | #define __meminitconst __section(.meminit.rodata) 108 | #define __memexit __section(.memexit.text) __exitused __cold notrace 109 | #define __memexitdata __section(.memexit.data) 110 | #define __memexitconst __section(.memexit.rodata) 111 | 112 | /* For assembly routines */ 113 | #define __HEAD .section ".head.text","ax" 114 | #define __INIT .section ".init.text","ax" 115 | #define __FINIT .previous 116 | 117 | #define __INITDATA .section ".init.data","aw",%progbits 118 | #define __INITRODATA .section ".init.rodata","a",%progbits 119 | #define __FINITDATA .previous 120 | 121 | #define __DEVINIT .section ".devinit.text", "ax" 122 | #define __DEVINITDATA .section ".devinit.data", "aw" 123 | #define __DEVINITRODATA .section ".devinit.rodata", "a" 124 | 125 | #define __CPUINIT .section ".cpuinit.text", "ax" 126 | #define __CPUINITDATA .section ".cpuinit.data", "aw" 127 | #define __CPUINITRODATA .section ".cpuinit.rodata", "a" 128 | 129 | #define __MEMINIT .section ".meminit.text", "ax" 130 | #define __MEMINITDATA .section ".meminit.data", "aw" 131 | #define __MEMINITRODATA .section ".meminit.rodata", "a" 132 | 133 | /* silence warnings when references are OK */ 134 | #define __REF .section ".ref.text", "ax" 135 | #define __REFDATA .section ".ref.data", "aw" 136 | #define __REFCONST .section ".ref.rodata", "a" 137 | 138 | #ifndef __ASSEMBLY__ 139 | /* 140 | * Used for initialization calls.. 141 | */ 142 | typedef int (*initcall_t)(void); 143 | typedef void (*exitcall_t)(void); 144 | 145 | extern initcall_t __con_initcall_start[], __con_initcall_end[]; 146 | extern initcall_t __security_initcall_start[], __security_initcall_end[]; 147 | 148 | /* Used for contructor calls. 
*/ 149 | typedef void (*ctor_fn_t)(void); 150 | 151 | /* Defined in init/main.c */ 152 | extern int do_one_initcall(initcall_t fn); 153 | extern char __initdata boot_command_line[]; 154 | extern char *saved_command_line; 155 | extern unsigned int reset_devices; 156 | 157 | /* used by init/main.c */ 158 | void setup_arch(char **); 159 | void prepare_namespace(void); 160 | 161 | extern void (*late_time_init)(void); 162 | 163 | extern int initcall_debug; 164 | 165 | #endif 166 | 167 | #ifndef MODULE 168 | 169 | #ifndef __ASSEMBLY__ 170 | 171 | /* initcalls are now grouped by functionality into separate 172 | * subsections. Ordering inside the subsections is determined 173 | * by link order. 174 | * For backwards compatibility, initcall() puts the call in 175 | * the device init subsection. 176 | * 177 | * The `id' arg to __define_initcall() is needed so that multiple initcalls 178 | * can point at the same handler without causing duplicate-symbol build errors. 179 | */ 180 | 181 | #define __define_initcall(level,fn,id) \ 182 | static initcall_t __initcall_##fn##id __used \ 183 | __attribute__((__section__(".initcall" level ".init"))) = fn 184 | 185 | /* 186 | * Early initcalls run before initializing SMP. 187 | * 188 | * Only for built-in code, not modules. 189 | */ 190 | #define early_initcall(fn) __define_initcall("early",fn,early) 191 | 192 | /* 193 | * A "pure" initcall has no dependencies on anything else, and purely 194 | * initializes variables that couldn't be statically initialized. 195 | * 196 | * This only exists for built-in code, not for modules. 197 | */ 198 | #define pure_initcall(fn) __define_initcall("0",fn,0) 199 | 200 | #define core_initcall(fn) __define_initcall("1",fn,1) 201 | #define core_initcall_sync(fn) __define_initcall("1s",fn,1s) 202 | #define postcore_initcall(fn) __define_initcall("2",fn,2) 203 | #define postcore_initcall_sync(fn) __define_initcall("2s",fn,2s) 204 | #define arch_initcall(fn) __define_initcall("3",fn,3) 205 | #define arch_initcall_sync(fn) __define_initcall("3s",fn,3s) 206 | #define subsys_initcall(fn) __define_initcall("4",fn,4) 207 | #define subsys_initcall_sync(fn) __define_initcall("4s",fn,4s) 208 | #define fs_initcall(fn) __define_initcall("5",fn,5) 209 | #define fs_initcall_sync(fn) __define_initcall("5s",fn,5s) 210 | #define rootfs_initcall(fn) __define_initcall("rootfs",fn,rootfs) 211 | #define device_initcall(fn) __define_initcall("6",fn,6) 212 | #define device_initcall_sync(fn) __define_initcall("6s",fn,6s) 213 | #define late_initcall(fn) __define_initcall("7",fn,7) 214 | #define late_initcall_sync(fn) __define_initcall("7s",fn,7s) 215 | 216 | #define __initcall(fn) device_initcall(fn) 217 | 218 | #define __exitcall(fn) \ 219 | static exitcall_t __exitcall_##fn __exit_call = fn 220 | 221 | #define console_initcall(fn) \ 222 | static initcall_t __initcall_##fn \ 223 | __used __section(.con_initcall.init) = fn 224 | 225 | #define security_initcall(fn) \ 226 | static initcall_t __initcall_##fn \ 227 | __used __section(.security_initcall.init) = fn 228 | 229 | struct obs_kernel_param { 230 | const char *str; 231 | int (*setup_func)(char *); 232 | int early; 233 | }; 234 | 235 | /* 236 | * Only for really core code. See moduleparam.h for the normal way. 237 | * 238 | * Force the alignment so the compiler doesn't space elements of the 239 | * obs_kernel_param "array" too far apart in .init.setup. 
240 | */ 241 | #define __setup_param(str, unique_id, fn, early) \ 242 | static const char __setup_str_##unique_id[] __initconst \ 243 | __aligned(1) = str; \ 244 | static struct obs_kernel_param __setup_##unique_id \ 245 | __used __section(.init.setup) \ 246 | __attribute__((aligned((sizeof(long))))) \ 247 | = { __setup_str_##unique_id, fn, early } 248 | 249 | #define __setup(str, fn) \ 250 | __setup_param(str, fn, fn, 0) 251 | 252 | /* NOTE: fn is as per module_param, not __setup! Emits warning if fn 253 | * returns non-zero. */ 254 | #define early_param(str, fn) \ 255 | __setup_param(str, fn, fn, 1) 256 | 257 | /* Relies on boot_command_line being set */ 258 | void __init parse_early_param(void); 259 | void __init parse_early_options(char *cmdline); 260 | #endif /* __ASSEMBLY__ */ 261 | 262 | /** 263 | * module_init() - driver initialization entry point 264 | * @x: function to be run at kernel boot time or module insertion 265 | * 266 | * module_init() will either be called during do_initcalls() (if 267 | * builtin) or at module insertion time (if a module). There can only 268 | * be one per module. 269 | */ 270 | #define module_init(x) __initcall(x); 271 | 272 | /** 273 | * module_exit() - driver exit entry point 274 | * @x: function to be run when driver is removed 275 | * 276 | * module_exit() will wrap the driver clean-up code 277 | * with cleanup_module() when used with rmmod when 278 | * the driver is a module. If the driver is statically 279 | * compiled into the kernel, module_exit() has no effect. 280 | * There can only be one per module. 281 | */ 282 | #define module_exit(x) __exitcall(x); 283 | 284 | #else /* MODULE */ 285 | 286 | /* Don't use these in modules, but some people do... */ 287 | #define early_initcall(fn) module_init(fn) 288 | #define core_initcall(fn) module_init(fn) 289 | #define postcore_initcall(fn) module_init(fn) 290 | #define arch_initcall(fn) module_init(fn) 291 | #define subsys_initcall(fn) module_init(fn) 292 | #define fs_initcall(fn) module_init(fn) 293 | #define device_initcall(fn) module_init(fn) 294 | #define late_initcall(fn) module_init(fn) 295 | 296 | #define security_initcall(fn) module_init(fn) 297 | 298 | /* Each module must use one module_init(). */ 299 | #define module_init(initfn) \ 300 | static inline initcall_t __inittest(void) \ 301 | { return initfn; } \ 302 | int init_module(void) __attribute__((alias(#initfn))); 303 | 304 | /* This is only required if you want to be unloadable. */ 305 | #define module_exit(exitfn) \ 306 | static inline exitcall_t __exittest(void) \ 307 | { return exitfn; } \ 308 | void cleanup_module(void) __attribute__((alias(#exitfn))); 309 | 310 | #define __setup_param(str, unique_id, fn) /* nothing */ 311 | #define __setup(str, func) /* nothing */ 312 | #endif 313 | 314 | /* Data marked not to be saved by software suspend */ 315 | #define __nosavedata __section(.data..nosave) 316 | 317 | /* This means "can be init if no module support, otherwise module load 318 | may call it." 
*/ 319 | #ifdef CONFIG_MODULES 320 | #define __init_or_module 321 | #define __initdata_or_module 322 | #define __initconst_or_module 323 | #define __INIT_OR_MODULE .text 324 | #define __INITDATA_OR_MODULE .data 325 | #define __INITRODATA_OR_MODULE .section ".rodata","a",%progbits 326 | #else 327 | #define __init_or_module __init 328 | #define __initdata_or_module __initdata 329 | #define __initconst_or_module __initconst 330 | #define __INIT_OR_MODULE __INIT 331 | #define __INITDATA_OR_MODULE __INITDATA 332 | #define __INITRODATA_OR_MODULE __INITRODATA 333 | #endif /*CONFIG_MODULES*/ 334 | 335 | /* Functions marked as __devexit may be discarded at kernel link time, depending 336 | on config options. Newer versions of binutils detect references from 337 | retained sections to discarded sections and flag an error. Pointers to 338 | __devexit functions must use __devexit_p(function_name), the wrapper will 339 | insert either the function_name or NULL, depending on the config options. 340 | */ 341 | #if defined(MODULE) || defined(CONFIG_HOTPLUG) 342 | #define __devexit_p(x) x 343 | #else 344 | #define __devexit_p(x) NULL 345 | #endif 346 | 347 | #ifdef MODULE 348 | #define __exit_p(x) x 349 | #else 350 | #define __exit_p(x) NULL 351 | #endif 352 | 353 | #endif /* _LINUX_INIT_H */ 354 | -------------------------------------------------------------------------------- /include/linux/kernel.h: -------------------------------------------------------------------------------- 1 | 2 | #ifndef KVM__LINUX_KERNEL_H_ 3 | #define KVM__LINUX_KERNEL_H_ 4 | 5 | #define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d)) 6 | 7 | #define ALIGN(x,a) __ALIGN_MASK(x,(typeof(x))(a)-1) 8 | #define __ALIGN_MASK(x,mask) (((x)+(mask))&~(mask)) 9 | 10 | #ifndef offsetof 11 | #define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER) 12 | #endif 13 | 14 | #ifndef container_of 15 | /** 16 | * container_of - cast a member of a structure out to the containing structure 17 | * @ptr: the pointer to the member. 18 | * @type: the type of the container struct this is embedded in. 19 | * @member: the name of the member within the struct. 20 | * 21 | */ 22 | #define container_of(ptr, type, member) ({ \ 23 | const typeof(((type *)0)->member) * __mptr = (ptr); \ 24 | (type *)((char *)__mptr - offsetof(type, member)); }) 25 | #endif 26 | 27 | #define min(x, y) ({ \ 28 | typeof(x) _min1 = (x); \ 29 | typeof(y) _min2 = (y); \ 30 | (void) (&_min1 == &_min2); \ 31 | _min1 < _min2 ? _min1 : _min2; }) 32 | 33 | #define max(x, y) ({ \ 34 | typeof(x) _max1 = (x); \ 35 | typeof(y) _max2 = (y); \ 36 | (void) (&_max1 == &_max2); \ 37 | _max1 > _max2 ? _max1 : _max2; }) 38 | 39 | #endif 40 | -------------------------------------------------------------------------------- /include/linux/list.h: -------------------------------------------------------------------------------- 1 | #ifndef _LINUX_LIST_H 2 | #define _LINUX_LIST_H 3 | 4 | #include 5 | #include 6 | #include 7 | #include 8 | 9 | /* 10 | * Simple doubly linked list implementation. 11 | * 12 | * Some of the internal functions ("__xxx") are useful when 13 | * manipulating whole lists rather than single entries, as 14 | * sometimes we already know the next/prev entries and we can 15 | * generate better code by using them directly rather than 16 | * using the generic single-entry routines. 
17 | */ 18 | 19 | #define LIST_HEAD_INIT(name) { &(name), &(name) } 20 | 21 | #define LIST_HEAD(name) \ 22 | struct list_head name = LIST_HEAD_INIT(name) 23 | 24 | static inline void INIT_LIST_HEAD(struct list_head *list) 25 | { 26 | list->next = list; 27 | list->prev = list; 28 | } 29 | 30 | /* 31 | * Insert a new entry between two known consecutive entries. 32 | * 33 | * This is only for internal list manipulation where we know 34 | * the prev/next entries already! 35 | */ 36 | #ifndef CONFIG_DEBUG_LIST 37 | static inline void __list_add(struct list_head *new, 38 | struct list_head *prev, 39 | struct list_head *next) 40 | { 41 | next->prev = new; 42 | new->next = next; 43 | new->prev = prev; 44 | prev->next = new; 45 | } 46 | #else 47 | extern void __list_add(struct list_head *new, 48 | struct list_head *prev, 49 | struct list_head *next); 50 | #endif 51 | 52 | /** 53 | * list_add - add a new entry 54 | * @new: new entry to be added 55 | * @head: list head to add it after 56 | * 57 | * Insert a new entry after the specified head. 58 | * This is good for implementing stacks. 59 | */ 60 | static inline void list_add(struct list_head *new, struct list_head *head) 61 | { 62 | __list_add(new, head, head->next); 63 | } 64 | 65 | 66 | /** 67 | * list_add_tail - add a new entry 68 | * @new: new entry to be added 69 | * @head: list head to add it before 70 | * 71 | * Insert a new entry before the specified head. 72 | * This is useful for implementing queues. 73 | */ 74 | static inline void list_add_tail(struct list_head *new, struct list_head *head) 75 | { 76 | __list_add(new, head->prev, head); 77 | } 78 | 79 | /* 80 | * Delete a list entry by making the prev/next entries 81 | * point to each other. 82 | * 83 | * This is only for internal list manipulation where we know 84 | * the prev/next entries already! 85 | */ 86 | static inline void __list_del(struct list_head * prev, struct list_head * next) 87 | { 88 | next->prev = prev; 89 | prev->next = next; 90 | } 91 | 92 | /** 93 | * list_del - deletes entry from list. 94 | * @entry: the element to delete from the list. 95 | * Note: list_empty() on entry does not return true after this, the entry is 96 | * in an undefined state. 97 | */ 98 | #ifndef CONFIG_DEBUG_LIST 99 | static inline void __list_del_entry(struct list_head *entry) 100 | { 101 | __list_del(entry->prev, entry->next); 102 | } 103 | 104 | static inline void list_del(struct list_head *entry) 105 | { 106 | __list_del(entry->prev, entry->next); 107 | entry->next = LIST_POISON1; 108 | entry->prev = LIST_POISON2; 109 | } 110 | #else 111 | extern void __list_del_entry(struct list_head *entry); 112 | extern void list_del(struct list_head *entry); 113 | #endif 114 | 115 | /** 116 | * list_replace - replace old entry by new one 117 | * @old : the element to be replaced 118 | * @new : the new element to insert 119 | * 120 | * If @old was empty, it will be overwritten. 121 | */ 122 | static inline void list_replace(struct list_head *old, 123 | struct list_head *new) 124 | { 125 | new->next = old->next; 126 | new->next->prev = new; 127 | new->prev = old->prev; 128 | new->prev->next = new; 129 | } 130 | 131 | static inline void list_replace_init(struct list_head *old, 132 | struct list_head *new) 133 | { 134 | list_replace(old, new); 135 | INIT_LIST_HEAD(old); 136 | } 137 | 138 | /** 139 | * list_del_init - deletes entry from list and reinitialize it. 140 | * @entry: the element to delete from the list. 
141 | */ 142 | static inline void list_del_init(struct list_head *entry) 143 | { 144 | __list_del_entry(entry); 145 | INIT_LIST_HEAD(entry); 146 | } 147 | 148 | /** 149 | * list_move - delete from one list and add as another's head 150 | * @list: the entry to move 151 | * @head: the head that will precede our entry 152 | */ 153 | static inline void list_move(struct list_head *list, struct list_head *head) 154 | { 155 | __list_del_entry(list); 156 | list_add(list, head); 157 | } 158 | 159 | /** 160 | * list_move_tail - delete from one list and add as another's tail 161 | * @list: the entry to move 162 | * @head: the head that will follow our entry 163 | */ 164 | static inline void list_move_tail(struct list_head *list, 165 | struct list_head *head) 166 | { 167 | __list_del_entry(list); 168 | list_add_tail(list, head); 169 | } 170 | 171 | /** 172 | * list_is_last - tests whether @list is the last entry in list @head 173 | * @list: the entry to test 174 | * @head: the head of the list 175 | */ 176 | static inline int list_is_last(const struct list_head *list, 177 | const struct list_head *head) 178 | { 179 | return list->next == head; 180 | } 181 | 182 | /** 183 | * list_empty - tests whether a list is empty 184 | * @head: the list to test. 185 | */ 186 | static inline int list_empty(const struct list_head *head) 187 | { 188 | return head->next == head; 189 | } 190 | 191 | /** 192 | * list_empty_careful - tests whether a list is empty and not being modified 193 | * @head: the list to test 194 | * 195 | * Description: 196 | * tests whether a list is empty _and_ checks that no other CPU might be 197 | * in the process of modifying either member (next or prev) 198 | * 199 | * NOTE: using list_empty_careful() without synchronization 200 | * can only be safe if the only activity that can happen 201 | * to the list entry is list_del_init(). Eg. it cannot be used 202 | * if another CPU could re-list_add() it. 203 | */ 204 | static inline int list_empty_careful(const struct list_head *head) 205 | { 206 | struct list_head *next = head->next; 207 | return (next == head) && (next == head->prev); 208 | } 209 | 210 | /** 211 | * list_rotate_left - rotate the list to the left 212 | * @head: the head of the list 213 | */ 214 | static inline void list_rotate_left(struct list_head *head) 215 | { 216 | struct list_head *first; 217 | 218 | if (!list_empty(head)) { 219 | first = head->next; 220 | list_move_tail(first, head); 221 | } 222 | } 223 | 224 | /** 225 | * list_is_singular - tests whether a list has just one entry. 226 | * @head: the list to test. 227 | */ 228 | static inline int list_is_singular(const struct list_head *head) 229 | { 230 | return !list_empty(head) && (head->next == head->prev); 231 | } 232 | 233 | static inline void __list_cut_position(struct list_head *list, 234 | struct list_head *head, struct list_head *entry) 235 | { 236 | struct list_head *new_first = entry->next; 237 | list->next = head->next; 238 | list->next->prev = list; 239 | list->prev = entry; 240 | entry->next = list; 241 | head->next = new_first; 242 | new_first->prev = head; 243 | } 244 | 245 | /** 246 | * list_cut_position - cut a list into two 247 | * @list: a new list to add all removed entries 248 | * @head: a list with entries 249 | * @entry: an entry within head, could be the head itself 250 | * and if so we won't cut the list 251 | * 252 | * This helper moves the initial part of @head, up to and 253 | * including @entry, from @head to @list. You should 254 | * pass on @entry an element you know is on @head. 
@list 255 | * should be an empty list or a list you do not care about 256 | * losing its data. 257 | * 258 | */ 259 | static inline void list_cut_position(struct list_head *list, 260 | struct list_head *head, struct list_head *entry) 261 | { 262 | if (list_empty(head)) 263 | return; 264 | if (list_is_singular(head) && 265 | (head->next != entry && head != entry)) 266 | return; 267 | if (entry == head) 268 | INIT_LIST_HEAD(list); 269 | else 270 | __list_cut_position(list, head, entry); 271 | } 272 | 273 | static inline void __list_splice(const struct list_head *list, 274 | struct list_head *prev, 275 | struct list_head *next) 276 | { 277 | struct list_head *first = list->next; 278 | struct list_head *last = list->prev; 279 | 280 | first->prev = prev; 281 | prev->next = first; 282 | 283 | last->next = next; 284 | next->prev = last; 285 | } 286 | 287 | /** 288 | * list_splice - join two lists, this is designed for stacks 289 | * @list: the new list to add. 290 | * @head: the place to add it in the first list. 291 | */ 292 | static inline void list_splice(const struct list_head *list, 293 | struct list_head *head) 294 | { 295 | if (!list_empty(list)) 296 | __list_splice(list, head, head->next); 297 | } 298 | 299 | /** 300 | * list_splice_tail - join two lists, each list being a queue 301 | * @list: the new list to add. 302 | * @head: the place to add it in the first list. 303 | */ 304 | static inline void list_splice_tail(struct list_head *list, 305 | struct list_head *head) 306 | { 307 | if (!list_empty(list)) 308 | __list_splice(list, head->prev, head); 309 | } 310 | 311 | /** 312 | * list_splice_init - join two lists and reinitialise the emptied list. 313 | * @list: the new list to add. 314 | * @head: the place to add it in the first list. 315 | * 316 | * The list at @list is reinitialised 317 | */ 318 | static inline void list_splice_init(struct list_head *list, 319 | struct list_head *head) 320 | { 321 | if (!list_empty(list)) { 322 | __list_splice(list, head, head->next); 323 | INIT_LIST_HEAD(list); 324 | } 325 | } 326 | 327 | /** 328 | * list_splice_tail_init - join two lists and reinitialise the emptied list 329 | * @list: the new list to add. 330 | * @head: the place to add it in the first list. 331 | * 332 | * Each of the lists is a queue. 333 | * The list at @list is reinitialised 334 | */ 335 | static inline void list_splice_tail_init(struct list_head *list, 336 | struct list_head *head) 337 | { 338 | if (!list_empty(list)) { 339 | __list_splice(list, head->prev, head); 340 | INIT_LIST_HEAD(list); 341 | } 342 | } 343 | 344 | /** 345 | * list_entry - get the struct for this entry 346 | * @ptr: the &struct list_head pointer. 347 | * @type: the type of the struct this is embedded in. 348 | * @member: the name of the list_struct within the struct. 349 | */ 350 | #define list_entry(ptr, type, member) \ 351 | container_of(ptr, type, member) 352 | 353 | /** 354 | * list_first_entry - get the first element from a list 355 | * @ptr: the list head to take the element from. 356 | * @type: the type of the struct this is embedded in. 357 | * @member: the name of the list_struct within the struct. 358 | * 359 | * Note, that list is expected to be not empty. 360 | */ 361 | #define list_first_entry(ptr, type, member) \ 362 | list_entry((ptr)->next, type, member) 363 | 364 | /** 365 | * list_for_each - iterate over a list 366 | * @pos: the &struct list_head to use as a loop cursor. 367 | * @head: the head for your list. 
368 | */ 369 | #define list_for_each(pos, head) \ 370 | for (pos = (head)->next; pos != (head); pos = pos->next) 371 | 372 | /** 373 | * __list_for_each - iterate over a list 374 | * @pos: the &struct list_head to use as a loop cursor. 375 | * @head: the head for your list. 376 | * 377 | * This variant doesn't differ from list_for_each() any more. 378 | * We don't do prefetching in either case. 379 | */ 380 | #define __list_for_each(pos, head) \ 381 | for (pos = (head)->next; pos != (head); pos = pos->next) 382 | 383 | /** 384 | * list_for_each_prev - iterate over a list backwards 385 | * @pos: the &struct list_head to use as a loop cursor. 386 | * @head: the head for your list. 387 | */ 388 | #define list_for_each_prev(pos, head) \ 389 | for (pos = (head)->prev; pos != (head); pos = pos->prev) 390 | 391 | /** 392 | * list_for_each_safe - iterate over a list safe against removal of list entry 393 | * @pos: the &struct list_head to use as a loop cursor. 394 | * @n: another &struct list_head to use as temporary storage 395 | * @head: the head for your list. 396 | */ 397 | #define list_for_each_safe(pos, n, head) \ 398 | for (pos = (head)->next, n = pos->next; pos != (head); \ 399 | pos = n, n = pos->next) 400 | 401 | /** 402 | * list_for_each_prev_safe - iterate over a list backwards safe against removal of list entry 403 | * @pos: the &struct list_head to use as a loop cursor. 404 | * @n: another &struct list_head to use as temporary storage 405 | * @head: the head for your list. 406 | */ 407 | #define list_for_each_prev_safe(pos, n, head) \ 408 | for (pos = (head)->prev, n = pos->prev; \ 409 | pos != (head); \ 410 | pos = n, n = pos->prev) 411 | 412 | /** 413 | * list_for_each_entry - iterate over list of given type 414 | * @pos: the type * to use as a loop cursor. 415 | * @head: the head for your list. 416 | * @member: the name of the list_struct within the struct. 417 | */ 418 | #define list_for_each_entry(pos, head, member) \ 419 | for (pos = list_entry((head)->next, typeof(*pos), member); \ 420 | &pos->member != (head); \ 421 | pos = list_entry(pos->member.next, typeof(*pos), member)) 422 | 423 | /** 424 | * list_for_each_entry_reverse - iterate backwards over list of given type. 425 | * @pos: the type * to use as a loop cursor. 426 | * @head: the head for your list. 427 | * @member: the name of the list_struct within the struct. 428 | */ 429 | #define list_for_each_entry_reverse(pos, head, member) \ 430 | for (pos = list_entry((head)->prev, typeof(*pos), member); \ 431 | &pos->member != (head); \ 432 | pos = list_entry(pos->member.prev, typeof(*pos), member)) 433 | 434 | /** 435 | * list_prepare_entry - prepare a pos entry for use in list_for_each_entry_continue() 436 | * @pos: the type * to use as a start point 437 | * @head: the head of the list 438 | * @member: the name of the list_struct within the struct. 439 | * 440 | * Prepares a pos entry for use as a start point in list_for_each_entry_continue(). 441 | */ 442 | #define list_prepare_entry(pos, head, member) \ 443 | ((pos) ? : list_entry(head, typeof(*pos), member)) 444 | 445 | /** 446 | * list_for_each_entry_continue - continue iteration over list of given type 447 | * @pos: the type * to use as a loop cursor. 448 | * @head: the head for your list. 449 | * @member: the name of the list_struct within the struct. 450 | * 451 | * Continue to iterate over list of given type, continuing after 452 | * the current position. 
453 | */ 454 | #define list_for_each_entry_continue(pos, head, member) \ 455 | for (pos = list_entry(pos->member.next, typeof(*pos), member); \ 456 | &pos->member != (head); \ 457 | pos = list_entry(pos->member.next, typeof(*pos), member)) 458 | 459 | /** 460 | * list_for_each_entry_continue_reverse - iterate backwards from the given point 461 | * @pos: the type * to use as a loop cursor. 462 | * @head: the head for your list. 463 | * @member: the name of the list_struct within the struct. 464 | * 465 | * Start to iterate over list of given type backwards, continuing after 466 | * the current position. 467 | */ 468 | #define list_for_each_entry_continue_reverse(pos, head, member) \ 469 | for (pos = list_entry(pos->member.prev, typeof(*pos), member); \ 470 | &pos->member != (head); \ 471 | pos = list_entry(pos->member.prev, typeof(*pos), member)) 472 | 473 | /** 474 | * list_for_each_entry_from - iterate over list of given type from the current point 475 | * @pos: the type * to use as a loop cursor. 476 | * @head: the head for your list. 477 | * @member: the name of the list_struct within the struct. 478 | * 479 | * Iterate over list of given type, continuing from current position. 480 | */ 481 | #define list_for_each_entry_from(pos, head, member) \ 482 | for (; &pos->member != (head); \ 483 | pos = list_entry(pos->member.next, typeof(*pos), member)) 484 | 485 | /** 486 | * list_for_each_entry_safe - iterate over list of given type safe against removal of list entry 487 | * @pos: the type * to use as a loop cursor. 488 | * @n: another type * to use as temporary storage 489 | * @head: the head for your list. 490 | * @member: the name of the list_struct within the struct. 491 | */ 492 | #define list_for_each_entry_safe(pos, n, head, member) \ 493 | for (pos = list_entry((head)->next, typeof(*pos), member), \ 494 | n = list_entry(pos->member.next, typeof(*pos), member); \ 495 | &pos->member != (head); \ 496 | pos = n, n = list_entry(n->member.next, typeof(*n), member)) 497 | 498 | /** 499 | * list_for_each_entry_safe_continue - continue list iteration safe against removal 500 | * @pos: the type * to use as a loop cursor. 501 | * @n: another type * to use as temporary storage 502 | * @head: the head for your list. 503 | * @member: the name of the list_struct within the struct. 504 | * 505 | * Iterate over list of given type, continuing after current point, 506 | * safe against removal of list entry. 507 | */ 508 | #define list_for_each_entry_safe_continue(pos, n, head, member) \ 509 | for (pos = list_entry(pos->member.next, typeof(*pos), member), \ 510 | n = list_entry(pos->member.next, typeof(*pos), member); \ 511 | &pos->member != (head); \ 512 | pos = n, n = list_entry(n->member.next, typeof(*n), member)) 513 | 514 | /** 515 | * list_for_each_entry_safe_from - iterate over list from current point safe against removal 516 | * @pos: the type * to use as a loop cursor. 517 | * @n: another type * to use as temporary storage 518 | * @head: the head for your list. 519 | * @member: the name of the list_struct within the struct. 520 | * 521 | * Iterate over list of given type from current point, safe against 522 | * removal of list entry. 
523 | */ 524 | #define list_for_each_entry_safe_from(pos, n, head, member) \ 525 | for (n = list_entry(pos->member.next, typeof(*pos), member); \ 526 | &pos->member != (head); \ 527 | pos = n, n = list_entry(n->member.next, typeof(*n), member)) 528 | 529 | /** 530 | * list_for_each_entry_safe_reverse - iterate backwards over list safe against removal 531 | * @pos: the type * to use as a loop cursor. 532 | * @n: another type * to use as temporary storage 533 | * @head: the head for your list. 534 | * @member: the name of the list_struct within the struct. 535 | * 536 | * Iterate backwards over list of given type, safe against removal 537 | * of list entry. 538 | */ 539 | #define list_for_each_entry_safe_reverse(pos, n, head, member) \ 540 | for (pos = list_entry((head)->prev, typeof(*pos), member), \ 541 | n = list_entry(pos->member.prev, typeof(*pos), member); \ 542 | &pos->member != (head); \ 543 | pos = n, n = list_entry(n->member.prev, typeof(*n), member)) 544 | 545 | /** 546 | * list_safe_reset_next - reset a stale list_for_each_entry_safe loop 547 | * @pos: the loop cursor used in the list_for_each_entry_safe loop 548 | * @n: temporary storage used in list_for_each_entry_safe 549 | * @member: the name of the list_struct within the struct. 550 | * 551 | * list_safe_reset_next is not safe to use in general if the list may be 552 | * modified concurrently (eg. the lock is dropped in the loop body). An 553 | * exception to this is if the cursor element (pos) is pinned in the list, 554 | * and list_safe_reset_next is called after re-taking the lock and before 555 | * completing the current iteration of the loop body. 556 | */ 557 | #define list_safe_reset_next(pos, n, member) \ 558 | n = list_entry(pos->member.next, typeof(*pos), member) 559 | 560 | /* 561 | * Double linked lists with a single pointer list head. 562 | * Mostly useful for hash tables where the two pointer list head is 563 | * too wasteful. 564 | * You lose the ability to access the tail in O(1). 
565 | */ 566 | 567 | #define HLIST_HEAD_INIT { .first = NULL } 568 | #define HLIST_HEAD(name) struct hlist_head name = { .first = NULL } 569 | #define INIT_HLIST_HEAD(ptr) ((ptr)->first = NULL) 570 | static inline void INIT_HLIST_NODE(struct hlist_node *h) 571 | { 572 | h->next = NULL; 573 | h->pprev = NULL; 574 | } 575 | 576 | static inline int hlist_unhashed(const struct hlist_node *h) 577 | { 578 | return !h->pprev; 579 | } 580 | 581 | static inline int hlist_empty(const struct hlist_head *h) 582 | { 583 | return !h->first; 584 | } 585 | 586 | static inline void __hlist_del(struct hlist_node *n) 587 | { 588 | struct hlist_node *next = n->next; 589 | struct hlist_node **pprev = n->pprev; 590 | *pprev = next; 591 | if (next) 592 | next->pprev = pprev; 593 | } 594 | 595 | static inline void hlist_del(struct hlist_node *n) 596 | { 597 | __hlist_del(n); 598 | n->next = LIST_POISON1; 599 | n->pprev = LIST_POISON2; 600 | } 601 | 602 | static inline void hlist_del_init(struct hlist_node *n) 603 | { 604 | if (!hlist_unhashed(n)) { 605 | __hlist_del(n); 606 | INIT_HLIST_NODE(n); 607 | } 608 | } 609 | 610 | static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h) 611 | { 612 | struct hlist_node *first = h->first; 613 | n->next = first; 614 | if (first) 615 | first->pprev = &n->next; 616 | h->first = n; 617 | n->pprev = &h->first; 618 | } 619 | 620 | /* next must be != NULL */ 621 | static inline void hlist_add_before(struct hlist_node *n, 622 | struct hlist_node *next) 623 | { 624 | n->pprev = next->pprev; 625 | n->next = next; 626 | next->pprev = &n->next; 627 | *(n->pprev) = n; 628 | } 629 | 630 | static inline void hlist_add_after(struct hlist_node *n, 631 | struct hlist_node *next) 632 | { 633 | next->next = n->next; 634 | n->next = next; 635 | next->pprev = &n->next; 636 | 637 | if(next->next) 638 | next->next->pprev = &next->next; 639 | } 640 | 641 | /* after that we'll appear to be on some hlist and hlist_del will work */ 642 | static inline void hlist_add_fake(struct hlist_node *n) 643 | { 644 | n->pprev = &n->next; 645 | } 646 | 647 | /* 648 | * Move a list from one list head to another. Fixup the pprev 649 | * reference of the first entry if it exists. 650 | */ 651 | static inline void hlist_move_list(struct hlist_head *old, 652 | struct hlist_head *new) 653 | { 654 | new->first = old->first; 655 | if (new->first) 656 | new->first->pprev = &new->first; 657 | old->first = NULL; 658 | } 659 | 660 | #define hlist_entry(ptr, type, member) container_of(ptr,type,member) 661 | 662 | #define hlist_for_each(pos, head) \ 663 | for (pos = (head)->first; pos ; pos = pos->next) 664 | 665 | #define hlist_for_each_safe(pos, n, head) \ 666 | for (pos = (head)->first; pos && ({ n = pos->next; 1; }); \ 667 | pos = n) 668 | 669 | /** 670 | * hlist_for_each_entry - iterate over list of given type 671 | * @tpos: the type * to use as a loop cursor. 672 | * @pos: the &struct hlist_node to use as a loop cursor. 673 | * @head: the head for your list. 674 | * @member: the name of the hlist_node within the struct. 675 | */ 676 | #define hlist_for_each_entry(tpos, pos, head, member) \ 677 | for (pos = (head)->first; \ 678 | pos && \ 679 | ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \ 680 | pos = pos->next) 681 | 682 | /** 683 | * hlist_for_each_entry_continue - iterate over a hlist continuing after current point 684 | * @tpos: the type * to use as a loop cursor. 685 | * @pos: the &struct hlist_node to use as a loop cursor. 
686 | * @member: the name of the hlist_node within the struct. 687 | */ 688 | #define hlist_for_each_entry_continue(tpos, pos, member) \ 689 | for (pos = (pos)->next; \ 690 | pos && \ 691 | ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \ 692 | pos = pos->next) 693 | 694 | /** 695 | * hlist_for_each_entry_from - iterate over a hlist continuing from current point 696 | * @tpos: the type * to use as a loop cursor. 697 | * @pos: the &struct hlist_node to use as a loop cursor. 698 | * @member: the name of the hlist_node within the struct. 699 | */ 700 | #define hlist_for_each_entry_from(tpos, pos, member) \ 701 | for (; pos && \ 702 | ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \ 703 | pos = pos->next) 704 | 705 | /** 706 | * hlist_for_each_entry_safe - iterate over list of given type safe against removal of list entry 707 | * @tpos: the type * to use as a loop cursor. 708 | * @pos: the &struct hlist_node to use as a loop cursor. 709 | * @n: another &struct hlist_node to use as temporary storage 710 | * @head: the head for your list. 711 | * @member: the name of the hlist_node within the struct. 712 | */ 713 | #define hlist_for_each_entry_safe(tpos, pos, n, head, member) \ 714 | for (pos = (head)->first; \ 715 | pos && ({ n = pos->next; 1; }) && \ 716 | ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \ 717 | pos = n) 718 | 719 | #endif 720 | -------------------------------------------------------------------------------- /include/linux/module.h: -------------------------------------------------------------------------------- 1 | #ifndef KVM__LINUX_MODULE_H 2 | #define KVM__LINUX_MODULE_H 3 | 4 | #define EXPORT_SYMBOL(name) 5 | 6 | #endif 7 | -------------------------------------------------------------------------------- /include/linux/poison.h: -------------------------------------------------------------------------------- 1 | #ifndef _LINUX_POISON_H 2 | #define _LINUX_POISON_H 3 | 4 | /********** include/linux/list.h **********/ 5 | 6 | /* 7 | * Architectures might want to move the poison pointer offset 8 | * into some well-recognized area such as 0xdead000000000000, 9 | * that is also not mappable by user-space exploits: 10 | */ 11 | #ifdef CONFIG_ILLEGAL_POINTER_VALUE 12 | # define POISON_POINTER_DELTA _AC(CONFIG_ILLEGAL_POINTER_VALUE, UL) 13 | #else 14 | # define POISON_POINTER_DELTA 0 15 | #endif 16 | 17 | /* 18 | * These are non-NULL pointers that will result in page faults 19 | * under normal circumstances, used to verify that nobody uses 20 | * non-initialized list entries. 21 | */ 22 | #define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA) 23 | #define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA) 24 | 25 | /********** include/linux/timer.h **********/ 26 | /* 27 | * Magic number "tsta" to indicate a static timer initializer 28 | * for the object debugging code. 29 | */ 30 | #define TIMER_ENTRY_STATIC ((void *) 0x74737461) 31 | 32 | /********** mm/debug-pagealloc.c **********/ 33 | #define PAGE_POISON 0xaa 34 | 35 | /********** mm/slab.c **********/ 36 | /* 37 | * Magic nums for obj red zoning. 38 | * Placed in the first word before and the first word after an obj. 
39 | */ 40 | #define RED_INACTIVE 0x09F911029D74E35BULL /* when obj is inactive */ 41 | #define RED_ACTIVE 0xD84156C5635688C0ULL /* when obj is active */ 42 | 43 | #ifdef CONFIG_PHYS_ADDR_T_64BIT 44 | #define MEMBLOCK_INACTIVE 0x3a84fb0144c9e71bULL 45 | #else 46 | #define MEMBLOCK_INACTIVE 0x44c9e71bUL 47 | #endif 48 | 49 | #define SLUB_RED_INACTIVE 0xbb 50 | #define SLUB_RED_ACTIVE 0xcc 51 | 52 | /* ...and for poisoning */ 53 | #define POISON_INUSE 0x5a /* for use-uninitialised poisoning */ 54 | #define POISON_FREE 0x6b /* for use-after-free poisoning */ 55 | #define POISON_END 0xa5 /* end-byte of poisoning */ 56 | 57 | /********** arch/$ARCH/mm/init.c **********/ 58 | #define POISON_FREE_INITMEM 0xcc 59 | 60 | /********** arch/ia64/hp/common/sba_iommu.c **********/ 61 | /* 62 | * arch/ia64/hp/common/sba_iommu.c uses a 16-byte poison string with a 63 | * value of "SBAIOMMU POISON\0" for spill-over poisoning. 64 | */ 65 | 66 | /********** fs/jbd/journal.c **********/ 67 | #define JBD_POISON_FREE 0x5b 68 | #define JBD2_POISON_FREE 0x5c 69 | 70 | /********** drivers/base/dmapool.c **********/ 71 | #define POOL_POISON_FREED 0xa7 /* !inuse */ 72 | #define POOL_POISON_ALLOCATED 0xa9 /* !initted */ 73 | 74 | /********** drivers/atm/ **********/ 75 | #define ATM_POISON_FREE 0x12 76 | #define ATM_POISON 0xdeadbeef 77 | 78 | /********** net/ **********/ 79 | #define NEIGHBOR_DEAD 0xdeadbeef 80 | #define NETFILTER_LINK_POISON 0xdead57ac 81 | 82 | /********** kernel/mutexes **********/ 83 | #define MUTEX_DEBUG_INIT 0x11 84 | #define MUTEX_DEBUG_FREE 0x22 85 | 86 | /********** lib/flex_array.c **********/ 87 | #define FLEX_ARRAY_FREE 0x6c /* for use-after-free poisoning */ 88 | 89 | /********** security/ **********/ 90 | #define KEY_DESTROY 0xbd 91 | 92 | /********** sound/oss/ **********/ 93 | #define OSS_POISON_FREE 0xAB 94 | 95 | #endif 96 | -------------------------------------------------------------------------------- /include/linux/prefetch.h: -------------------------------------------------------------------------------- 1 | #ifndef KVM__LINUX_PREFETCH_H 2 | #define KVM__LINUX_PREFETCH_H 3 | 4 | static inline void prefetch(void *a __attribute__((unused))) { } 5 | 6 | #endif 7 | -------------------------------------------------------------------------------- /include/linux/types.h: -------------------------------------------------------------------------------- 1 | #ifndef LINUX_TYPES_H 2 | #define LINUX_TYPES_H 3 | 4 | #include 5 | 6 | typedef __u64 u64; 7 | typedef __s64 s64; 8 | 9 | typedef __u32 u32; 10 | typedef __s32 s32; 11 | 12 | typedef __u16 u16; 13 | typedef __s16 s16; 14 | 15 | typedef __u8 u8; 16 | typedef __s8 s8; 17 | 18 | #ifdef __CHECKER__ 19 | #define __bitwise__ __attribute__((bitwise)) 20 | #else 21 | #define __bitwise__ 22 | #endif 23 | #ifdef __CHECK_ENDIAN__ 24 | #define __bitwise __bitwise__ 25 | #else 26 | #define __bitwise 27 | #endif 28 | 29 | 30 | typedef __u16 __bitwise __le16; 31 | typedef __u16 __bitwise __be16; 32 | typedef __u32 __bitwise __le32; 33 | typedef __u32 __bitwise __be32; 34 | typedef __u64 __bitwise __le64; 35 | typedef __u64 __bitwise __be64; 36 | 37 | struct list_head { 38 | struct list_head *next, *prev; 39 | }; 40 | 41 | struct hlist_head { 42 | struct hlist_node *first; 43 | }; 44 | 45 | struct hlist_node { 46 | struct hlist_node *next, **pprev; 47 | }; 48 | 49 | #endif /* LINUX_TYPES_H */ 50 | -------------------------------------------------------------------------------- /include/virtio.h: 
-------------------------------------------------------------------------------- 1 | #ifndef VIRTIO_H_ 2 | #define VIRTIO_H_ 3 | 4 | #include 5 | #include 6 | 7 | #define mb() asm volatile ("" : : : "memory") 8 | 9 | /** 10 | * virtqueue - a queue to register buffers for sending or receiving. 11 | * @list: the chain of virtqueues for this device 12 | * @callback: the function to call when buffers are consumed (can be NULL). 13 | * @name: the name of this virtqueue (mainly for debugging) 14 | * @vdev: the virtio device this queue was created for. 15 | * @priv: a pointer for the virtqueue implementation to use. 16 | */ 17 | struct virtqueue { 18 | struct list_head list; 19 | void (*callback)(struct virtqueue *vq); 20 | const char *name; 21 | struct virtio_device *vdev; 22 | void *priv; 23 | }; 24 | 25 | /** 26 | * virtio_device - representation of a device using virtio 27 | * @index: unique position on the virtio bus 28 | * @dev: underlying device. 29 | * @id: the device type identification (used to match it with a driver). 30 | * @config: the configuration ops for this device. 31 | * @vqs: the list of virtqueues for this device. 32 | * @features: the features supported by both driver and device. 33 | * @priv: private pointer for the driver's use. 34 | */ 35 | struct virtio_device { 36 | int index; 37 | struct virtio_config_ops *config; 38 | struct list_head vqs; 39 | /* Note that this is a Linux set_bit-style bitmap. */ 40 | unsigned long features[1]; 41 | void *priv; 42 | }; 43 | 44 | struct vring_virtqueue 45 | { 46 | struct virtqueue vq; 47 | 48 | /* Actual memory layout for this queue */ 49 | struct vring vring; 50 | 51 | /* Other side has made a mess, don't try any more. */ 52 | bool broken; 53 | 54 | /* Host supports indirect buffers */ 55 | bool indirect; 56 | 57 | /* Host publishes avail event idx */ 58 | bool event; 59 | 60 | /* Number of free buffers */ 61 | unsigned int num_free; 62 | /* Head of free buffer list. */ 63 | unsigned int free_head; 64 | /* Number we've added since last sync. */ 65 | unsigned int num_added; 66 | 67 | /* Last used index we've seen. */ 68 | u16 last_used_idx; 69 | 70 | /* Last available index we've seen. */ 71 | u16 last_avail_idx; 72 | 73 | /* How to notify other side. FIXME: commonalize hcalls! */ 74 | void (*notify)(struct virtqueue *vq); 75 | 76 | #ifdef DEBUG 77 | /* They're supposed to lock for us. */ 78 | unsigned int in_use; 79 | #endif 80 | 81 | /* Tokens for callbacks. 
*/ 82 | void *data[]; 83 | }; 84 | 85 | #endif -------------------------------------------------------------------------------- /include/virtio_client.h: -------------------------------------------------------------------------------- 1 | #ifndef VIRTIO_CLIENT_H_ 2 | #define VIRTIO_CLIENT_H_ 3 | 4 | #include 5 | 6 | #include 7 | #include 8 | #include 9 | 10 | #include "virtio_server.h" 11 | 12 | static inline u16 virt_queue__pop(struct vring_virtqueue *vq) 13 | { 14 | return vq->vring.avail->ring[vq->last_avail_idx++ % vq->vring.num]; 15 | } 16 | 17 | static inline struct vring_desc *virt_queue__get_desc(struct vring_virtqueue *vq, u16 desc_ndx) 18 | { 19 | return &vq->vring.desc[desc_ndx]; 20 | } 21 | 22 | static inline bool virt_queue__available(struct vring_virtqueue *vq) 23 | { 24 | if (!vq->vring.avail) 25 | return 0; 26 | return vq->vring.avail->idx != vq->last_avail_idx; 27 | } 28 | 29 | struct vring_used_elem *virt_queue__set_used_elem(struct vring_virtqueue *vq, u32 head, u32 len); 30 | 31 | u16 virt_queue__get_iov(struct vring_virtqueue *vq, struct iovec iov[], u16 *out, u16 *in); 32 | u16 virt_queue__get_inout_iov(struct vring_virtqueue *vq, struct iovec in_iov[], 33 | struct iovec out_iov[], u16 *in, u16 *out); 34 | int virtio__get_dev_specific_field(int offset, bool msix, bool features_hi, u32 *config_off); 35 | 36 | #endif -------------------------------------------------------------------------------- /include/virtio_server.h: -------------------------------------------------------------------------------- 1 | #ifndef VIRTIO_SERVER_H_ 2 | #define VIRTIO_SERVER_H_ 3 | 4 | #include "virtio.h" 5 | 6 | #include 7 | 8 | #include 9 | #include 10 | #include 11 | #include 12 | 13 | #define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq) 14 | 15 | /** 16 | * operations for virtqueue 17 | * virtqueue_add_buf: expose buffer to other end 18 | * vq: the struct virtqueue we're talking about. 19 | * sg: the description of the buffer(s). 20 | * out_num: the number of sg readable by other side 21 | * in_num: the number of sg which are writable (after readable ones) 22 | * data: the token identifying the buffer. 23 | * gfp: how to do memory allocations (if necessary). 24 | * Returns remaining capacity of queue (sg segments) or a negative error. 25 | * virtqueue_kick: update after add_buf 26 | * vq: the struct virtqueue 27 | * After one or more add_buf calls, invoke this to kick the other side. 28 | * virtqueue_get_buf: get the next used buffer 29 | * vq: the struct virtqueue we're talking about. 30 | * len: the length written into the buffer 31 | * Returns NULL or the "data" token handed to add_buf. 32 | * virtqueue_disable_cb: disable callbacks 33 | * vq: the struct virtqueue we're talking about. 34 | * Note that this is not necessarily synchronous, hence unreliable and only 35 | * useful as an optimization. 36 | * virtqueue_enable_cb: restart callbacks after disable_cb. 37 | * vq: the struct virtqueue we're talking about. 38 | * This re-enables callbacks; it returns "false" if there are pending 39 | * buffers in the queue, to detect a possible race between the driver 40 | * checking for more work, and enabling callbacks. 41 | * virtqueue_enable_cb_delayed: restart callbacks after disable_cb. 42 | * vq: the struct virtqueue we're talking about. 
43 |  * This re-enables callbacks but hints to the other side to delay
44 |  * interrupts until most of the available buffers have been processed;
45 |  * it returns "false" if there are many pending buffers in the queue,
46 |  * to detect a possible race between the driver checking for more work,
47 |  * and enabling callbacks.
48 |  * virtqueue_detach_unused_buf: detach first unused buffer
49 |  *	vq: the struct virtqueue we're talking about.
50 |  *	Returns NULL or the "data" token handed to add_buf
51 |  * virtqueue_get_vring_size: return the size of the virtqueue's vring
52 |  *	vq: the struct virtqueue containing the vring of interest.
53 |  *	Returns the size of the vring.
54 |  *
55 |  * Locking rules are straightforward: the driver is responsible for
56 |  * locking.  No two operations may be invoked simultaneously, with the exception
57 |  * of virtqueue_disable_cb.
58 |  *
59 |  * All operations can be called in any context.
60 |  */
61 | 
62 | struct virtqueue *vring_new_virtqueue(unsigned int num,
63 | 				      unsigned int vring_align,
64 | 				      struct virtio_device *vdev,
65 | 				      void *vq_addr,
66 | 				      void *pages,
67 | 				      void (*notify)(struct virtqueue *),
68 | 				      void (*callback)(struct virtqueue *),
69 | 				      const char *name);
70 | 
71 | int virtqueue_add_buf(struct virtqueue *_vq,
72 | 		      struct iovec sg[],
73 | 		      unsigned int out,
74 | 		      unsigned int in,
75 | 		      void *data);
76 | 
77 | void virtqueue_kick(struct virtqueue *vq);
78 | 
79 | void *virtqueue_get_buf(struct virtqueue *vq, unsigned int *len);
80 | 
81 | void virtqueue_disable_cb(struct virtqueue *vq);
82 | 
83 | bool virtqueue_enable_cb(struct virtqueue *vq);
84 | 
85 | bool virtqueue_enable_cb_delayed(struct virtqueue *vq);
86 | 
87 | void *virtqueue_detach_unused_buf(struct virtqueue *vq);
88 | 
89 | unsigned int virtqueue_get_vring_size(struct virtqueue *vq);
90 | 
91 | #endif
--------------------------------------------------------------------------------
/main.c:
--------------------------------------------------------------------------------
 1 | #include <stdio.h>
 2 | #include <string.h>
 3 | #include <fcntl.h>
 4 | #include <unistd.h>
 5 | #include <sys/mman.h>
 6 | #include <sys/uio.h>
 7 | 
 8 | #include <linux/list.h>
 9 | #include <virtio.h>
10 | #include <virtio_client.h>
11 | #include <virtio_server.h>
12 | 
13 | int main(int argc, char **argv)
14 | {
15 | 	int fd = shm_open("virtio_shm", O_CREAT | O_RDWR, 0777);
16 | 	void *shm, *vq_ptr, *data_ptr;
17 | 	struct iovec iov[2];
18 | 	struct virtio_device vdev = {0};
19 | 	struct virtqueue *vq;
20 | 	u16 out, in, i;
21 | 
22 | 	ftruncate(fd, 131072);
23 | 	shm = mmap(NULL, 131072, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
24 | 	vq_ptr = shm;
25 | 	data_ptr = shm + 32768;
26 | 
27 | 	iov[0] = (struct iovec) {
28 | 		.iov_base = data_ptr,
29 | 		.iov_len = 4,
30 | 	};
31 | 	iov[1] = (struct iovec) {
32 | 		.iov_base = data_ptr + 4,
33 | 		.iov_len = 6,
34 | 	};
35 | 
36 | 	strncpy(data_ptr, "a1b2c3d4e5", 10);
37 | 	printf("Input: %s\n", (char *)data_ptr);
38 | 
39 | 	INIT_LIST_HEAD(&vdev.vqs);
40 | 	vq = vring_new_virtqueue(16, 4096, &vdev, vq_ptr, vq_ptr + 1024, NULL, NULL, "test");
41 | 	if (!virt_queue__available(to_vvq(vq)))
42 | 		printf("No buffers available - OK!\n");
43 | 
44 | 	virtqueue_add_buf(vq, iov, 2, 0, NULL);
45 | 	virtqueue_kick(vq);
46 | 	if (virt_queue__available(to_vvq(vq)))
47 | 		printf("Buffers available - OK!\n");
48 | 
49 | 	virt_queue__get_iov(to_vvq(vq), iov, &out, &in);
50 | 
51 | 	printf("Output: ");
52 | 	for (i = 0; i < out; i++) {
53 | 		printf("%.*s", (int)iov[i].iov_len, (char *)iov[i].iov_base);
54 | 		fflush(stdout);
55 | 	}
56 | 	printf("\n");
57 | 
58 | 	return 0;
59 | }
60 | 
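
main.c only exercises the submitting half of the flow: it adds a chain with virtqueue_add_buf(), kicks, and reads the chain back with virt_queue__get_iov(), but nothing ever completes a buffer. Below is a minimal sketch of the completion half using the client API declared in virtio_client.h; the helper name serve_one_request and the assumption that readable (out) segments precede writable (in) ones in the chain are illustrative, not part of the repository.

/*
 * Sketch only: completes one available descriptor chain.  Assumes the
 * chain lays out readable (out) iovecs before writable (in) ones, as
 * the chains built by main.c and virtqueue_add_buf() do.
 */
#include <string.h>
#include <sys/uio.h>

#include <virtio_client.h>
#include <virtio_server.h>

static void serve_one_request(struct vring_virtqueue *vvq)
{
	struct iovec iov[16];	/* ring size used by main.c */
	u16 out, in, i, head;
	u32 len = 0;

	if (!virt_queue__available(vvq))
		return;

	/* Pull the next chain off the avail ring and split it into iovecs:
	 * iov[0..out-1] are readable, iov[out..out+in-1] are writable. */
	head = virt_queue__get_iov(vvq, iov, &out, &in);

	/* Toy "device": echo each readable segment into a writable one. */
	for (i = 0; i < in && i < out; i++) {
		size_t n = iov[i].iov_len < iov[out + i].iov_len ?
			   iov[i].iov_len : iov[out + i].iov_len;

		memcpy(iov[out + i].iov_base, iov[i].iov_base, n);
		len += n;
	}

	/* Publish the chain on the used ring with the number of bytes
	 * written, so the submitting side can reclaim it. */
	virt_queue__set_used_elem(vvq, head, len);
}

Once the used element is published, the side that called virtqueue_add_buf() can reclaim its "data" token (and the reported length) with virtqueue_get_buf().
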
-------------------------------------------------------------------------------- /virtio_client.c: -------------------------------------------------------------------------------- 1 | #include "virtio_client.h" 2 | 3 | struct vring_used_elem *virt_queue__set_used_elem(struct vring_virtqueue *vq, u32 head, u32 len) 4 | { 5 | struct vring_used_elem *used_elem; 6 | 7 | used_elem = &vq->vring.used->ring[vq->vring.used->idx % vq->vring.num]; 8 | used_elem->id = head; 9 | used_elem->len = len; 10 | 11 | /* 12 | * Use wmb to assure that used elem was updated with head and len. 13 | * We need a wmb here since we can't advance idx unless we're ready 14 | * to pass the used element to the guest. 15 | */ 16 | mb(); 17 | vq->vring.used->idx++; 18 | 19 | /* 20 | * Use wmb to assure used idx has been increased before we signal the guest. 21 | * Without a wmb here the guest may ignore the queue since it won't see 22 | * an updated idx. 23 | */ 24 | mb(); 25 | 26 | return used_elem; 27 | } 28 | 29 | u16 virt_queue__get_iov(struct vring_virtqueue *vq, struct iovec iov[], u16 *out, u16 *in) 30 | { 31 | struct vring_desc *desc; 32 | u16 head, idx; 33 | 34 | idx = head = virt_queue__pop(vq); 35 | *out = *in = 0; 36 | 37 | do { 38 | desc = virt_queue__get_desc(vq, idx); 39 | iov[*out + *in].iov_base = (void *)desc->addr; 40 | iov[*out + *in].iov_len = desc->len; 41 | if (desc->flags & VRING_DESC_F_WRITE) 42 | (*in)++; 43 | else 44 | (*out)++; 45 | if (desc->flags & VRING_DESC_F_NEXT) 46 | idx = desc->next; 47 | else 48 | break; 49 | } while (1); 50 | 51 | return head; 52 | } 53 | 54 | /* in and out are relative to guest */ 55 | u16 virt_queue__get_inout_iov(struct vring_virtqueue *vq, struct iovec in_iov[], 56 | struct iovec out_iov[], u16 *in, u16 *out) 57 | { 58 | u16 head, idx; 59 | struct vring_desc *desc; 60 | 61 | idx = head = virt_queue__pop(vq); 62 | *out = *in = 0; 63 | do { 64 | desc = virt_queue__get_desc(vq, idx); 65 | if (desc->flags & VRING_DESC_F_WRITE) { 66 | in_iov[*in].iov_base = (void *)desc->addr; 67 | in_iov[*in].iov_len = desc->len; 68 | (*in)++; 69 | } else { 70 | out_iov[*out].iov_base = (void *)desc->addr; 71 | out_iov[*out].iov_len = desc->len; 72 | (*out)++; 73 | } 74 | if (desc->flags & VRING_DESC_F_NEXT) 75 | idx = desc->next; 76 | else 77 | break; 78 | } while (1); 79 | return head; 80 | } 81 | -------------------------------------------------------------------------------- /virtio_server.c: -------------------------------------------------------------------------------- 1 | #include "virtio_server.h" 2 | 3 | #include 4 | #include 5 | #include 6 | #include 7 | 8 | #ifdef DEBUG 9 | /* For development, we want to crash whenever the ring is screwed. */ 10 | #define BAD_RING(_vq, fmt, args...) \ 11 | do { \ 12 | dev_err(&(_vq)->vq.vdev->dev, \ 13 | "%s:"fmt, (_vq)->vq.name, ##args); \ 14 | BUG(); \ 15 | } while (0) 16 | /* Caller is supposed to guarantee no reentry. */ 17 | #define START_USE(_vq) \ 18 | do { \ 19 | if ((_vq)->in_use) \ 20 | panic("%s:in_use = %i\n", \ 21 | (_vq)->vq.name, (_vq)->in_use); \ 22 | (_vq)->in_use = __LINE__; \ 23 | } while (0) 24 | #define END_USE(_vq) \ 25 | do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; } while(0) 26 | #else 27 | #define BAD_RING(_vq, fmt, args...) 
\ 28 | do { \ 29 | dev_err(&_vq->vq.vdev->dev, \ 30 | "%s:"fmt, (_vq)->vq.name, ##args); \ 31 | (_vq)->broken = true; \ 32 | } while (0) 33 | #define START_USE(vq) 34 | #define END_USE(vq) 35 | #endif 36 | 37 | struct virtqueue *vring_new_virtqueue(unsigned int num, 38 | unsigned int vring_align, 39 | struct virtio_device *vdev, 40 | void *vq_addr, 41 | void *pages, 42 | void (*notify)(struct virtqueue *), 43 | void (*callback)(struct virtqueue *), 44 | const char *name) 45 | { 46 | struct vring_virtqueue *vq = vq_addr; 47 | unsigned int i; 48 | 49 | /* We assume num is a power of 2. */ 50 | if (num & (num - 1)) { 51 | printf("Bad virtqueue length %u\n", num); 52 | return NULL; 53 | } 54 | 55 | vring_init(&vq->vring, num, pages, vring_align); 56 | vq->vq.callback = callback; 57 | vq->vq.vdev = vdev; 58 | vq->vq.name = name; 59 | vq->notify = notify; 60 | vq->broken = false; 61 | vq->last_used_idx = 0; 62 | vq->num_added = 0; 63 | list_add_tail(&vq->vq.list, &vdev->vqs); 64 | #ifdef DEBUG 65 | vq->in_use = false; 66 | #endif 67 | 68 | // vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC); 69 | // vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX); 70 | 71 | /* No callback? Tell other side not to bother us. */ 72 | if (!callback) 73 | vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT; 74 | 75 | /* Put everything in free lists. */ 76 | vq->num_free = num; 77 | vq->free_head = 0; 78 | for (i = 0; i < num-1; i++) { 79 | vq->vring.desc[i].next = i+1; 80 | vq->data[i] = NULL; 81 | } 82 | vq->data[i] = NULL; 83 | 84 | return &vq->vq; 85 | } 86 | 87 | int virtqueue_add_buf(struct virtqueue *_vq, 88 | struct iovec sg[], 89 | unsigned int out, 90 | unsigned int in, 91 | void *data) 92 | { 93 | struct vring_virtqueue *vq = to_vvq(_vq); 94 | unsigned int i, avail, prev; 95 | int head; 96 | 97 | START_USE(vq); 98 | 99 | assert(out + in < vq->vring.num); 100 | assert(out + in != 0); 101 | 102 | if (vq->num_free < out + in) { 103 | printf("Can't add buf len %i - avail = %i\n", 104 | out + in, vq->num_free); 105 | /* FIXME: for historical reasons, we force a notify here if 106 | * there are outgoing parts to the buffer. Presumably the 107 | * host should service the ring ASAP. */ 108 | if (out) 109 | vq->notify(&vq->vq); 110 | END_USE(vq); 111 | return -ENOSPC; 112 | } 113 | 114 | /* We're about to use some buffers from the free list. */ 115 | vq->num_free -= out + in; 116 | 117 | head = vq->free_head; 118 | for (i = vq->free_head; out; i = vq->vring.desc[i].next, out--) { 119 | vq->vring.desc[i].flags = VRING_DESC_F_NEXT; 120 | vq->vring.desc[i].addr = (unsigned long)sg->iov_base; 121 | vq->vring.desc[i].len = sg->iov_len; 122 | prev = i; 123 | sg++; 124 | } 125 | for (; in; i = vq->vring.desc[i].next, in--) { 126 | vq->vring.desc[i].flags = VRING_DESC_F_NEXT|VRING_DESC_F_WRITE; 127 | vq->vring.desc[i].addr = (unsigned long)sg->iov_base; 128 | vq->vring.desc[i].len = sg->iov_len; 129 | prev = i; 130 | sg++; 131 | } 132 | /* Last one doesn't continue. */ 133 | vq->vring.desc[prev].flags &= ~VRING_DESC_F_NEXT; 134 | 135 | /* Update free pointer */ 136 | vq->free_head = i; 137 | 138 | add_head: 139 | /* Set token. */ 140 | vq->data[head] = data; 141 | 142 | /* Put entry in available array (but don't update avail->idx until they 143 | * do sync). FIXME: avoid modulus here? 
*/ 144 | avail = (vq->vring.avail->idx + vq->num_added++) % vq->vring.num; 145 | vq->vring.avail->ring[avail] = head; 146 | 147 | printf("Added buffer head %i to %p\n", head, vq); 148 | END_USE(vq); 149 | 150 | return vq->num_free; 151 | } 152 | 153 | void virtqueue_kick(struct virtqueue *_vq) 154 | { 155 | struct vring_virtqueue *vq = to_vvq(_vq); 156 | u16 new, old; 157 | START_USE(vq); 158 | /* Descriptors and available array need to be set before we expose the 159 | * new available array entries. */ 160 | mb(); 161 | 162 | old = vq->vring.avail->idx; 163 | new = vq->vring.avail->idx = old + vq->num_added; 164 | vq->num_added = 0; 165 | 166 | /* Need to update avail index before checking if we should notify */ 167 | mb(); 168 | 169 | if (vq->event ? 170 | vring_need_event(vring_avail_event(&vq->vring), new, old) : 171 | !(vq->vring.used->flags & VRING_USED_F_NO_NOTIFY)) { 172 | /* Prod other side to tell it about changes. */ 173 | if (vq->notify) 174 | vq->notify(&vq->vq); 175 | } 176 | 177 | END_USE(vq); 178 | } --------------------------------------------------------------------------------
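virtqueue_kick() above only decides whether the other side should be notified; the actual signal is whatever callback was handed to vring_new_virtqueue(), and main.c currently passes NULL for it. A minimal sketch of wiring one up is below; the name shm_notify and the plain printf are made up for illustration (a real setup might instead signal an eventfd or a futex living in the shared region), so treat it as an assumption, not part of the tree.

#include <stdio.h>
#include "virtio_server.h"

/* Hypothetical notify hook: just log the kick.  The signature matches the
 * notify parameter of vring_new_virtqueue(). */
static void shm_notify(struct virtqueue *vq)
{
	printf("kick on vq %s\n", vq->name);
}

/* main.c would then create the queue with it instead of NULL:
 *
 *	vq = vring_new_virtqueue(16, 4096, &vdev, vq_ptr, vq_ptr + 1024,
 *				 shm_notify, NULL, "test");
 */

With a hook like this in place, the guarded vq->notify call in virtqueue_kick() fires whenever the used ring's VRING_USED_F_NO_NOTIFY flag is clear, or, when event indices are in use, whenever vring_need_event() reports that the other side asked to be woken for this update.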