├── .gitignore
├── Cargo.toml
├── README.md
└── src
    └── lib.rs

/.gitignore:
--------------------------------------------------------------------------------
1 | .*.swp
2 | target
3 | Cargo.lock
4 | 
--------------------------------------------------------------------------------
/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "flatbuffers"
3 | version = "0.1.0"
4 | authors = ["Sam Payson "]
5 | 
6 | [dependencies]
7 | 
8 | num = "0.1.22"
9 | 
10 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # FlatBuffers in Rust
2 | 
3 | This library provides runtime support for FlatBuffers in Rust.
4 | 
5 | ## Obtaining the Modded FlatBuffers Compiler
6 | 
7 | I have created a modified version of the flatbuffers compiler which will produce Rust code as
8 | output. It is available in the `rust-gen` branch of my `flatbuffers` repository. It can be compiled
9 | and installed via the following command sequence.
10 | 
11 | ```bash
12 | # Clone the repository
13 | git clone https://github.com/arbitrary-cat/flatbuffers && cd flatbuffers
14 | 
15 | # Check out the branch with the Rust code generator in it
16 | git checkout rust-gen
17 | 
18 | # Prepare the makefiles with CMake
19 | cmake .
20 | 
21 | # Build the project
22 | make
23 | 
24 | # Install the binaries
25 | sudo make install
26 | ```
27 | 
28 | Then you can produce `buffers.rs` from `buffers.fbs` by running:
29 | 
30 | ```bash
31 | flatc -r buffers.fbs
32 | ```
33 | 
34 | ## Usage
35 | 
36 | Once you've produced your `*.rs` files from the `*.fbs` files, move them to the appropriate location
37 | in your Rust program's source tree, and add the corresponding `mod` definitions (just like adding
38 | any other module to a Rust program).
39 | 
40 | The generated files expect you to have the `num` crate and my `flatbuffers` crate available. You can
41 | accomplish this by adding the following to your `Cargo.toml`:
42 | 
43 | ```toml
44 | [dependencies]
45 | num = "0.1.24"
46 | 
47 | [dependencies.flatbuffers]
48 | git = "https://github.com/arbitrary-cat/flatbuffers-rs"
49 | ```
50 | 
51 | Then you need to add the corresponding `extern crate` declarations at the top level of your crate:
52 | 
53 | ```rust
54 | extern crate flatbuffers;
55 | extern crate num;
56 | ```
57 | 
58 | ## Limitations
59 | 
60 | Right now the modded compiler is pretty sloppy, and it doesn't generate `Verifier`s like it does for
61 | the other languages. I'm working on this =].
62 | 
--------------------------------------------------------------------------------
/src/lib.rs:
--------------------------------------------------------------------------------
1 | // Copyright 2015 Sam Payson. All rights reserved.
2 | //
3 | // Licensed under the Apache License, Version 2.0 (the "License");
4 | // you may not use this file except in compliance with the License.
5 | // You may obtain a copy of the License at
6 | //
7 | // http://www.apache.org/licenses/LICENSE-2.0
8 | //
9 | // Unless required by applicable law or agreed to in writing, software
10 | // distributed under the License is distributed on an "AS IS" BASIS,
11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | // See the License for the specific language governing permissions and
13 | // limitations under the License.
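//
// A quick orientation before the code, in the form of a minimal usage sketch rather than
// anything that ships with this crate. It assumes a hypothetical generated module
// `buffers` (produced by `flatc -r buffers.fbs`, as described in the README) that exposes
// a table type `Monster`, and it assumes `get_root` (defined near the bottom of this
// file) is generic over the root table type, as its signature suggests:
//
//     extern crate flatbuffers;
//     extern crate num;
//
//     mod buffers; // generated by the modified flatc
//
//     fn read(bytes: &[u8]) {
//         // Interprets the buffer in place; nothing is parsed or copied up front.
//         let monster: &buffers::Monster = flatbuffers::get_root(bytes);
//         // Accessors on the generated type then read fields directly out of `bytes`.
//     }
//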
14 | 15 | extern crate num; 16 | 17 | use std::cmp::Eq; 18 | 19 | use std::cmp; 20 | use std::marker; 21 | use std::mem; 22 | use std::slice; 23 | use std::str; 24 | 25 | // Return a byte slice which refers to the same region of memory as `v`. 26 | fn view_slice_bytes(v: &[T]) -> &[u8] { 27 | unsafe { 28 | let ptr = mem::transmute::<&T, *const u8>(&v[0]); 29 | let len = mem::size_of::() * v.len(); 30 | 31 | slice::from_raw_parts(ptr, len) 32 | } 33 | } 34 | 35 | // Return a byte slice which refers to the same region of memory as `t`. 36 | fn view_bytes(t: &T) -> &[u8] { 37 | unsafe { 38 | let ptr = mem::transmute::<&T, *const u8>(t); 39 | let len = mem::size_of::(); 40 | 41 | slice::from_raw_parts(ptr, len) 42 | } 43 | } 44 | 45 | /// An unsigned offset 46 | pub type UOffset = u32; 47 | 48 | /// A signed offset 49 | pub type SOffset = i32; 50 | 51 | /// A vtable offset, used for indexing the fields of a Table 52 | pub type VOffset = u16; 53 | 54 | /// This is a trait for primitives which can be loaded and stored as aligned little-endian values. 55 | pub trait Endian: Copy + PartialEq { 56 | unsafe fn read_le(buf: *const u8) -> Self; 57 | unsafe fn write_le(self, buf: *mut u8); 58 | 59 | fn from_le(self) -> Self; 60 | fn to_le(self) -> Self; 61 | } 62 | 63 | // What we really want here is: 64 | // 65 | // impl Endian for T { 66 | // fn read_le(buf: &[u8]) -> T { 67 | // let ptr: &T = unsafe { mem::transmute(&buf[0]) }; 68 | // num::PrimInt::from_le(*ptr) 69 | // } 70 | // 71 | // fn write_le(self, buf: &mut [u8]) { 72 | // let ptr: &mut T = unsafe { mem::transmute(&mut buf[0]) }; 73 | // *ptr = self.to_le(); 74 | // } 75 | // } 76 | // 77 | // but the blanket impl causes errors if we try to implement it for any other type, so this macro 78 | // will have to do. 79 | macro_rules! impl_endian_for { 80 | ($t:ty) => { 81 | impl Endian for $t { 82 | unsafe fn read_le(buf: *const u8) -> $t { 83 | let ptr = mem::transmute::<*const u8, &$t>(buf); 84 | num::PrimInt::from_le(*ptr) 85 | } 86 | 87 | unsafe fn write_le(self, buf: *mut u8) { 88 | let ptr = mem::transmute::<*mut u8, &mut $t>(buf); 89 | *ptr = num::PrimInt::to_le(self); 90 | } 91 | 92 | fn from_le(self) -> $t { num::PrimInt::from_le(self) } 93 | 94 | fn to_le(self) -> $t { num::PrimInt::to_le(self) } 95 | } 96 | } 97 | } 98 | 99 | impl_endian_for!(u8); 100 | impl_endian_for!(i8); 101 | impl_endian_for!(u16); 102 | impl_endian_for!(i16); 103 | impl_endian_for!(u32); 104 | impl_endian_for!(i32); 105 | impl_endian_for!(u64); 106 | impl_endian_for!(i64); 107 | impl_endian_for!(usize); 108 | impl_endian_for!(isize); 109 | 110 | /// This implementation assumes that the endianness of the FPU is the same as for integers. 111 | impl Endian for f32 { 112 | fn from_le(self) -> f32 { 113 | unsafe { 114 | let u = mem::transmute::(self); 115 | mem::transmute::(num::PrimInt::from_le(u)) 116 | } 117 | } 118 | 119 | fn to_le(self) -> f32 { 120 | unsafe { 121 | let u = mem::transmute::(self); 122 | mem::transmute::(num::PrimInt::to_le(u)) 123 | } 124 | } 125 | 126 | unsafe fn read_le(buf: *const u8) -> f32 { 127 | let ptr = mem::transmute::<*const u8, &u32>(buf); 128 | mem::transmute::(num::PrimInt::from_le(*ptr)) 129 | } 130 | 131 | unsafe fn write_le(self, buf: *mut u8) { 132 | let ptr = mem::transmute::<*mut u8, &mut u32>(buf); 133 | *ptr = num::PrimInt::to_le(mem::transmute::(self)); 134 | } 135 | } 136 | 137 | /// This implementation assumes that the endianness of the FPU is the same as for integers. 
138 | impl Endian for f64 { 139 | fn from_le(self) -> f64 { 140 | unsafe { 141 | let u = mem::transmute::(self); 142 | mem::transmute::(num::PrimInt::from_le(u)) 143 | } 144 | } 145 | 146 | fn to_le(self) -> f64 { 147 | unsafe { 148 | let u = mem::transmute::(self); 149 | mem::transmute::(num::PrimInt::to_le(u)) 150 | } 151 | } 152 | 153 | unsafe fn read_le(buf: *const u8) -> f64 { 154 | let ptr = mem::transmute::<*const u8, &u64>(buf); 155 | mem::transmute::(num::PrimInt::from_le(*ptr)) 156 | } 157 | 158 | unsafe fn write_le(self, buf: *mut u8) { 159 | let ptr = mem::transmute::<*mut u8, &mut u64>(buf); 160 | *ptr = num::PrimInt::to_le(mem::transmute::(self)); 161 | } 162 | } 163 | 164 | impl Endian for Offset { 165 | fn from_le(self) -> Offset { 166 | Offset::new(num::PrimInt::from_le(self.inner)) 167 | } 168 | 169 | fn to_le(self) -> Offset { 170 | Offset::new(num::PrimInt::to_le(self.inner)) 171 | } 172 | 173 | unsafe fn read_le(buf: *const u8) -> Offset { 174 | let ptr = mem::transmute::<*const u8, &UOffset>(buf); 175 | Offset::new(num::PrimInt::from_le(*ptr)) 176 | } 177 | 178 | unsafe fn write_le(self, buf: *mut u8) { 179 | let ptr = mem::transmute::<*mut u8, &mut UOffset>(buf); 180 | *ptr = num::PrimInt::to_le(self.inner) 181 | } 182 | } 183 | 184 | // If `base` were a pointer to an array of type T, return a pointer to element `idx` of that array. 185 | unsafe fn index(base: *const u8, idx: usize) -> *const u8 { 186 | let base_us = mem::transmute::<*const u8, usize>(base); 187 | 188 | mem::transmute::(base_us + idx * mem::size_of::()) 189 | } 190 | 191 | // Return a pointer to a byte whose address is `off` bytes beyond `base`. 192 | unsafe fn offset(base: *const u8, off: usize) -> *const u8 { 193 | let base_us = mem::transmute::<*const u8, usize>(base); 194 | 195 | mem::transmute::(base_us + off) 196 | } 197 | 198 | // This is like `offset`, except it returns a mutable pointer. 199 | unsafe fn offset_mut(base: *mut u8, off: usize) -> *mut u8 { 200 | let base_us = mem::transmute::<*mut u8, usize>(base); 201 | 202 | mem::transmute::(base_us + off) 203 | } 204 | 205 | // This is like `offset` except it allows negative offsets. 206 | unsafe fn soffset(base: *const u8, off: isize) -> *const u8 { 207 | let base_is = mem::transmute::<*const u8, isize>(base); 208 | 209 | mem::transmute::(base_is + off) 210 | } 211 | 212 | // Read a little endian `T` pointed to by `buf`. `buf` must point to a properly aligned value. 213 | unsafe fn read_scalar(buf: *const u8) -> T { 214 | Endian::read_le(buf) 215 | } 216 | 217 | // Write a little endian `T` to the buffer pointer to by `buf`. `buf` must point to a properly 218 | // aligned buffer. 219 | unsafe fn write_scalar(buf: *mut u8, val: T) { 220 | val.write_le(buf) 221 | } 222 | 223 | /// A trait which determines how a type is retrieved from a flatbuffer. See the implementations for 224 | /// `T`, `Offset`, and `ByRef` for examples. 
225 | pub trait Indirect { 226 | unsafe fn read(buf: *const u8, idx: usize) -> I; 227 | } 228 | 229 | impl Indirect for T { 230 | unsafe fn read(buf: *const u8, idx: usize) -> T { 231 | let off = idx * mem::size_of::(); 232 | let ptr = mem::transmute::<*const u8, &T>(offset(buf, off)); 233 | 234 | *ptr 235 | } 236 | } 237 | 238 | pub struct Offset { 239 | inner: UOffset, 240 | _t: marker::PhantomData, 241 | } 242 | 243 | impl Clone for Offset { 244 | fn clone(&self) -> Self { Offset::new(self.inner) } 245 | } 246 | 247 | impl Copy for Offset {} 248 | 249 | impl PartialEq for Offset { 250 | fn eq(&self, other: &Self) -> bool { self.inner == other.inner } 251 | } 252 | 253 | pub struct ByRef(marker::PhantomData); 254 | 255 | impl <'x, T> Indirect<&'x T> for ByRef { 256 | unsafe fn read(buf: *const u8, idx: usize) -> &'x T { 257 | mem::transmute::<*const u8, &'x T>(index::(buf, idx)) 258 | } 259 | } 260 | 261 | impl<'x, T> Indirect<&'x T> for Offset { 262 | unsafe fn read(buf: *const u8, idx: usize) -> &'x T { 263 | let off: UOffset = read_scalar(index::(buf, idx)); 264 | mem::transmute::<*const u8, &'x T>(offset(buf, off as usize)) 265 | } 266 | } 267 | 268 | impl Offset { 269 | pub fn new(o: UOffset) -> Offset { 270 | Offset { 271 | inner: o, 272 | _t: marker::PhantomData, 273 | } 274 | } 275 | } 276 | 277 | /// A helper type for accessing vectors in flatbuffers. 278 | pub struct Vector where T: Indirect { 279 | length: UOffset, 280 | _t: marker::PhantomData, 281 | _i: marker::PhantomData, 282 | } 283 | 284 | /// An iterator to a Vector in a flatbuffer. 285 | pub struct VecIter<'x, I: 'x, T: Indirect + 'x> { 286 | vec: &'x Vector, 287 | idx: usize, 288 | } 289 | 290 | impl<'x, I, T: Indirect> Iterator for VecIter<'x, I, T> { 291 | type Item = I; 292 | 293 | fn next(&mut self) -> Option { 294 | let idx = self.idx; 295 | self.idx = idx + 1; 296 | 297 | self.vec.get(idx) 298 | } 299 | } 300 | 301 | impl> Vector { 302 | unsafe fn data(&self) -> *const u8 { 303 | index::(mem::transmute::<&Vector, *const u8>(self), 1) 304 | } 305 | 306 | pub fn len(&self) -> usize { 307 | self.length as usize 308 | } 309 | 310 | pub fn get(&self, idx: usize) -> Option { 311 | if idx < self.len() { 312 | Some(unsafe { >::read(self.data(), idx) }) 313 | } else { 314 | None 315 | } 316 | } 317 | 318 | pub fn iter<'x>(&'x self) -> VecIter<'x, I, T> { 319 | VecIter { 320 | vec: self, 321 | idx: 0, 322 | } 323 | } 324 | } 325 | 326 | pub type Str = Vector; 327 | 328 | impl AsRef for Str { 329 | fn as_ref(&self) -> &str { 330 | let slc = unsafe { 331 | let ptr = self.data(); 332 | let len = self.len(); 333 | 334 | slice::from_raw_parts(ptr, len) 335 | }; 336 | 337 | // TODO: Should this be the checked version? If so, do we want to panic if it's not utf-8? 338 | // 339 | // This (unchecked) version certainly reflects the performance characteristics in the 340 | // spirit of the format. Maybe the `AsRef` implementation should be checked, and 341 | // there should be an unsafe fast method? 342 | // 343 | // I'll think about it later... 
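//
// For reference, a sketch of the checked alternative weighed in the comment above (an
// assumption about a possible future shape of this method, not what the code below does):
// validate once and fail loudly on malformed data, trading a scan of the bytes for safety.
//
//     match str::from_utf8(slc) {
//         Ok(s) => s,
//         Err(e) => panic!("flatbuffer string is not valid UTF-8: {:?}", e),
//     }
//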
344 | unsafe { str::from_utf8_unchecked(slc) } 345 | } 346 | } 347 | 348 | impl PartialEq for Str { 349 | fn eq(&self, other: &Str) -> bool { 350 | let (a, b): (&str, &str) = (self.as_ref(), other.as_ref()); 351 | a.eq(b) 352 | } 353 | } 354 | 355 | impl PartialOrd for Str { 356 | fn partial_cmp(&self, other: &Str) -> Option { 357 | let (a, b): (&str, &str) = (self.as_ref(), other.as_ref()); 358 | a.partial_cmp(b) 359 | } 360 | } 361 | 362 | impl Eq for Str {} 363 | 364 | impl Ord for Str { 365 | fn cmp(&self, other: &Str) -> cmp::Ordering { 366 | let (a, b): (&str, &str) = (self.as_ref(), other.as_ref()); 367 | a.cmp(b) 368 | } 369 | } 370 | 371 | pub struct Table; 372 | 373 | impl Table { 374 | fn get_optional_field_offset(&self, field: VOffset) -> Option { 375 | unsafe { 376 | let base = mem::transmute::<&Table, *const u8>(self); 377 | 378 | // I'm not suire why it's subtraction, instead of addition, but this is what they have in 379 | // the C++ code. 380 | let vtable = soffset(base, -read_scalar::(base) as isize); 381 | 382 | let vtsize: VOffset = read_scalar(vtable); 383 | 384 | if field < vtsize { 385 | let voff = read_scalar(offset(vtable, field as usize)); 386 | if voff != 0 { 387 | return Some(voff) 388 | } 389 | } 390 | 391 | None 392 | } 393 | } 394 | 395 | pub fn get_field(&self, field: VOffset, def: T) -> T { 396 | 397 | self.get_optional_field_offset(field) 398 | .map_or(def, |voffs| unsafe { 399 | let base = mem::transmute::<&Table, *const u8>(self); 400 | read_scalar(offset(base, voffs as usize)) 401 | } ) 402 | } 403 | 404 | pub fn get_ref(&self, field: VOffset) -> Option<&T> { 405 | self.get_optional_field_offset(field) 406 | .map(|voffs| unsafe { 407 | let base = mem::transmute::<&Table, *const u8>(self); 408 | let p = offset(base, voffs as usize); 409 | let offs: UOffset = read_scalar(p); 410 | mem::transmute::<*const u8, &T>(offset(p, offs as usize)) 411 | }) 412 | } 413 | 414 | pub fn get_ref_mut(&mut self, field: VOffset) -> Option<&mut T> { 415 | self.get_optional_field_offset(field) 416 | .map(|voffs| unsafe { 417 | let base = mem::transmute::<&mut Table, *mut u8>(self); 418 | let p = offset_mut(base, voffs as usize); 419 | let offs: UOffset = read_scalar(p); 420 | mem::transmute::<*mut u8, &mut T>(offset_mut(p, offs as usize)) 421 | }) 422 | } 423 | 424 | pub fn get_struct(&self, field: VOffset) -> Option<&T> { 425 | self.get_optional_field_offset(field) 426 | .map(|voffs| unsafe { 427 | let base = mem::transmute::<&Table, *const u8>(self); 428 | mem::transmute::<*const u8, &T>(offset(base, voffs as usize)) 429 | }) 430 | } 431 | 432 | pub fn get_struct_mut(&mut self, field: VOffset) -> Option<&mut T> { 433 | self.get_optional_field_offset(field) 434 | .map(|voffs| unsafe { 435 | let base = mem::transmute::<&mut Table, *mut u8> (self); 436 | mem::transmute::<*mut u8, &mut T>(offset_mut(base, voffs as usize)) 437 | }) 438 | } 439 | 440 | pub fn set_field(&mut self, field: VOffset, val: T) { 441 | unsafe { 442 | // We `unwrap` here because the caller is expected to verify that the field exists 443 | // beforehand by calling `check_field`. 444 | let voffs = self.get_optional_field_offset(field).unwrap(); 445 | 446 | let base = mem::transmute::<&mut Table, *mut u8>(self); 447 | 448 | write_scalar(offset_mut(base, voffs as usize), val); 449 | } 450 | } 451 | 452 | pub fn check_field(&self, field: VOffset) -> bool { 453 | self.get_optional_field_offset(field).is_some() 454 | } 455 | } 456 | 457 | /// A trait for Tables which can be compared for order (i.e. 
which have a field with the `key` 458 | /// attribute). 459 | pub trait OrdTable { 460 | fn key_cmp(&self, rhs: &Self) -> cmp::Ordering; 461 | } 462 | 463 | /// This type is used internally by the generated types for flatbuffer structs. Its methods allow 464 | /// access to various different types of struct fields. 465 | pub struct Struct; 466 | 467 | impl Struct { 468 | /// Return a scalar field, reading it directly from the buffer. 469 | pub fn get_field(&self, off: UOffset) -> T { 470 | unsafe { 471 | let base = mem::transmute::<&Struct, *const u8>(self); 472 | read_scalar(offset(base, off as usize)) 473 | } 474 | } 475 | 476 | /// Return a reference to a field which is stored as a `UOffset`. 477 | /// 478 | /// # Notes 479 | /// 480 | /// Is this function ever used? Aren't structs supposed to be fixed-size? 481 | pub fn get_ref(&self, off: UOffset) -> &T { 482 | unsafe { 483 | let base = mem::transmute::<&Struct, *const u8>(self); 484 | let p = offset(base, off as usize); 485 | 486 | mem::transmute::<*const u8, &T>(offset(p, read_scalar::(p) as usize)) 487 | } 488 | } 489 | 490 | /// Like `get_ref`, but the reference is mutable. 491 | pub fn get_ref_mut(&mut self, off: UOffset) -> &mut T { 492 | unsafe { 493 | let base = mem::transmute::<&mut Struct, *mut u8>(self); 494 | let p = offset_mut(base, off as usize); 495 | 496 | mem::transmute::<*mut u8, &mut T>(offset_mut(p, read_scalar::(p) as usize)) 497 | } 498 | } 499 | 500 | /// Get a pointer to a struct field. 501 | pub fn get_struct(&self, off: UOffset) -> &T { 502 | unsafe { 503 | let base = mem::transmute::<&Struct, *const u8>(self); 504 | 505 | mem::transmute::<*const u8, &T>(offset(base, off as usize)) 506 | } 507 | } 508 | 509 | /// Like `get_struct`, but the reference is mutable. 510 | pub fn get_struct_mut(&mut self, off: UOffset) -> &mut T { 511 | unsafe { 512 | let base = mem::transmute::<&mut Struct, *mut u8>(self); 513 | 514 | mem::transmute::<*mut u8, &mut T>(offset_mut(base, off as usize)) 515 | } 516 | } 517 | } 518 | 519 | /// Return a pointer to the root object stored in this buffer, interpreting it as type `T`. 520 | pub fn get_root(buf: &[u8]) -> &T { 521 | unsafe { 522 | let base = buf.as_ptr(); 523 | let off: UOffset = Endian::read_le(base); 524 | 525 | mem::transmute::<*const u8, &T>(offset(base, off as usize)) 526 | } 527 | } 528 | 529 | // Reverse-growing vector which piggy-backs on std::vec::Vec. 530 | struct VecDownward { 531 | inner: Vec, 532 | next: usize, 533 | } 534 | 535 | impl VecDownward { 536 | fn new(initial_capacity: usize) -> VecDownward { 537 | let mut vec = Vec::with_capacity(initial_capacity); 538 | unsafe { vec.set_len(initial_capacity) } 539 | 540 | VecDownward { 541 | inner: vec, 542 | next: initial_capacity, 543 | } 544 | } 545 | 546 | fn data(&self) -> &[u8] { &self.inner[self.next..] } 547 | 548 | fn data_mut(&mut self) -> &mut [u8] { &mut self.inner[self.next..] } 549 | 550 | fn data_at(&self, offset: usize) -> &[u8] { 551 | &self.inner[self.inner.len() - offset..] 552 | } 553 | 554 | fn data_at_mut(&mut self, offset: usize) -> &mut [u8] { 555 | let len = self.inner.len(); 556 | &mut self.inner[len - offset..] 557 | } 558 | 559 | fn len(&self) -> usize { self.inner.len() - self.next } 560 | 561 | fn clear(&mut self) { 562 | self.next = self.inner.len(); 563 | } 564 | 565 | // Adds space to the front of the vector, growing towards lower addresses. The returned `usize` 566 | // is the offset from the end of the buffer (e.g. the highest address). 
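//
// A small worked illustration of the downward growth (the sizes are arbitrary and only
// for this comment): after `VecDownward::new(16)`, `next` is 16 and `len()` is 0. A
// `push(&[1, 2, 3, 4])` moves `next` down to 12 and writes into `inner[12..16]`, so
// `data()` now views those four bytes; a second four-byte `push` lands in `inner[8..12]`,
// immediately in front of the first. New data always grows toward index 0, while data
// already written keeps its distance from the end of the allocation, which is why
// callers track offsets from the end of the buffer.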
567 | fn make_space(&mut self, len: usize) -> usize { 568 | if len > self.next { 569 | let mut new = Vec::with_capacity(2*self.len() + len); 570 | 571 | unsafe { new.set_len(2*self.len() + len) } 572 | 573 | let new_next = new.len() - self.len(); 574 | 575 | for i in 0..self.len() { 576 | new[new_next + i] = self.inner[self.next + i] 577 | } 578 | 579 | self.inner = new; 580 | self.next = new_next; 581 | } 582 | 583 | self.next -= len; 584 | 585 | self.next 586 | } 587 | 588 | // Append some raw bytes to the front of the buffer. 589 | fn push(&mut self, dat: &[u8]) { 590 | let off = self.make_space(dat.len()); 591 | 592 | for i in 0..dat.len() { 593 | self.inner[off + i] = dat[i]; 594 | } 595 | } 596 | 597 | // Add `len` *NUL* bytes to the front of the buffer. 598 | fn fill(&mut self, len: usize) { 599 | let off = self.make_space(len); 600 | 601 | for i in 0..len { 602 | self.inner[off + i] = 0; 603 | } 604 | } 605 | 606 | // Remove `len` bytes from the front of the buffer. 607 | fn pop(&mut self, len: usize) { 608 | self.next += len; 609 | } 610 | } 611 | 612 | // Given a field's ID number, convert it to a VOffset 613 | fn field_index_to_offset(field_id: VOffset) -> VOffset { 614 | let fixed_fields = 2; // VTable size and Object size. 615 | (field_id + fixed_fields) * (mem::size_of::() as VOffset) 616 | } 617 | 618 | // Return the number of bytes needed to pad a scalar for alignment (see usage in e.g. 619 | // `FlatBufferBuilder::align(..)`). 620 | fn padding_bytes(buf_size: usize, scalar_size: usize) -> usize { 621 | (!buf_size).wrapping_add(1) & (scalar_size - 1) 622 | } 623 | 624 | // The location of a field, stored as a UOffset from the end of the buffer and a field ID. 625 | struct FieldLoc { 626 | off: UOffset, 627 | id: VOffset, 628 | } 629 | 630 | /// This type is used by the generated `.*Builder` types for Tables. A `FlatBufferBuilder` can be 631 | /// re-used if the `clear()` method is called between uses; this will avoid some allocations. 632 | pub struct FlatBufferBuilder { 633 | buf: VecDownward, 634 | offset_buf: Vec, 635 | vtables: Vec, 636 | min_align: usize, 637 | force_defaults: bool, 638 | } 639 | 640 | impl FlatBufferBuilder { 641 | pub fn new(initial_capacity: usize) -> FlatBufferBuilder { 642 | FlatBufferBuilder { 643 | buf: VecDownward::new(initial_capacity), 644 | offset_buf: Vec::with_capacity(16), 645 | vtables: Vec::with_capacity(16), 646 | min_align: 1, 647 | force_defaults: false, 648 | } 649 | } 650 | 651 | /// Prepare to build another FlatBuffer from scratch (forgetting everything about any previous 652 | /// FlatBuffer), but reuse the memory from the internal buffers to avoid extra reallocations. 653 | pub fn clear(&mut self) { 654 | self.buf.clear(); 655 | self.offset_buf.clear(); 656 | self.vtables.clear(); 657 | self.min_align = 1; 658 | } 659 | 660 | pub fn get_size(&self) -> usize { 661 | self.buf.len() 662 | } 663 | 664 | pub fn get_buffer(&self) -> &[u8] { self.buf.data() } 665 | 666 | /// Determines whether or not default values should be hard-coded into the wire representation. 
667 | pub fn force_defaults(&mut self, fd: bool) { 668 | self.force_defaults = fd; 669 | } 670 | 671 | pub fn pad(&mut self, num_bytes: usize) { 672 | self.buf.fill(num_bytes); 673 | } 674 | 675 | pub fn align(&mut self, elem_size: usize) { 676 | if elem_size > self.min_align { 677 | self.min_align = elem_size; 678 | } 679 | 680 | let len = self.buf.len(); 681 | 682 | self.buf.fill(padding_bytes(len, elem_size)); 683 | } 684 | 685 | pub fn push_bytes(&mut self, dat: &[u8]) { 686 | self.buf.push(dat); 687 | } 688 | 689 | pub fn pop_bytes(&mut self, len: usize) { 690 | self.buf.pop(len) 691 | } 692 | 693 | pub fn push_scalar(&mut self, elem: T) -> usize { 694 | let little = elem.to_le(); 695 | 696 | self.align(mem::size_of::()); 697 | 698 | self.buf.push(view_bytes(&little)); 699 | 700 | self.get_size() 701 | } 702 | 703 | pub fn push_offset(&mut self, off: Offset) -> usize { 704 | let adjusted = self.refer_to(off.inner); 705 | self.push_scalar(adjusted) 706 | } 707 | 708 | pub fn refer_to(&mut self, off: UOffset) -> UOffset { 709 | self.align(mem::size_of::()); 710 | let buf_size = self.get_size() as UOffset; 711 | 712 | assert!(off <= buf_size); 713 | 714 | buf_size - off + (mem::size_of::() as UOffset) 715 | } 716 | 717 | pub fn track_field(&mut self, field: VOffset, off: UOffset) { 718 | self.offset_buf.push(FieldLoc{off: off, id: field}) 719 | } 720 | 721 | pub fn add_scalar(&mut self, field: VOffset, e: T, def: T) { 722 | if e == def && !self.force_defaults { return } 723 | 724 | let off = self.push_scalar(e) as UOffset; 725 | 726 | self.track_field(field, off); 727 | } 728 | 729 | pub fn add_offset(&mut self, field: VOffset, off: Offset) { 730 | if off.inner == 0 { return } 731 | 732 | let adjusted = self.refer_to(off.inner); 733 | self.add_scalar(field, adjusted, 0); 734 | } 735 | 736 | pub fn add_struct(&mut self, field: VOffset, ptr: &T) { 737 | self.align(mem::align_of::()); 738 | self.push_bytes(view_bytes(ptr)); 739 | 740 | let off = self.get_size() as UOffset; 741 | self.track_field(field, off); 742 | } 743 | 744 | pub fn add_struct_offset(&mut self, field: VOffset, off: UOffset) { 745 | self.track_field(field, off); 746 | } 747 | 748 | pub fn not_nested(&self) { 749 | assert_eq!(self.offset_buf.len(), 0); 750 | } 751 | 752 | pub fn start_table(&self) -> UOffset { 753 | self.not_nested(); 754 | self.get_size() as UOffset 755 | } 756 | 757 | pub fn end_table(&mut self, start: UOffset, num_fields: VOffset) -> UOffset { 758 | let vtable_offset_loc = self.push_scalar::(0); 759 | 760 | self.buf.fill((num_fields as usize) * mem::size_of::()); 761 | 762 | let table_object_size = vtable_offset_loc - (start as usize); 763 | 764 | assert!(table_object_size < 0x10000); // 16-bit offsets 765 | 766 | self.push_scalar(table_object_size as VOffset); 767 | self.push_scalar(field_index_to_offset(num_fields)); 768 | 769 | for field_loc in self.offset_buf.iter() { 770 | let pos = (vtable_offset_loc as VOffset) - (field_loc.off as VOffset); 771 | 772 | unsafe { 773 | let buf_ref = &mut self.buf.data_mut()[field_loc.id as usize]; 774 | let buf_ptr = mem::transmute::<&mut u8, *mut u8>(buf_ref); 775 | assert_eq!(read_scalar::(buf_ptr), 0); 776 | write_scalar(buf_ptr, pos); 777 | } 778 | } 779 | 780 | self.offset_buf.clear(); 781 | 782 | // What follows is the de-duping code. Might be able to speed this up with some kind of 783 | // hash-table or something if it becomes a bottleneck since this implementation will take 784 | // quadratic time WRT the number of distinct tables in the flatbuffer. 
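//
// A sketch of that hash-table idea, left as a note for later (everything below is an
// assumption about a possible change, not something this crate does today): hash the
// serialized vtable bytes and keep a map from hash to the offsets of vtables already
// written, so each new vtable is compared byte-for-byte only against the few entries
// that share its hash instead of against every entry in `self.vtables`.
//
//     use std::collections::HashMap;
//     use std::collections::hash_map::DefaultHasher;
//     use std::hash::Hasher;
//
//     fn vtable_hash(bytes: &[u8]) -> u64 {
//         let mut h = DefaultHasher::new();
//         h.write(bytes);
//         h.finish()
//     }
//
//     // On FlatBufferBuilder: `vtable_cache: HashMap<u64, Vec<UOffset>>`, consulted here
//     // before falling back to the linear scan below.
//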
785 | 786 | let vt1: &[VOffset] = unsafe { 787 | let vt_ptr = mem::transmute::<&u8, *const VOffset>(&self.buf.data()[0]); 788 | let vt_len = *vt_ptr as usize; 789 | slice::from_raw_parts(vt_ptr, vt_len) 790 | }; 791 | 792 | let mut vt_use = self.get_size() as UOffset; 793 | 794 | for &off in self.vtables.iter() { 795 | let vt2: &[VOffset] = unsafe { 796 | let vt_ptr = mem::transmute::<&u8, *const VOffset>(&self.buf.data_at(off as usize)[0]); 797 | let vt_len = *vt_ptr as usize; 798 | slice::from_raw_parts(vt_ptr, vt_len) 799 | }; 800 | 801 | if vt1 == vt2 { 802 | vt_use = off; 803 | let to_pop = self.get_size() - vtable_offset_loc; 804 | self.buf.pop(to_pop); 805 | break; 806 | } 807 | } 808 | 809 | if vt_use == self.get_size() as UOffset { 810 | self.vtables.push(vt_use); 811 | } 812 | 813 | 814 | unsafe { 815 | let vt_buf = &mut self.buf.data_at_mut(vtable_offset_loc as usize)[0]; 816 | write_scalar(mem::transmute::<*mut u8, &mut u8>(vt_buf), 817 | (vt_use as SOffset) - (vtable_offset_loc as SOffset)); 818 | } 819 | 820 | vtable_offset_loc as UOffset 821 | } 822 | 823 | pub fn pre_align(&mut self, len: usize, align: usize) { 824 | let size = self.get_size(); 825 | self.buf.fill(padding_bytes(size + len, align)); 826 | } 827 | 828 | pub fn create_string(&mut self, s: &str) -> Offset { 829 | self.not_nested(); 830 | 831 | self.pre_align(s.len() + 1, mem::size_of::()); 832 | self.buf.fill(1); 833 | self.push_bytes(s.as_bytes()); 834 | self.push_scalar(s.len() as UOffset); 835 | 836 | Offset::new(self.get_size() as UOffset) 837 | } 838 | 839 | pub fn start_vector(&mut self, len: usize, elem_size: usize) { 840 | self.pre_align(len * elem_size, mem::size_of::()); 841 | self.pre_align(len * elem_size, elem_size); 842 | } 843 | 844 | pub fn reserve_elements(&mut self, len: usize, elem_size: usize) -> usize { 845 | self.buf.make_space(len * elem_size) 846 | } 847 | 848 | pub fn end_vector(&mut self, len: usize) -> UOffset { 849 | self.push_scalar(len as UOffset) as UOffset 850 | } 851 | 852 | pub fn create_vector(&mut self, v: &[T]) -> Offset> { 853 | self.not_nested(); 854 | self.start_vector(v.len(), mem::size_of::()); 855 | for &elem in v.iter().rev() { 856 | self.push_scalar(elem); 857 | } 858 | 859 | Offset::new(self.end_vector(v.len())) 860 | } 861 | 862 | pub fn create_vector_of_structs(&mut self, v: &[T]) -> Offset, &T>> { 863 | self.not_nested(); 864 | 865 | self.start_vector(v.len() * mem::size_of::() / mem::align_of::(), 866 | mem::align_of::()); 867 | self.push_bytes(view_slice_bytes(v)); 868 | 869 | Offset::new(self.end_vector(v.len())) 870 | } 871 | 872 | pub fn create_vector_of_sorted_tables(&mut self, v: &mut [Offset]) 873 | -> Offset>> { 874 | 875 | v.sort_by(|&a_off, &b_off| { 876 | unsafe { 877 | let a = mem::transmute::<&u8, &T>(&self.buf.data_at(a_off.inner as usize)[0]); 878 | let b = mem::transmute::<&u8, &T>(&self.buf.data_at(b_off.inner as usize)[0]); 879 | 880 | a.key_cmp(b) 881 | } 882 | }); 883 | 884 | self.create_vector(v) 885 | } 886 | 887 | pub fn create_uninitialized_vector(&mut self, len: usize) -> (UOffset, &mut [T]) { 888 | self.not_nested(); 889 | self.start_vector(len, mem::size_of::()); 890 | let buf = self.buf.make_space(len * mem::size_of::()); 891 | let off = self.end_vector(len); 892 | 893 | let slc = unsafe { 894 | let ptr = mem::transmute::<&mut u8, *mut T>(&mut self.buf.data_at_mut(buf)[0]); 895 | 896 | slice::from_raw_parts_mut(ptr, len) 897 | }; 898 | 899 | (off, slc) 900 | } 901 | 902 | pub fn finish(&mut self, root: Offset) { 903 | let min_align = 
self.min_align;
904 | self.pre_align(mem::size_of::<UOffset>(), min_align);
905 | let refer = self.refer_to(root.inner);
906 | self.push_scalar(refer);
907 | }
908 | }
909 | 
--------------------------------------------------------------------------------
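To tie the runtime pieces together, here is a hedged sketch of how generated `*Builder` code
might drive `FlatBufferBuilder` for a hypothetical `table Monster { hp: short = 100; name: string; }`.
Everything named `Monster`, `MONSTER_*`, or `build_monster` is invented for the example; the
vtable offsets `4` and `6` follow the layout computed by `field_index_to_offset`, and `Offset`
and `finish` are assumed to be generic over the table type, as the code above suggests.

```rust
extern crate flatbuffers;

use flatbuffers::{FlatBufferBuilder, Offset, VOffset};

// Stand-in for the type flatc would generate for `table Monster`.
struct Monster;

// Field IDs 0 and 1 map to vtable byte offsets 4 and 6 (two fixed VOffsets come first).
const MONSTER_HP: VOffset = 4;
const MONSTER_NAME: VOffset = 6;

fn build_monster(fbb: &mut FlatBufferBuilder) -> &[u8] {
    // Referenced objects (like strings) are written before the table that points at them.
    let name = fbb.create_string("orc");

    let start = fbb.start_table();
    fbb.add_scalar(MONSTER_HP, 300i16, 100); // skipped on the wire when equal to the default
    fbb.add_offset(MONSTER_NAME, name);
    let root = fbb.end_table(start, 2);

    // Generated code would hand back a typed offset; `finish` writes the root UOffset.
    fbb.finish(Offset::<Monster>::new(root));
    fbb.get_buffer()
}
```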