├── .gitignore ├── Cargo.toml ├── LICENSE ├── README.md ├── build.rs ├── src ├── ast.rs ├── bin │ └── nixrs.rs ├── context.rs ├── lex.rs ├── lib.rs ├── parse.lalrpop └── symbol.rs └── tests ├── lexer.rs └── lexer ├── double-quotes-1.in ├── double-quotes-1.out ├── double-quotes-2.in ├── double-quotes-2.out ├── double-quotes-3.in ├── double-quotes-3.out ├── double-quotes-4.in ├── double-quotes-4.out ├── double-quotes-interpolation-1.in ├── double-quotes-interpolation-1.out ├── double-quotes-interpolation-2.in ├── double-quotes-interpolation-2.out ├── double-quotes-interpolation-3.in ├── double-quotes-interpolation-3.out ├── double-quotes-interpolation-4.in ├── double-quotes-interpolation-4.out ├── identifiers-1.in ├── identifiers-1.out ├── identifiers-2.in ├── identifiers-2.out ├── identifiers-3.in ├── identifiers-3.out ├── integers-1.in ├── integers-1.out ├── integers-2.in ├── integers-2.out ├── integers-3.in ├── integers-3.out ├── ints-vs-idents-1.in ├── ints-vs-idents-1.out ├── ints-vs-idents-2.in ├── ints-vs-idents-2.out ├── whitespace-and-comments-1.in ├── whitespace-and-comments-1.out ├── whitespace-and-comments-2.in ├── whitespace-and-comments-2.out ├── whitespace-and-comments-3.in ├── whitespace-and-comments-3.out ├── whitespace-and-comments-4.in └── whitespace-and-comments-4.out /.gitignore: -------------------------------------------------------------------------------- 1 | /target 2 | /Cargo.lock 3 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | authors = ["Scott Olson "] 3 | name = "nixrs" 4 | version = "0.1.0" 5 | 6 | [dependencies] 7 | clap = "2.1.2" 8 | itertools = "0.4.11" 9 | lalrpop-util = "0.15.0" 10 | lazy_static = "0.2.1" 11 | regex = "0.2.2" 12 | 13 | [build-dependencies] 14 | lalrpop = "0.15.0" 15 | 16 | [dev-dependencies] 17 | glob = "0.2.11" 18 | pretty_assertions = "0.5.1" 19 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2016 The Nixrs Developers 2 | 3 | Permission is hereby granted, free of charge, to any 4 | person obtaining a copy of this software and associated 5 | documentation files (the "Software"), to deal in the 6 | Software without restriction, including without 7 | limitation the rights to use, copy, modify, merge, 8 | publish, distribute, sublicense, and/or sell copies of 9 | the Software, and to permit persons to whom the Software 10 | is furnished to do so, subject to the following 11 | conditions: 12 | 13 | The above copyright notice and this permission notice 14 | shall be included in all copies or substantial portions 15 | of the Software. 16 | 17 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF 18 | ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED 19 | TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A 20 | PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT 21 | SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY 22 | CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION 23 | OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR 24 | IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 25 | DEALINGS IN THE SOFTWARE. 
26 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Nixrs 2 | 3 | A [Nix][nix] language interpreter written in [Rust][rust]. 4 | 5 | ## Progress 6 | 7 | As of June 4th, 2016, the [lexer](src/lex.rs) is partially implemented and 8 | essentially nothing else is done. 9 | 10 | ## Run the tests 11 | 12 | ```sh 13 | cargo test 14 | ``` 15 | 16 | ## License 17 | 18 | Licensed under the MIT license. See [LICENSE](LICENSE) or 19 | http://opensource.org/licenses/MIT. 20 | 21 | ### Contribution 22 | 23 | Unless you explicitly state otherwise, any contribution intentionally submitted 24 | for inclusion in the work by you shall be licensed as above, without any 25 | additional terms or conditions. 26 | 27 | [nix]: http://nixos.org/nix/ 28 | [rust]: https://www.rust-lang.org/ 29 | -------------------------------------------------------------------------------- /build.rs: -------------------------------------------------------------------------------- 1 | extern crate lalrpop; 2 | 3 | fn main() { 4 | lalrpop::process_root().unwrap(); 5 | } 6 | -------------------------------------------------------------------------------- /src/ast.rs: -------------------------------------------------------------------------------- 1 | use symbol::Symbol; 2 | 3 | #[derive(Debug)] 4 | pub enum Expr { 5 | Identifier(Symbol), 6 | IntLiteral(u64), // TODO(solson): What type should we use? 7 | FloatLiteral(f64), // TODO(solson): What type should we use? 8 | String(Symbol), 9 | } 10 | 11 | 12 | // #[derive(Debug)] 13 | // pub enum ParseError { 14 | // UnexpectedEnd, 15 | // InvalidIntLiteral, 16 | // InvalidFloatLiteral, 17 | // } 18 | 19 | // pub type ParseResult<T> = Result<T, ParseError>; 20 | -------------------------------------------------------------------------------- /src/bin/nixrs.rs: -------------------------------------------------------------------------------- 1 | extern crate clap; 2 | extern crate nixrs; 3 | 4 | use clap::{Arg, App}; 5 | use nixrs::context::EvalContext; 6 | use nixrs::lex; 7 | use nixrs::parse; 8 | use std::fs::File; 9 | use std::io; 10 | use std::io::prelude::*; 11 | 12 | fn main() { 13 | result_main().unwrap(); 14 | } 15 | 16 | fn result_main() -> io::Result<()> { 17 | let matches = App::new("nixrs") 18 | .version("0.1.0") 19 | .author("Scott Olson ") 20 | .about("Nix evaluator") 21 | .arg(Arg::with_name("INPUT") 22 | .help("The input file to use") 23 | .required(true)) 24 | .get_matches(); 25 | 26 | let input_file = matches.value_of("INPUT").unwrap(); 27 | let mut source = String::new(); 28 | File::open(input_file)?.read_to_string(&mut source)?; 29 | 30 | // let ctx = EvalContext::new(); 31 | // let lexer = lex::Lexer::new(&ctx, input_file, &source); 32 | // let lalrpop_lexer = lex::lalrpop::Lexer(lexer); 33 | // let parser = parse::ExprParser::new(); 34 | // let ast = parser.parse(lalrpop_lexer).unwrap(); 35 | let parser = parse::ExprParser::new(); 36 | let ast = parser.parse(&source).unwrap(); 37 | println!("{:?}", ast); 38 | 39 | // let mut parser = Parser::new(lexer); 40 | // println!("{:?}", parser.parse_expr()); 41 | // println!("{}", lexer.debug_string()); 42 | 43 | Ok(()) 44 | } 45 | -------------------------------------------------------------------------------- /src/context.rs: -------------------------------------------------------------------------------- 1 | pub struct EvalContext {} 2 | 3 | impl EvalContext { 4 | pub fn new() -> Self { 5 | EvalContext {} 6 | } 7 | }
8 | -------------------------------------------------------------------------------- /src/lex.rs: -------------------------------------------------------------------------------- 1 | use itertools::Itertools; 2 | use regex::{Regex, RegexSet}; 3 | use std::fmt::{self, Write}; 4 | use std::str::Chars; 5 | 6 | use context::EvalContext; 7 | use symbol::Symbol; 8 | 9 | //////////////////////////////////////////////////////////////////////////////// 10 | // Token positions and spans 11 | //////////////////////////////////////////////////////////////////////////////// 12 | 13 | #[derive(Copy, Clone, Debug, Default, PartialEq)] 14 | pub struct Pos { 15 | pub column: usize, 16 | pub line: usize, 17 | } 18 | 19 | impl fmt::Display for Pos { 20 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 21 | write!(f, "{}:{}", self.line, self.column) 22 | } 23 | } 24 | 25 | #[derive(Copy, Clone, Debug, PartialEq)] 26 | pub struct Span { 27 | pub filename: Symbol, 28 | pub start: Pos, 29 | pub end: Pos, 30 | } 31 | 32 | impl fmt::Display for Span { 33 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 34 | write!(f, "{}-{}", self.start, self.end) 35 | } 36 | } 37 | 38 | #[derive(Copy, Clone, Debug, PartialEq)] 39 | pub struct Token<'src> { 40 | pub kind: TokenKind, 41 | pub span: Span, 42 | pub source: &'src str, 43 | } 44 | 45 | //////////////////////////////////////////////////////////////////////////////// 46 | // Token kinds 47 | //////////////////////////////////////////////////////////////////////////////// 48 | 49 | #[derive(Copy, Clone, Debug, PartialEq)] 50 | pub enum TokenKind { 51 | Comment, 52 | Identifier, 53 | Integer, 54 | Float, 55 | Path, 56 | Uri, 57 | StrPart(StringStyle), 58 | Quote(StringStyle), // " or '' 59 | DollarBraceL, // ${ 60 | Mult, // * 61 | Minus, // - 62 | Plus, // + 63 | Divide, // / 64 | Less, // < 65 | Greater, // > 66 | LessEq, // <= 67 | GreaterEq, // >= 68 | Assign, // = 69 | Equals, // == 70 | NotEquals, // != 71 | And, // && 72 | Or, // || 73 | Implies, // -> 74 | Not, // ! 75 | Update, // // 76 | Concat, // ++ 77 | At, // @ 78 | Comma, // , 79 | Dot, // . 80 | Ellipsis, // ... 81 | Question, // ? 82 | Colon, // : 83 | Semicolon, // ; 84 | ParenL, // ( 85 | ParenR, // ) 86 | BracketL, // [ 87 | BracketR, // ] 88 | BraceL, // { 89 | BraceR, // } 90 | KeywordIf, 91 | KeywordThen, 92 | KeywordElse, 93 | KeywordAssert, 94 | KeywordWith, 95 | KeywordLet, 96 | KeywordIn, 97 | KeywordRec, 98 | KeywordInherit, 99 | KeywordOr, 100 | } 101 | 102 | #[derive(Copy, Clone, Debug, Eq, PartialEq)] 103 | pub enum StringStyle { 104 | /// A `"`-delimited string. 105 | Normal, 106 | 107 | /// A `''`-delimited string, which ignores indentation and leading and trailing whitespace. 108 | Indent, 109 | } 110 | 111 | //////////////////////////////////////////////////////////////////////////////// 112 | // CharsPos - A &str iterator which tracks line/column positions. 113 | //////////////////////////////////////////////////////////////////////////////// 114 | 115 | /// An iterator wrapping a `std::str::Chars` iterator which also keeps track of the current line 116 | /// and column position. 
116 | /// and column position. 117 | #[derive(Clone)] 118 | struct CharsPos<'a> { 119 | chars: Chars<'a>, 120 | pos: Pos, 121 | } 122 | 123 | impl<'a> CharsPos<'a> { 124 | fn new(chars: Chars<'a>) -> Self { 125 | CharsPos { chars: chars, pos: Pos { line: 1, column: 1 } } 126 | } 127 | 128 | fn as_str(&self) -> &'a str { 129 | self.chars.as_str() 130 | } 131 | } 132 | 133 | impl<'a> Iterator for CharsPos<'a> { 134 | type Item = char; 135 | 136 | fn next(&mut self) -> Option<char> { 137 | let opt_c = self.chars.next(); 138 | match opt_c { 139 | Some('\n') => { self.pos.line += 1; self.pos.column = 1; } 140 | Some(_) => { self.pos.column += 1; } 141 | None => {} 142 | } 143 | opt_c 144 | } 145 | } 146 | 147 | //////////////////////////////////////////////////////////////////////////////// 148 | // Character classification 149 | //////////////////////////////////////////////////////////////////////////////// 150 | 151 | fn is_whitespace(c: char) -> bool { 152 | match c { 153 | ' ' | '\t' | '\r' | '\n' => true, 154 | _ => false, 155 | } 156 | } 157 | 158 | //////////////////////////////////////////////////////////////////////////////// 159 | // Lexer 160 | //////////////////////////////////////////////////////////////////////////////// 161 | 162 | pub struct Lexer<'ctx, 'src> { 163 | _ctx: &'ctx EvalContext, 164 | chars: CharsPos<'src>, 165 | filename: Symbol, 166 | 167 | /// A record of the levels of nesting the lexer is currently in. The last state is the most 168 | /// deeply nested. An empty stack implies the lexer is at the normal top-level. 169 | state_stack: Vec<LexerState>, 170 | 171 | /// The line/column location of the start of the token currently being scanned. 172 | token_start_pos: Pos, 173 | 174 | /// The position in the source string of the token currently being scanned, as a slice 175 | /// extending to the end of the source string. Once the end of the token is found, this is 176 | /// sliced off at that point to give just the slice of the string containing the token. 177 | token_start_str: &'src str, 178 | } 179 | 180 | #[derive(Copy, Clone, Debug, Eq, PartialEq)] 181 | enum LexerState { 182 | /// When the lexer is at the top-level, not nested inside strings. 183 | Normal, 184 | 185 | /// When the lexer is inside a string. 186 | String(StringStyle), 187 | 188 | /// When the lexer is in a `${}` interpolation inside a string. 189 | Interpolation, 190 | } 191 | 192 | impl StringStyle { 193 | fn delimiter(self) -> &'static str { 194 | match self { 195 | StringStyle::Normal => "\"", 196 | StringStyle::Indent => "''", 197 | } 198 | } 199 | } 200 | 201 | impl<'ctx, 'src> Iterator for Lexer<'ctx, 'src> { 202 | type Item = Token<'src>; 203 | 204 | fn next(&mut self) -> Option<Token<'src>> { 205 | match self.state() { 206 | LexerState::Normal | LexerState::Interpolation => self.lex_normal(), 207 | LexerState::String(_) => self.lex_string_part(), 208 | } 209 | } 210 | } 211 | 212 | const TOKEN_REGEX_SOURCES: [&str; 7] = [ 213 | // 0 = identifier 214 | r"\A[a-zA-Z_][a-zA-Z0-9_'-]*", 215 | 216 | // 1 = integer 217 | r"\A[0-9]+", 218 | 219 | // 2 = float 220 | r"\A(([1-9][0-9]*\.[0-9]*)|(0?\.[0-9]+))([Ee][+-]?[0-9]+)?", 221 | 222 | // 3 = regular path 223 | r"\A[a-zA-Z0-9._+-]*(/[a-zA-Z0-9._+-]+)+/?", 224 | 225 | // 4 = home path (starts with `~`) 226 | r"\A~(/[a-zA-Z0-9._+-]+)+/?", 227 | 228 | // 5 = special path (surrounded by `<>`) 229 | r"\A<[a-zA-Z0-9._+-]+(/[a-zA-Z0-9._+-]+)*>", 230 | 231 | // 6 = URI 232 | r"\A[a-zA-Z][a-zA-Z0-9+.-]*:[a-zA-Z0-9%/?:@&=+$,_.!~*'-]+", 233 | ]; 234 | 235 | lazy_static!
{ 236 | static ref TOKEN_REGEX_SET: RegexSet = RegexSet::new(&TOKEN_REGEX_SOURCES).unwrap(); 237 | static ref TOKEN_REGEXES: Vec<Regex> = TOKEN_REGEX_SOURCES.iter().map(|src| { 238 | Regex::new(src).unwrap() 239 | }).collect(); 240 | } 241 | 242 | impl<'ctx, 'src> Lexer<'ctx, 'src> { 243 | pub fn new(ctx: &'ctx EvalContext, filename: &str, source: &'src str) -> Self { 244 | let chars = CharsPos::new(source.chars()); 245 | Lexer { 246 | _ctx: ctx, 247 | token_start_pos: chars.pos, 248 | token_start_str: source, 249 | chars, 250 | filename: Symbol::new(filename), 251 | state_stack: Vec::new(), 252 | } 253 | } 254 | 255 | fn state(&self) -> LexerState { 256 | *self.state_stack.last().unwrap_or(&LexerState::Normal) 257 | } 258 | 259 | fn lex_normal(&mut self) -> Option<Token<'src>> { 260 | debug_assert!(self.state() == LexerState::Normal || 261 | self.state() == LexerState::Interpolation); 262 | self.start_token(); 263 | 264 | macro_rules! simple { 265 | ($kind:ident, $len:expr) => ({ 266 | self.skip($len); 267 | Some(self.finish_token(TokenKind::$kind)) 268 | }) 269 | } 270 | 271 | let mut matches: Vec<(usize, &'src str)> = TOKEN_REGEX_SET 272 | .matches(self.peek_rest()) 273 | .iter() 274 | .map(|match_index| { 275 | let token_regex = &TOKEN_REGEXES[match_index]; 276 | let token_str = token_regex.find(self.peek_rest()).unwrap().as_str(); 277 | (match_index, token_str) 278 | }) 279 | .collect(); 280 | 281 | // Use the longest match. 282 | matches.sort_by_key(|&(_, s)| s.len()); 283 | if let Some(&(match_index, token_str)) = matches.last() { 284 | let token_kind = match match_index { 285 | 0 => ident_or_keyword_from_str(token_str), 286 | 1 => TokenKind::Integer, 287 | 2 => TokenKind::Float, 288 | 3 | 4 | 5 => TokenKind::Path, 289 | 6 => TokenKind::Uri, 290 | _ => unreachable!(), 291 | }; 292 | 293 | self.skip(token_str.len()); 294 | return Some(self.finish_token(token_kind)); 295 | } 296 | 297 | let c1 = match self.peek(0) { 298 | Some(c) => c, 299 | None => { 300 | if self.state() == LexerState::Interpolation { 301 | // TODO(solson): Report unterminated string hitting end of file.
302 | panic!("unterminated string hit end of file (inside Interpolation)"); 303 | } else { 304 | return None; 305 | } 306 | } 307 | }; 308 | let c2 = self.peek(1); 309 | 310 | match (c1, c2) { 311 | (c, _) if is_whitespace(c) => { 312 | self.skip_whitespace(); 313 | self.lex_normal() 314 | } 315 | 316 | ('#', _) => Some(self.lex_line_comment()), 317 | ('/', Some('*')) => Some(self.lex_long_comment()), 318 | 319 | ('/', Some('/')) => simple!(Update, 2), 320 | ('/', _) => simple!(Divide, 1), 321 | 322 | ('*', _) => simple!(Mult, 1), 323 | ('@', _) => simple!(At, 1), 324 | (',', _) => simple!(Comma, 1), 325 | ('?', _) => simple!(Question, 1), 326 | (':', _) => simple!(Colon, 1), 327 | (';', _) => simple!(Semicolon, 1), 328 | ('(', _) => simple!(ParenL, 1), 329 | (')', _) => simple!(ParenR, 1), 330 | ('[', _) => simple!(BracketL, 1), 331 | (']', _) => simple!(BracketR, 1), 332 | ('-', Some('>')) => simple!(Implies, 2), 333 | ('+', Some('+')) => simple!(Concat, 2), 334 | ('<', Some('=')) => simple!(LessEq, 2), 335 | ('>', Some('=')) => simple!(GreaterEq, 2), 336 | ('=', Some('=')) => simple!(Equals, 2), 337 | ('!', Some('=')) => simple!(NotEquals, 2), 338 | ('&', Some('&')) => simple!(And, 2), 339 | ('|', Some('|')) => simple!(Or, 2), 340 | ('-', _) => simple!(Minus, 1), 341 | ('+', _) => simple!(Plus, 1), 342 | ('<', _) => simple!(Less, 1), 343 | ('>', _) => simple!(Greater, 1), 344 | ('=', _) => simple!(Assign, 1), 345 | ('!', _) => simple!(Not, 1), 346 | 347 | ('.', Some('.')) if self.peek(2) == Some('.') => simple!(Ellipsis, 3), 348 | ('.', _) => simple!(Dot, 1), 349 | 350 | // The beginning of a string. 351 | ('"', _) => { 352 | self.state_stack.push(LexerState::String(StringStyle::Normal)); 353 | self.skip(1); 354 | Some(self.finish_token(TokenKind::Quote(StringStyle::Normal))) 355 | } 356 | 357 | // The beginning of an indent string. 358 | ('\'', Some('\'')) => { 359 | self.state_stack.push(LexerState::String(StringStyle::Indent)); 360 | self.skip(2); 361 | Some(self.finish_token(TokenKind::Quote(StringStyle::Indent))) 362 | } 363 | 364 | // If we're lexing inside of a string interpolation, we need to keep track of our depth 365 | // of nested curly braces (because we need to end up back in `String` lexing mode after 366 | // the correct matching '}'). 367 | // 368 | // We simply push and pop on the `state_stack` for this. Since deep nesting is rare, it 369 | // shouldn't be a performance problem. 370 | 371 | ('$', Some('{')) => { 372 | if self.state() == LexerState::Interpolation { 373 | self.state_stack.push(LexerState::Interpolation); 374 | } 375 | simple!(DollarBraceL, 2) 376 | } 377 | 378 | ('{', _) => { 379 | if self.state() == LexerState::Interpolation { 380 | self.state_stack.push(LexerState::Interpolation); 381 | } 382 | simple!(BraceL, 1) 383 | } 384 | 385 | ('}', _) => { 386 | if self.state() == LexerState::Interpolation { 387 | self.state_stack.pop(); 388 | } 389 | simple!(BraceR, 1) 390 | } 391 | 392 | (c, _) => panic!("unhandled char: {}, at: {}", c, self.pos()), 393 | } 394 | } 395 | 396 | fn lex_string_part(&mut self) -> Option> { 397 | let string_style = match self.state() { 398 | LexerState::String(style) => style, 399 | s => panic!("entered lex_string_part in a non-string lexing state: {:?}", s), 400 | }; 401 | self.start_token(); 402 | let delimiter = string_style.delimiter(); 403 | 404 | loop { 405 | // Check if we've hit the end of string. 
406 | if self.peek_starts_with(delimiter) { 407 | if string_style == StringStyle::Indent { 408 | match (self.peek(2), self.peek(3)) { 409 | (Some('$'), _) => { self.skip(3); continue; } 410 | (Some('\''), _) => { self.skip(3); continue; } 411 | (Some('\\'), Some(_)) => { self.skip(4); continue; } 412 | (Some('\\'), None) => { 413 | // TODO(solson): Report character escape in string meeting end of file. 414 | panic!("character escape hit end of file"); 415 | } 416 | _ => {} 417 | } 418 | } 419 | 420 | // If we lexed some chars before hitting end of string, we'll emit a `StrPart` 421 | // token before re-entering this function to emit the closing `Quote` token. 422 | if self.token_start_pos != self.pos() { 423 | break; 424 | } 425 | 426 | self.skip(delimiter.len()); 427 | self.state_stack.pop(); // Pop the string state. 428 | return Some(self.finish_token(TokenKind::Quote(string_style))); 429 | } 430 | 431 | let c1 = match self.peek(0) { 432 | Some(c) => c, 433 | None => { 434 | // TODO(solson): Report string meeting end of file. 435 | panic!("unclosed string hit end of file"); 436 | } 437 | }; 438 | let c2 = self.peek(1); 439 | 440 | match (c1, c2) { 441 | ('\\', Some(_)) if string_style == StringStyle::Normal => self.skip(2), 442 | 443 | ('\\', None) if string_style == StringStyle::Normal => { 444 | // TODO(solson): Report character escape (or string?) meeting end of file. 445 | panic!("character escape hit end of file"); 446 | } 447 | 448 | ('\'', Some('\'')) 449 | if string_style == StringStyle::Indent && self.peek(2) == Some('$') => { 450 | self.skip(3); 451 | } 452 | 453 | ('$', Some('{')) => { 454 | if self.token_start_pos == self.pos() { 455 | self.skip(2); // Skip over the '${'. 456 | self.state_stack.push(LexerState::Interpolation); 457 | return Some(self.finish_token(TokenKind::DollarBraceL)); 458 | } else { 459 | break; 460 | } 461 | } 462 | 463 | // Replace literal \r and \r\n character sequences in multiline strings with \n. 464 | ('\r', Some('\n')) => self.skip(2), 465 | ('\r', _) => self.skip(1), 466 | 467 | _ => self.skip(1), 468 | }; 469 | } 470 | 471 | Some(self.finish_token(TokenKind::StrPart(string_style))) 472 | } 473 | 474 | /// Regexp from the Nix lexer: `[ \t\r\n]+` 475 | fn skip_whitespace(&mut self) { 476 | debug_assert!(self.peek(0).map(is_whitespace).unwrap_or(false)); 477 | self.skip_while(is_whitespace); 478 | } 479 | 480 | /// Regexp from the Nix lexer: `#[^\r\n]*` 481 | fn lex_line_comment(&mut self) -> Token<'src> { 482 | debug_assert_eq!(self.peek(0), Some('#')); 483 | self.start_token(); 484 | self.skip_while(|c| c != '\n' && c != '\r'); 485 | self.finish_token(TokenKind::Comment) 486 | } 487 | 488 | /// Regexp from the Nix lexer: `\/\*([^*]|\*[^\/])*\*\/` 489 | fn lex_long_comment(&mut self) -> Token<'src> { 490 | debug_assert!(self.peek_starts_with("/*")); 491 | self.start_token(); 492 | self.skip(2); 493 | 494 | while !self.peek_starts_with("*/") { 495 | if self.peek(0).is_none() { 496 | // TODO(tsion): Report unterminated comment meeting end of file. 
497 | panic!("unclosed string hit end of file"); 498 | } 499 | self.skip(1); 500 | } 501 | self.skip(2); 502 | 503 | self.finish_token(TokenKind::Comment) 504 | } 505 | 506 | fn skip_while(&mut self, mut f: F) -> usize where F: FnMut(char) -> bool { 507 | self.chars.take_while_ref(|&c| f(c)).count() 508 | } 509 | 510 | fn skip(&mut self, count: usize) { 511 | for _ in 0..count { 512 | let _c = self.chars.next(); 513 | debug_assert!(_c.is_some()); 514 | } 515 | } 516 | 517 | fn peek_rest(&self) -> &'src str { 518 | self.chars.as_str() 519 | } 520 | 521 | fn peek_starts_with(&self, s: &str) -> bool { 522 | self.peek_rest().starts_with(s) 523 | } 524 | 525 | fn peek(&self, skip: usize) -> Option { 526 | self.chars.clone().skip(skip).next() 527 | } 528 | 529 | fn pos(&self) -> Pos { 530 | self.chars.pos 531 | } 532 | 533 | /// Record the start position of a token, to be paired with `finish_token`. 534 | fn start_token(&mut self) { 535 | self.token_start_pos = self.pos(); 536 | self.token_start_str = self.chars.as_str(); 537 | } 538 | 539 | /// Generate a token starting at the start position from the most recent `start_token` call and 540 | /// ending at the current position. 541 | fn finish_token(&self, kind: TokenKind) -> Token<'src> { 542 | // This slightly convoluted code takes two slices from certain positions in the source to 543 | // the end of the source and figures out the distance between their starting positions by 544 | // subtracting their distances to the end of the source. 545 | let start_len = self.token_start_str.len(); 546 | let end_len = self.chars.as_str().len(); 547 | let len = start_len - end_len; 548 | let source = &self.token_start_str[..len]; 549 | 550 | Token { 551 | kind, 552 | span: Span { 553 | filename: self.filename, 554 | start: self.token_start_pos, 555 | end: self.pos(), 556 | }, 557 | source, 558 | } 559 | } 560 | 561 | // This function is used for lexer tests, so if you change the format of it, you must change 562 | // the format of all the lexer tests (in tests/lexer/*.out). 563 | pub fn debug_string(self) -> String { 564 | let mut out = String::new(); 565 | for Token { kind, span, source } in self { 566 | let Span { start: s, end: e, .. 
} = span; 567 | write!(out, "[{}:{}-{}:{}] ({:?}) {:?}\n", 568 | s.line, s.column, e.line, e.column, kind, source).unwrap(); 569 | } 570 | out 571 | } 572 | } 573 | 574 | fn ident_or_keyword_from_str(str: &str) -> TokenKind { 575 | match str { 576 | "if" => TokenKind::KeywordIf, 577 | "then" => TokenKind::KeywordThen, 578 | "else" => TokenKind::KeywordElse, 579 | "assert" => TokenKind::KeywordAssert, 580 | "with" => TokenKind::KeywordWith, 581 | "let" => TokenKind::KeywordLet, 582 | "in" => TokenKind::KeywordIn, 583 | "rec" => TokenKind::KeywordRec, 584 | "inherit" => TokenKind::KeywordInherit, 585 | "or" => TokenKind::KeywordOr, 586 | _ => TokenKind::Identifier, 587 | } 588 | } 589 | 590 | pub mod lalrpop { 591 | use super::{Pos, TokenKind}; 592 | 593 | pub struct Lexer<'ctx, 'src>(pub super::Lexer<'ctx, 'src>); 594 | 595 | #[derive(Debug)] 596 | pub enum LexerError {} 597 | 598 | impl<'ctx, 'src> Iterator for Lexer<'ctx, 'src> { 599 | type Item = Result<(Pos, TokenKind, Pos), LexerError>; 600 | 601 | fn next(&mut self) -> Option<Self::Item> { 602 | self.0.next().map(|token| { 603 | Ok((token.span.start, token.kind, token.span.end)) 604 | }) 605 | } 606 | } 607 | } 608 | -------------------------------------------------------------------------------- /src/lib.rs: -------------------------------------------------------------------------------- 1 | extern crate itertools; 2 | #[macro_use] 3 | extern crate lazy_static; 4 | extern crate regex; 5 | 6 | pub mod ast; 7 | pub mod context; 8 | pub mod lex; 9 | pub mod parse; // Generated by LALRPOP from parse.lalrpop. 10 | pub mod symbol; 11 | -------------------------------------------------------------------------------- /src/parse.lalrpop: -------------------------------------------------------------------------------- 1 | use ast::Expr; 2 | use lex; 3 | use symbol::Symbol; 4 | 5 | grammar; 6 | 7 | pub Expr: String = { 8 | ExprFunction, 9 | }; 10 | 11 | pub ExprFunction: String = { 12 | <i:ID> ":" <e:ExprFunction> => 13 | format!("{}: {}", i, e), 14 | 15 | "{" <f:Formals> "}" ":" <e:ExprFunction> => 16 | format!("{{ {} }}: {}", f, e), 17 | 18 | "{" <f:Formals> "}" "@" <i:ID> ":" <e:ExprFunction> => 19 | format!("{{ {} }} @ {}: {}", f, i, e), 20 | 21 | <i:ID> "@" "{" <f:Formals> "}" ":" <e:ExprFunction> => 22 | format!("{} @ {{ {} }}: {}", i, f, e), 23 | 24 | "assert" <e1:Expr> ";" <e2:ExprFunction> => 25 | format!("assert {}; {}", e1, e2), 26 | 27 | "with" <e1:Expr> ";" <e2:ExprFunction> => 28 | format!("with {}; {}", e1, e2), 29 | 30 | "let" <b:Binds> "in" <e:ExprFunction> => 31 | format!("let {} in {}", b, e), 32 | 33 | ExprIf, 34 | }; 35 | 36 | pub ExprIf: String = { 37 | "if" <e1:Expr> "then" <e2:Expr> "else" <e3:Expr> => 38 | format!("if {} then {} else {}", e1, e2, e3), 39 | 40 | ExprOp, 41 | }; 42 | 43 | pub ExprOp: String = { 44 | // TODO: copy expr_op from nix's parser.y and figure out how to do precedence in LALRPOP 45 | 46 | ExprApp, 47 | }; 48 | 49 | pub ExprApp: String = { 50 | <e1:ExprApp> <e2:ExprSelect> => 51 | format!("({}) {}", e1, e2), 52 | 53 | ExprSelect, 54 | }; 55 | 56 | pub ExprSelect: String = { 57 | // <e:ExprSimple> "." <a:AttrPath> => 58 | // format!("({}).{}", e, a), 59 | 60 | // <e1:ExprSimple> "." <a:AttrPath> "or" <e2:ExprSelect> => 61 | // format!("({}).{} or {}", e1, a, e2), 62 | 63 | // Quoting Nix's parser.y: "Backwards compatability: because Nixpkgs has a rarely used function 64 | // named `or`, allow stuff like `map or [..]`." 65 | // 66 | // Thus, we treat this case like a function application expression passing a variable named 67 | // "or" (which is otherwise usually treated as a keyword). 68 | <e:ExprSimple> "or" => 69 | format!("({}) or", e), 70 | 71 | ExprSimple, 72 | }; 73 | 74 | pub ExprSimple: String = { 75 | <i:ID> => { 76 | if i == "__curPos" { 77 | // TODO: make a special Expr::Position node, like in Nix?
78 | format!("__curPos") 79 | } else { 80 | format!("{}", i) 81 | } 82 | }, 83 | 84 | => format!("{}", n), 85 | => format!("{}", f), 86 | 87 | // "\"" "\"", 88 | 89 | // "''" "''" => unimplemented!("indent strings"), 90 | 91 | // TODO: the rest of expr_simple from Nix's parser.y (paths) 92 | 93 | => format!("{}", u), 94 | 95 | "(" ")", 96 | 97 | "let" "{" "}" => 98 | format!("let {{ {} }}", b), 99 | 100 | "rec" "{" "}" => 101 | format!("rec {{ {} }}", b), 102 | 103 | "{" "}" => 104 | format!("{{ {} }}", b), 105 | 106 | "[" "]", 107 | }; 108 | 109 | pub Binds: String = { 110 | "=" ";" => 111 | format!("{} {} = {};", b, a, e), 112 | 113 | "inherit" ";" => 114 | format!("{} inherit {};", b, a), 115 | 116 | "inherit" "(" ")" ";" => 117 | format!("{} inherit ({}) {};", b, e, a), 118 | 119 | => format!(""), 120 | }; 121 | 122 | pub Attrs: String = { 123 | => format!("{} {}", as_, a), 124 | => format!("{} {}", as_, a), 125 | => format!(""), 126 | }; 127 | 128 | pub AttrPath: String = { 129 | "." => 130 | format!("{}.{}", p, a), 131 | 132 | "." => 133 | format!("{}.{}", p, a), 134 | 135 | => 136 | format!("{}", a), 137 | 138 | => 139 | format!("{}", a), 140 | }; 141 | 142 | pub Attr: String = { 143 | => format!("{}", i), 144 | "or" => format!("or"), 145 | }; 146 | 147 | pub StringAttr: String = { 148 | // "\"" "\"", 149 | "${" "}", 150 | }; 151 | 152 | pub ExprList: String = { 153 | => format!("{} {}", l, e), 154 | => format!(""), 155 | }; 156 | 157 | pub Formals: String = { 158 | "," => format!("{}, {}", f, fs), 159 | => format!("{}", f), 160 | => format!(""), 161 | "..." => format!("..."), 162 | }; 163 | 164 | pub Formal: String = { 165 | => format!("{}", i), 166 | "?" => format!("{} ? {}", i, e), 167 | }; 168 | 169 | match { 170 | r"[a-zA-z_][a-zA-Z0-9_'-]*" => ID, 171 | r"[0-9]+" => INT, 172 | r"(([1-9][0-9]*\.[0-9]*)|(0?\.[0-9]+))([Ee][+-]?[0-9]+)?" => FLOAT, 173 | r"[a-zA-Z0-9._+-]*(/[a-zA-Z0-9._+-]+)+/?" => PATH, 174 | r"~(/[a-zA-Z0-9._+-]+)+/?" => HPATH, 175 | r"<[a-zA-Z0-9._+-]+(/[a-zA-Z0-9._+-]+)*>" => SPATH, 176 | r"[a-zA-Z][a-zA-Z0-9+.-]*:[a-zA-Z0-9%/?:@&=+$,_.!~*'-]+" => URI, 177 | 178 | "assert", 179 | "with", 180 | "let", 181 | "rec", 182 | "in", 183 | "if", 184 | "then", 185 | "else", 186 | "or", 187 | "inherit", 188 | 189 | ":", 190 | ";", 191 | "{", 192 | "}", 193 | "(", 194 | ")", 195 | "[", 196 | "]", 197 | ",", 198 | "...", 199 | "?", 200 | "@", 201 | "=", 202 | ".", 203 | "${", 204 | } 205 | -------------------------------------------------------------------------------- /src/symbol.rs: -------------------------------------------------------------------------------- 1 | use std::collections::HashMap; 2 | use std::fmt; 3 | use std::sync::Mutex; 4 | 5 | #[derive(Copy, Clone, Eq, PartialEq)] 6 | pub struct Symbol { 7 | index: u32, 8 | } 9 | 10 | #[derive(Default)] 11 | struct SymbolTable { 12 | str_to_sym: HashMap, Symbol>, 13 | sym_to_str: Vec<*const str>, 14 | } 15 | 16 | // SAFE: It's not auto-implemented because of the raw `*const str` pointers, but our use of those 17 | // can't cause any issues. 18 | unsafe impl Send for SymbolTable {} 19 | 20 | lazy_static! 
{ 21 | static ref SYMBOL_TABLE: Mutex<SymbolTable> = Mutex::default(); 22 | } 23 | 24 | impl Symbol { 25 | pub fn new<S: AsRef<str> + Into<String>>(string: S) -> Symbol { 26 | let mut table = SYMBOL_TABLE.lock().unwrap(); 27 | 28 | if let Some(&sym) = table.str_to_sym.get(string.as_ref()) { 29 | return sym; 30 | } 31 | 32 | let str_box = string.into().into_boxed_str(); 33 | let new_sym = Symbol { index: table.sym_to_str.len() as u32 }; 34 | table.sym_to_str.push(&str_box[..]); 35 | table.str_to_sym.insert(str_box, new_sym); 36 | new_sym 37 | } 38 | 39 | pub fn as_str(self) -> &'static str { 40 | let table = SYMBOL_TABLE.lock().unwrap(); 41 | 42 | // SAFE: We ensure all *const str in the Vec point into strings kept alive by the HashMap 43 | // and we never remove from the HashMap. The HashMap itself lives in static memory and the 44 | // strings' heap allocations never move so the lifetime of the contained strings may also 45 | // be 'static. 46 | unsafe { 47 | &*table.sym_to_str[self.index as usize] 48 | } 49 | } 50 | } 51 | 52 | impl fmt::Debug for Symbol { 53 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 54 | write!(f, "{:?}", self.as_str()) 55 | } 56 | } 57 | 58 | impl fmt::Display for Symbol { 59 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 60 | write!(f, "{}", self.as_str()) 61 | } 62 | } 63 | -------------------------------------------------------------------------------- /tests/lexer.rs: -------------------------------------------------------------------------------- 1 | extern crate glob; 2 | extern crate nixrs; 3 | #[macro_use] 4 | extern crate pretty_assertions; 5 | 6 | use nixrs::context::EvalContext; 7 | use nixrs::lex::Lexer; 8 | use std::io::{self, Read}; 9 | use std::fmt; 10 | use std::fs::File; 11 | use std::path::Path; 12 | 13 | #[derive(Eq, Ord, PartialEq, PartialOrd)] 14 | struct DebugAsDisplay<T>(T); 15 | 16 | impl<T: fmt::Display> fmt::Debug for DebugAsDisplay<T> { 17 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 18 | fmt::Display::fmt(&self.0, f) 19 | } 20 | } 21 | 22 | #[test] 23 | fn lexer_tests() { 24 | for glob_result in glob::glob("tests/lexer/*.in").expect("Couldn't parse glob") { 25 | let input_file = glob_result.expect("Couldn't access path in glob"); 26 | let output_file = input_file.with_extension("out"); 27 | 28 | let source = read_file(&input_file).expect("Couldn't read input file"); 29 | let expected = read_file(&output_file).expect("Couldn't read expected output file"); 30 | 31 | let ctx = EvalContext::new(); 32 | let lexer = Lexer::new(&ctx, input_file.to_str().unwrap(), &source); 33 | let output = lexer.debug_string(); 34 | 35 | println!("testing {}", input_file.display()); 36 | assert_eq!(DebugAsDisplay(output), DebugAsDisplay(expected)); 37 | } 38 | } 39 | 40 | fn read_file(input_file: &Path) -> io::Result<String> { 41 | let mut source = String::new(); 42 | File::open(input_file)?.read_to_string(&mut source)?; 43 | Ok(source) 44 | } 45 | -------------------------------------------------------------------------------- /tests/lexer/double-quotes-1.in: -------------------------------------------------------------------------------- 1 | "" 2 | -------------------------------------------------------------------------------- /tests/lexer/double-quotes-1.out: -------------------------------------------------------------------------------- 1 | [1:1-1:2] (Quote(Normal)) "\"" 2 | [1:2-1:3] (Quote(Normal)) "\"" 3 | -------------------------------------------------------------------------------- /tests/lexer/double-quotes-2.in:
-------------------------------------------------------------------------------- 1 | " " 2 | -------------------------------------------------------------------------------- /tests/lexer/double-quotes-2.out: -------------------------------------------------------------------------------- 1 | [1:1-1:2] (Quote(Normal)) "\"" 2 | [1:2-1:3] (StrPart(Normal)) " " 3 | [1:3-1:4] (Quote(Normal)) "\"" 4 | -------------------------------------------------------------------------------- /tests/lexer/double-quotes-3.in: -------------------------------------------------------------------------------- 1 | "\a\b\c\\\"\n\r\t\${}" 2 | -------------------------------------------------------------------------------- /tests/lexer/double-quotes-3.out: -------------------------------------------------------------------------------- 1 | [1:1-1:2] (Quote(Normal)) "\"" 2 | [1:2-1:22] (StrPart(Normal)) "\\a\\b\\c\\\\\\\"\\n\\r\\t\\${}" 3 | [1:22-1:23] (Quote(Normal)) "\"" 4 | -------------------------------------------------------------------------------- /tests/lexer/double-quotes-4.in: -------------------------------------------------------------------------------- 1 | "foobar" 2 | -------------------------------------------------------------------------------- /tests/lexer/double-quotes-4.out: -------------------------------------------------------------------------------- 1 | [1:1-1:2] (Quote(Normal)) "\"" 2 | [1:2-1:8] (StrPart(Normal)) "foobar" 3 | [1:8-1:9] (Quote(Normal)) "\"" 4 | -------------------------------------------------------------------------------- /tests/lexer/double-quotes-interpolation-1.in: -------------------------------------------------------------------------------- 1 | "${}" 2 | -------------------------------------------------------------------------------- /tests/lexer/double-quotes-interpolation-1.out: -------------------------------------------------------------------------------- 1 | [1:1-1:2] (Quote(Normal)) "\"" 2 | [1:2-1:4] (DollarBraceL) "${" 3 | [1:4-1:5] (BraceR) "}" 4 | [1:5-1:6] (Quote(Normal)) "\"" 5 | -------------------------------------------------------------------------------- /tests/lexer/double-quotes-interpolation-2.in: -------------------------------------------------------------------------------- 1 | "foo${}" 2 | -------------------------------------------------------------------------------- /tests/lexer/double-quotes-interpolation-2.out: -------------------------------------------------------------------------------- 1 | [1:1-1:2] (Quote(Normal)) "\"" 2 | [1:2-1:5] (StrPart(Normal)) "foo" 3 | [1:5-1:7] (DollarBraceL) "${" 4 | [1:7-1:8] (BraceR) "}" 5 | [1:8-1:9] (Quote(Normal)) "\"" 6 | -------------------------------------------------------------------------------- /tests/lexer/double-quotes-interpolation-3.in: -------------------------------------------------------------------------------- 1 | "${}bar" 2 | -------------------------------------------------------------------------------- /tests/lexer/double-quotes-interpolation-3.out: -------------------------------------------------------------------------------- 1 | [1:1-1:2] (Quote(Normal)) "\"" 2 | [1:2-1:4] (DollarBraceL) "${" 3 | [1:4-1:5] (BraceR) "}" 4 | [1:5-1:8] (StrPart(Normal)) "bar" 5 | [1:8-1:9] (Quote(Normal)) "\"" 6 | -------------------------------------------------------------------------------- /tests/lexer/double-quotes-interpolation-4.in: -------------------------------------------------------------------------------- 1 | "foo${}bar" 2 | 
-------------------------------------------------------------------------------- /tests/lexer/double-quotes-interpolation-4.out: -------------------------------------------------------------------------------- 1 | [1:1-1:2] (Quote(Normal)) "\"" 2 | [1:2-1:5] (StrPart(Normal)) "foo" 3 | [1:5-1:7] (DollarBraceL) "${" 4 | [1:7-1:8] (BraceR) "}" 5 | [1:8-1:11] (StrPart(Normal)) "bar" 6 | [1:11-1:12] (Quote(Normal)) "\"" 7 | -------------------------------------------------------------------------------- /tests/lexer/identifiers-1.in: -------------------------------------------------------------------------------- 1 | a 2 | -------------------------------------------------------------------------------- /tests/lexer/identifiers-1.out: -------------------------------------------------------------------------------- 1 | [1:1-1:2] (Identifier) "a" 2 | -------------------------------------------------------------------------------- /tests/lexer/identifiers-2.in: -------------------------------------------------------------------------------- 1 | a a b 2 | -------------------------------------------------------------------------------- /tests/lexer/identifiers-2.out: -------------------------------------------------------------------------------- 1 | [1:1-1:2] (Identifier) "a" 2 | [1:3-1:4] (Identifier) "a" 3 | [1:5-1:6] (Identifier) "b" 4 | -------------------------------------------------------------------------------- /tests/lexer/identifiers-3.in: -------------------------------------------------------------------------------- 1 | foobar 2 | -------------------------------------------------------------------------------- /tests/lexer/identifiers-3.out: -------------------------------------------------------------------------------- 1 | [1:1-1:7] (Identifier) "foobar" 2 | -------------------------------------------------------------------------------- /tests/lexer/integers-1.in: -------------------------------------------------------------------------------- 1 | 0 2 | -------------------------------------------------------------------------------- /tests/lexer/integers-1.out: -------------------------------------------------------------------------------- 1 | [1:1-1:2] (Integer) "0" 2 | -------------------------------------------------------------------------------- /tests/lexer/integers-2.in: -------------------------------------------------------------------------------- 1 | 42 2 | -------------------------------------------------------------------------------- /tests/lexer/integers-2.out: -------------------------------------------------------------------------------- 1 | [1:1-1:3] (Integer) "42" 2 | -------------------------------------------------------------------------------- /tests/lexer/integers-3.in: -------------------------------------------------------------------------------- 1 | 1 2 2 | -------------------------------------------------------------------------------- /tests/lexer/integers-3.out: -------------------------------------------------------------------------------- 1 | [1:1-1:2] (Integer) "1" 2 | [1:3-1:4] (Integer) "2" 3 | -------------------------------------------------------------------------------- /tests/lexer/ints-vs-idents-1.in: -------------------------------------------------------------------------------- 1 | a1 2 | -------------------------------------------------------------------------------- /tests/lexer/ints-vs-idents-1.out: -------------------------------------------------------------------------------- 1 | [1:1-1:3] (Identifier) "a1" 2 | 
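The ints-vs-idents pair (this test and the next) pins down the longest-match rule in `lex_normal` (src/lex.rs): every anchored token regex is run against the remaining input and the longest match wins. Below is a minimal, self-contained sketch of that rule; the `longest_match` helper and `main` are illustrative only (nothing by these names exists in the crate), and the two regexes are copied from `TOKEN_REGEX_SOURCES`.

```rust
extern crate regex; // the same dependency the crate itself uses

use regex::Regex;

/// Try every anchored token regex against the start of `input` and keep the
/// longest match, mirroring the sort-by-length step in `Lexer::lex_normal`.
fn longest_match<'a>(input: &'a str, regexes: &[Regex]) -> Option<(usize, &'a str)> {
    regexes
        .iter()
        .enumerate()
        .filter_map(|(i, re)| re.find(input).map(|m| (i, m.as_str())))
        .max_by_key(|&(_, s)| s.len())
}

fn main() {
    let regexes = [
        Regex::new(r"\A[a-zA-Z_][a-zA-Z0-9_'-]*").unwrap(), // 0 = identifier
        Regex::new(r"\A[0-9]+").unwrap(),                   // 1 = integer
    ];

    // "a1": only the identifier regex matches, and it consumes the whole input.
    assert_eq!(longest_match("a1", &regexes), Some((0, "a1")));

    // "1a": only the integer regex matches, and it stops after the digit, so
    // the lexer emits Integer "1" and then starts over at "a".
    assert_eq!(longest_match("1a", &regexes), Some((1, "1")));
}
```

This is exactly what the two `.out` files record: `a1` is a single `Identifier` token, while `1a` splits into an `Integer` followed by an `Identifier`.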
-------------------------------------------------------------------------------- /tests/lexer/ints-vs-idents-2.in: -------------------------------------------------------------------------------- 1 | 1a 2 | -------------------------------------------------------------------------------- /tests/lexer/ints-vs-idents-2.out: -------------------------------------------------------------------------------- 1 | [1:1-1:2] (Integer) "1" 2 | [1:2-1:3] (Identifier) "a" 3 | -------------------------------------------------------------------------------- /tests/lexer/whitespace-and-comments-1.in: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /tests/lexer/whitespace-and-comments-1.out: -------------------------------------------------------------------------------- -------------------------------------------------------------------------------- /tests/lexer/whitespace-and-comments-2.in: -------------------------------------------------------------------------------- 1 | /* just a comment */ 2 | -------------------------------------------------------------------------------- /tests/lexer/whitespace-and-comments-2.out: -------------------------------------------------------------------------------- 1 | [1:1-1:21] (Comment) "/* just a comment */" 2 | -------------------------------------------------------------------------------- /tests/lexer/whitespace-and-comments-3.in: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | # comments and 5 | /* whitespace 6 | don't /* matter */ 7 | 8 | 9 | 10 | -------------------------------------------------------------------------------- /tests/lexer/whitespace-and-comments-3.out: -------------------------------------------------------------------------------- 1 | [4:2-4:16] (Comment) "# comments and" 2 | [5:2-6:20] (Comment) "/* whitespace\n don\'t /* matter */" 3 | -------------------------------------------------------------------------------- /tests/lexer/whitespace-and-comments-4.in: -------------------------------------------------------------------------------- 1 | # line comment 2 | 1 ### lex this please 3 | 13 4 | 42 5 | # end of file 6 | -------------------------------------------------------------------------------- /tests/lexer/whitespace-and-comments-4.out: -------------------------------------------------------------------------------- 1 | [1:1-1:15] (Comment) "# line comment" 2 | [2:3-2:4] (Integer) "1" 3 | [2:5-2:24] (Comment) "### lex this please" 4 | [3:1-3:3] (Integer) "13" 5 | [4:2-4:4] (Integer) "42" 6 | [5:1-5:14] (Comment) "# end of file" 7 | --------------------------------------------------------------------------------
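For reference, here is a minimal sketch of driving the lexer by hand, mirroring what tests/lexer.rs does for each `*.in`/`*.out` pair above. `EvalContext::new`, `Lexer::new`, and `debug_string` are the crate's actual API; the `"<example>"` filename and the input string are arbitrary values chosen for this example.

```rust
extern crate nixrs;

use nixrs::context::EvalContext;
use nixrs::lex::Lexer;

fn main() {
    // The same shape of input as double-quotes-interpolation-4.in.
    let source = "\"foo${}bar\"";

    let ctx = EvalContext::new();
    // The second argument is only used to label the spans in the tokens.
    let lexer = Lexer::new(&ctx, "<example>", source);

    // Prints one "[line:col-line:col] (Kind) source" line per token, the same
    // format the *.out files above are written in.
    print!("{}", lexer.debug_string());
}
```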