From d31d326526906b1a7c7eab9acb8477fd8d3f1621 Mon Sep 17 00:00:00 2001
From: MarvelousAnything <marvelousanything@gmail.com>
Date: Sat, 12 Nov 2022 23:29:10 -0500
Subject: [PATCH] Cargo fmt
---
examples/lex.rs | 6 +-
src/ast/mod.rs | 10 ++--
src/lex/lexer.rs | 141 +++++++++++++++++++++++++++++++++-------------
src/lex/mod.rs | 4 +-
src/lex/token.rs | 17 ++++--
src/lex/types.rs | 2 +-
src/lib.rs | 3 +-
src/parser/mod.rs | 2 +-
8 files changed, 130 insertions(+), 55 deletions(-)
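
Note: this is a pure `cargo fmt` pass; every hunk below reindents code, reorders imports, or adds a missing final newline, with no behavioral change. Formatting like this can be gated in CI with `cargo fmt --all -- --check`, which exits non-zero whenever rustfmt would rewrite anything. A minimal rustfmt.toml sketch pinning the one default these hunks visibly rely on (hypothetical file; the repository is not shown shipping one):

    # reorder_imports is already rustfmt's default; stated here for explicitness.
    reorder_imports = true   # sorts `use` lines alphabetically, as in examples/lex.rs

rustfmt also unconditionally ensures a trailing newline, which is where the repeated "\ No newline at end of file" fixes in this patch come from.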
diff --git a/examples/lex.rs b/examples/lex.rs
index 51f416e..50fc4aa 100644
--- a/examples/lex.rs
+++ b/examples/lex.rs
@@ -1,6 +1,6 @@
-use std::fs;
-use log::{error, info};
use desolation::lex::Lexer;
+use log::{error, info};
+use std::fs;
fn main() {
sensible_env_logger::init!();
@@ -24,4 +24,4 @@ fn main() {
error!("Failed to lex: {}", e);
}
}
-}
\ No newline at end of file
+}
diff --git a/src/ast/mod.rs b/src/ast/mod.rs
index e17b2c0..9d1821f 100644
--- a/src/ast/mod.rs
+++ b/src/ast/mod.rs
@@ -1,21 +1,21 @@
// mod node;
-//
+//
// pub enum Program {
// VarList(Vec<Var>),
// FunList(Vec<Fun>),
// }
-//
+//
// pub struct ID {
// pub name: String,
// }
-//
+//
// pub struct Var {
// pub name: String,
// pub value: Option<Expr>,
// }
-//
+//
// pub struct Fun {
// pub name: String,
// pub params: Vec<ID>,
// pub body: Vec<Stmt>,
-// }
\ No newline at end of file
+// }
diff --git a/src/lex/lexer.rs b/src/lex/lexer.rs
index 22cd3d0..9f1c01e 100644
--- a/src/lex/lexer.rs
+++ b/src/lex/lexer.rs
@@ -1,9 +1,9 @@
-use std::fmt::Display;
use crate::lex::token::{Token, TokenType};
+use crate::lex::types::{KeywordToken, LiteralToken};
use anyhow::{bail, ensure, Result};
-use log::{debug};
+use log::debug;
+use std::fmt::Display;
use thiserror::Error;
-use crate::lex::types::{KeywordToken, LiteralToken};
#[derive(Debug, Clone)]
pub struct Lexer {
@@ -53,38 +53,48 @@ impl Display for TokenStream {
impl TokenStream {
pub fn get_keywords(&self) -> Self {
- self.tokens.iter()
- .filter(|t| t.is_keyword()).cloned()
+ self.tokens
+ .iter()
+ .filter(|t| t.is_keyword())
+ .cloned()
.collect::<TokenStream>()
}
pub fn get_identifiers(&self) -> Self {
- self.tokens.iter()
- .filter(|t| t.is_identifier()).cloned()
+ self.tokens
+ .iter()
+ .filter(|t| t.is_identifier())
+ .cloned()
.collect::<TokenStream>()
}
pub fn get_integer_literals(&self) -> Self {
- self.tokens.iter()
- .filter(|t| t.is_integer_literal()).cloned()
+ self.tokens
+ .iter()
+ .filter(|t| t.is_integer_literal())
+ .cloned()
.collect::<TokenStream>()
}
pub fn get_string_literals(&self) -> Self {
- self.tokens.iter()
- .filter(|t| t.is_string_literal()).cloned()
+ self.tokens
+ .iter()
+ .filter(|t| t.is_string_literal())
+ .cloned()
.collect::<TokenStream>()
}
pub fn get_character_literals(&self) -> Self {
- self.tokens.iter()
- .filter(|t| t.is_character_literal()).cloned()
+ self.tokens
+ .iter()
+ .filter(|t| t.is_character_literal())
+ .cloned()
.collect::<TokenStream>()
}
}
impl FromIterator<Token> for TokenStream {
- fn from_iter<T: IntoIterator<Item=Token>>(iter: T) -> Self {
+ fn from_iter<T: IntoIterator<Item = Token>>(iter: T) -> Self {
let tokens = iter.into_iter().collect();
TokenStream { tokens }
}
@@ -120,21 +130,25 @@ impl Lexer {
// Post processing.
// fold consecutive NL tokens into one.
// find a run of consecutive NL tokens and delete all but the last one (see the sketch after this hunk).
- let indices = tokens.iter()
+ let indices = tokens
+ .iter()
.enumerate()
.filter(|(_, t)| t.token_type() == TokenType::NL)
.map(|(i, _)| i)
.collect::<Vec<usize>>();
- let indices_folded = indices.iter().fold((Vec::new(), Vec::new()), |mut acc, i| {
- if let Some(last) = acc.0.last() {
- if i - last == 1 {
- acc.1.push(*last);
- acc.0.pop();
+ let indices_folded = indices
+ .iter()
+ .fold((Vec::new(), Vec::new()), |mut acc, i| {
+ if let Some(last) = acc.0.last() {
+ if i - last == 1 {
+ acc.1.push(*last);
+ acc.0.pop();
+ }
}
- }
- acc.0.push(*i);
- acc
- }).1;
+ acc.0.push(*i);
+ acc
+ })
+ .1;
for i in indices_folded.iter().rev() {
tokens.remove(*i);
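
The fold above collapses each run of consecutive NL indices by pushing every index except the run's last into acc.1, then removing those positions in reverse so earlier indices stay valid. A standalone sketch of the same collapse using Vec::dedup_by; note that dedup_by keeps the first NL of a run rather than the last, a real difference for position-carrying tokens:

    #[derive(Debug, PartialEq)]
    enum Tok {
        NL,
        Word(&'static str),
    }

    fn main() {
        let mut toks = vec![Tok::Word("a"), Tok::NL, Tok::NL, Tok::NL, Tok::Word("b")];
        // Consecutive NLs compare "equal", so dedup_by drops all but the first of each run.
        toks.dedup_by(|a, b| matches!(a, Tok::NL) && matches!(b, Tok::NL));
        assert_eq!(toks, vec![Tok::Word("a"), Tok::NL, Tok::Word("b")]);
    }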
@@ -159,9 +173,18 @@ impl Lexer {
}
fn get_curr(&self) -> Result<char> {
- ensure!(self.has_next(), LexerError::InvalidEOF(self.line_no, self.col_no));
+ ensure!(
+ self.has_next(),
+ LexerError::InvalidEOF(self.line_no, self.col_no)
+ );
let c = self.source.chars().nth(self.index).unwrap();
- trace!("Got current character: {:?} at {}:{}[{}]", c, self.line_no, self.col_no, self.index);
+ trace!(
+ "Got current character: {:?} at {}:{}[{}]",
+ c,
+ self.line_no,
+ self.col_no,
+ self.index
+ );
Ok(c)
}
@@ -264,13 +287,33 @@ impl Lexer {
let start = (self.index, self.line_no, self.col_no);
let token = match self.curr_char {
n if self.is_whitespace() => {
- trace!("Found whitespace {:?} at {}:{}[{}]", n, self.line_no, self.col_no, self.index);
+ trace!(
+ "Found whitespace {:?} at {}:{}[{}]",
+ n,
+ self.line_no,
+ self.col_no,
+ self.index
+ );
self.skip_whitespace()?;
- debug!("Skipped {} whitespace from {}:{}[{}] to {}:{}[{}]", self.index - start.0, start.1, start.2, start.0, self.line_no, self.col_no, self.index);
+ debug!(
+ "Skipped {} whitespace from {}:{}[{}] to {}:{}[{}]",
+ self.index - start.0,
+ start.1,
+ start.2,
+ start.0,
+ self.line_no,
+ self.col_no,
+ self.index
+ );
return self.get_next_token();
}
'\n' => {
- trace!("Found newline at {}:{}[{}]", self.line_no, self.col_no, self.index);
+ trace!(
+ "Found newline at {}:{}[{}]",
+ self.line_no,
+ self.col_no,
+ self.index
+ );
// fold newlines into a single token
self.col_no = 0;
self.line_no += 1;
@@ -282,38 +325,60 @@ impl Lexer {
n if n.is_alphabetic() => {
let identifier = self.collect_identifier()?;
if let Some(keyword) = KeywordToken::from_str(&identifier) {
- debug!("Found keyword {:?} at {}:{}[{}]", keyword, self.line_no, self.col_no, self.index);
+ debug!(
+ "Found keyword {:?} at {}:{}[{}]",
+ keyword, self.line_no, self.col_no, self.index
+ );
TokenType::Keyword(keyword)
} else {
- debug!("Found identifier {:?} at {}:{}[{}]", identifier, self.line_no, self.col_no, self.index);
+ debug!(
+ "Found identifier {:?} at {}:{}[{}]",
+ identifier, self.line_no, self.col_no, self.index
+ );
TokenType::IdentifierToken(identifier)
}
}
n if n.is_numeric() => {
let integer = self.collect_integer()?;
- debug!("Collected integer: {} at {}:{}[{}] to {}:{}[{}]", integer, start.1, start.2, start.0, self.line_no, self.col_no, self.index);
+ debug!(
+ "Collected integer: {} at {}:{}[{}] to {}:{}[{}]",
+ integer, start.1, start.2, start.0, self.line_no, self.col_no, self.index
+ );
TokenType::Literal(LiteralToken::IntegerLiteral(integer))
}
'"' => {
self.advance()?;
let string = self.collect_string()?;
- debug!("Collected string: \"{}\" at {}:{}[{}] to {}:{}[{}]", string, start.1, start.2, start.0, self.line_no, self.col_no, self.index);
+ debug!(
+ "Collected string: \"{}\" at {}:{}[{}] to {}:{}[{}]",
+ string, start.1, start.2, start.0, self.line_no, self.col_no, self.index
+ );
TokenType::Literal(LiteralToken::StringLiteral(string))
}
'\'' => {
self.advance()?;
let character = self.consume()?;
- ensure!(self.consume()? == '\'', LexerError::InvalidCharacterLiteral(self.line_no, self.col_no));
- debug!("Collected character literal: {:?} at {}:{}[{}] to {}:{}[{}]", character, start.1, start.2, start.0, self.line_no, self.col_no, self.index);
+ ensure!(
+ self.consume()? == '\'',
+ LexerError::InvalidCharacterLiteral(self.line_no, self.col_no)
+ );
+ debug!(
+ "Collected character literal: {:?} at {}:{}[{}] to {}:{}[{}]",
+ character, start.1, start.2, start.0, self.line_no, self.col_no, self.index
+ );
TokenType::Literal(LiteralToken::CharacterLiteral(character))
}
'#' => {
- debug!("Found comment at {}:{}[{}]", self.line_no, self.col_no, self.index);
+ debug!(
+ "Found comment at {}:{}[{}]",
+ self.line_no, self.col_no, self.index
+ );
self.advance_eol()?;
return self.get_next_token();
}
- _ => TokenType::from_char(self.consume()?)
- }.at(start.0, start.1, start.2);
+ _ => TokenType::from_char(self.consume()?),
+ }
+ .at(start.0, start.1, start.2);
Ok(token)
}
}
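
The last hunk in this file moves `.at(start.0, start.1, start.2)` onto its own line: the whole match expression evaluates to a TokenType, and `.at` then attaches the recorded start position to build the final Token. A reduced sketch of that shape (every name here is a stand-in; only the three-argument `at` call is visible in the patch):

    struct Pos { index: usize, line: usize, col: usize }
    enum Kind { NL, Eof }
    struct Tok { kind: Kind, pos: Pos }

    impl Kind {
        // Mirrors the TokenType::at(index, line, col) call in get_next_token.
        fn at(self, index: usize, line: usize, col: usize) -> Tok {
            Tok { kind: self, pos: Pos { index, line, col } }
        }
    }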
diff --git a/src/lex/mod.rs b/src/lex/mod.rs
index 0707644..2348ab7 100644
--- a/src/lex/mod.rs
+++ b/src/lex/mod.rs
@@ -1,6 +1,6 @@
pub(crate) mod consts;
-pub(crate) mod types;
pub(crate) mod lexer;
pub(crate) mod token;
+pub(crate) mod types;
-pub use lexer::Lexer;
\ No newline at end of file
+pub use lexer::Lexer;
diff --git a/src/lex/token.rs b/src/lex/token.rs
index feff4a7..f85b886 100644
--- a/src/lex/token.rs
+++ b/src/lex/token.rs
@@ -8,7 +8,7 @@ pub enum TokenType {
Literal(LiteralToken),
Unknown(char),
Eof,
- NL
+ NL,
}
impl TokenType {
@@ -50,15 +50,24 @@ impl Token {
}
pub fn is_integer_literal(&self) -> bool {
- matches!(self.token_type, TokenType::Literal(LiteralToken::IntegerLiteral(_)))
+ matches!(
+ self.token_type,
+ TokenType::Literal(LiteralToken::IntegerLiteral(_))
+ )
}
pub fn is_string_literal(&self) -> bool {
- matches!(self.token_type, TokenType::Literal(LiteralToken::StringLiteral(_)))
+ matches!(
+ self.token_type,
+ TokenType::Literal(LiteralToken::StringLiteral(_))
+ )
}
pub fn is_character_literal(&self) -> bool {
- matches!(self.token_type, TokenType::Literal(LiteralToken::CharacterLiteral(_)))
+ matches!(
+ self.token_type,
+ TokenType::Literal(LiteralToken::CharacterLiteral(_))
+ )
}
pub fn index(&self) -> usize {
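
Each predicate above reduces to matches! with a nested enum pattern; after reformatting, the scrutinee and pattern each get their own line. A reduced sketch of the predicate shape (simplified stand-ins for TokenType and LiteralToken):

    enum Literal { Integer(i64), Str(String) }
    enum Type { Literal(Literal), Eof }

    fn is_integer_literal(t: &Type) -> bool {
        // One nested pattern replaces a two-level match on type and literal kind.
        matches!(t, Type::Literal(Literal::Integer(_)))
    }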
diff --git a/src/lex/types.rs b/src/lex/types.rs
index f9f322f..3b8f536 100644
--- a/src/lex/types.rs
+++ b/src/lex/types.rs
@@ -84,4 +84,4 @@ pub enum LiteralToken {
CharacterLiteral(char),
IntegerLiteral(i64),
StringLiteral(String),
-}
\ No newline at end of file
+}
diff --git a/src/lib.rs b/src/lib.rs
index f0bf18c..7ee6ad2 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -3,4 +3,5 @@ pub mod lex;
mod parser;
extern crate pretty_env_logger;
-#[macro_use] extern crate log;
\ No newline at end of file
+#[macro_use]
+extern crate log;
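
The reflowed `#[macro_use] extern crate log;` is what lets src/lex/lexer.rs call trace! without importing it (its use line only brings in `debug`). Since the 2018 edition the same effect is available as an explicit per-module import, sketched here as a possible follow-up rather than part of this patch:

    // In src/lex/lexer.rs, replacing the crate-wide #[macro_use]:
    use log::{debug, trace};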
diff --git a/src/parser/mod.rs b/src/parser/mod.rs
index 44d0bbf..ae08832 100644
--- a/src/parser/mod.rs
+++ b/src/parser/mod.rs
@@ -9,4 +9,4 @@ impl Parser {
pub fn parse(tokens: TokenStream) {
todo!("Implement parser");
}
-}
\ No newline at end of file
+}
--
GitLab