diff --git a/examples/rule110.zr b/examples/rule110.zr
index b697ab3..d34058e 100644
--- a/examples/rule110.zr
+++ b/examples/rule110.zr
@@ -18,9 +18,9 @@ func to_str[state: Array]: String
     let out: String = malloc(Array.size(state))
     for i in 0..Array.size(state)
         if Array.nth(state, i)
-            String.set(out, i, String.nth("#", 0))
+            String.set(out, i, '#')
         else
-            String.set(out, i, String.nth(" ", 0))
+            String.set(out, i, ' ')
     return out
 
 func main[] : I64
diff --git a/src/codegen_x86_64.rs b/src/codegen_x86_64.rs
index b0625d3..31e1018 100644
--- a/src/codegen_x86_64.rs
+++ b/src/codegen_x86_64.rs
@@ -511,6 +511,13 @@ Array.free:
             TokenType::Number => {
                 emit!(&mut self.output, " mov rax, {}", token.lexeme);
             }
+            TokenType::Char => {
+                emit!(
+                    &mut self.output,
+                    " mov rax, {}",
+                    token.lexeme.chars().nth(1).unwrap() as u8
+                );
+            }
             TokenType::String => {
                 // TODO: actual string parsing in the tokenizer
                 let value = &token.lexeme[1..token.lexeme.len() - 1]
diff --git a/src/parser.rs b/src/parser.rs
index 9f2e69a..a73b414 100644
--- a/src/parser.rs
+++ b/src/parser.rs
@@ -427,6 +427,7 @@ impl Parser {
     fn primary(&mut self) -> Result {
         if self.match_token(&[
             TokenType::Number,
+            TokenType::Char,
             TokenType::String,
             TokenType::True,
             TokenType::False,
diff --git a/src/std.zr b/src/std.zr
index 557192e..c1ee3bf 100644
--- a/src/std.zr
+++ b/src/std.zr
@@ -12,7 +12,7 @@ func print_i64[x: I64] : I64
     return 0
 
 func String.is_whitespace[c: U8] : Bool
-    return c == 10 || c == 32 || c == 13 || c == 9
+    return c == ' ' || c == 10 || c == 13 || c == 9
 
 func String.concat[a: String, b: String] : String
     let c: String = malloc(strlen(a) + strlen(b) + 1)
@@ -83,7 +83,7 @@ func IO.write_file[path: String, content: String] : I64
     return 0
 
 func U8.parse_i64[c: U8]: I64
-    return c - 48
+    return c - '0'
 
 func I64.to_string[n: I64] : String
     let x: String = malloc(21)
@@ -183,17 +183,17 @@ func Crypto.hex_encode[s: String] : String
     return out
 
 func Crypto.from_hex_digit[d: U8] : I64
-    if d == 97
+    if d == 'a'
         return 10
-    if d == 98
+    if d == 'b'
         return 11
-    if d == 99
+    if d == 'c'
         return 12
-    if d == 100
+    if d == 'd'
         return 13
-    if d == 101
+    if d == 'e'
         return 14
-    if d == 102
+    if d == 'f'
         return 15
     return U8.parse_i64(d)
 
@@ -268,12 +268,11 @@ func Crypto.base64_encode[s: String] : String
         j = j + 4
 
     let padding: I64 = s_len % 3
-    let equals: U8 = String.nth("=", 0)
     if padding == 1
-        String.set(output, j-2, equals)
-        String.set(output, j-1, equals)
+        String.set(output, j-2, '=')
+        String.set(output, j-1, '=')
     else if padding == 2
-        String.set(output, j-1, equals)
+        String.set(output, j-1, '=')
 
     String.set(output, j, 0)
     return output
@@ -285,7 +284,7 @@ func Crypto.base64_decode[s: String] : String
     let i: I64 = 0
     let j: I64 = 0
 
-    while String.nth(s, s_len-1) == 61
+    while String.nth(s, s_len-1) == '='
         s_len = s_len - 1
 
     while i < s_len
diff --git a/src/tokenizer.rs b/src/tokenizer.rs
index 9ebefd2..87792f6 100644
--- a/src/tokenizer.rs
+++ b/src/tokenizer.rs
@@ -30,6 +30,7 @@ pub enum TokenType {
 
     Identifier,
     String,
+    Char,
     Number,
     True,
     False,
@@ -208,6 +209,14 @@ impl Tokenizer {
                     self.add_token(TokenType::Less)
                 }
             }
+            // TODO: escape sequences
+            '\'' => {
+                self.advance();
+                if !self.match_char('\'') {
+                    return error!(self.loc, "expected ' after char literal");
+                }
+                self.add_token(TokenType::Char);
+            }
             '"' => {
                 while !self.eof() && self.peek() != '"' {
                     if self.peek() == '\n' {