An Erlang lexer and syntax highlighter in Gleam

Lex characters

+180 -2
+178
src/pearl.gleam
/// Lexes a character literal, which follows a `$` in Erlang source, e.g.
/// `$a` or `$\n`. The leading `$` has already been consumed by the caller.
fn lex_character(lexer: Lexer) -> #(Lexer, Token) {
  case lexer.source {
    // `$\...`: the character is an escape sequence, e.g. `$\n` or `$\x41`.
    "\\" <> source -> {
      let #(lexer, escape_sequence) =
        lex_escape_sequence(advance(lexer, source))
      #(lexer, token.Character("\\" <> escape_sequence))
    }
    _ ->
      case string.pop_grapheme(lexer.source) {
        // `pop_grapheme` returns `#(grapheme, rest)`: the first element is
        // the character itself and the second is the remaining source.
        Ok(#(char, source)) -> #(advance(lexer, source), token.Character(char))
        // `$` at the very end of the input: no character follows it.
        Error(_) -> #(error(lexer, UnterminatedCharacter), token.Character(""))
      }
  }
}

/// Lexes the escape sequence following a `\`. The `\` itself has already
/// been consumed. Returns the lexed sequence without the leading backslash.
fn lex_escape_sequence(lexer: Lexer) -> #(Lexer, String) {
  case lexer.source {
    // Control-character escapes: `\^a`..`\^z`, `\^A`..`\^Z`, and the
    // punctuation forms `\^@ \^[ \^\ \^] \^^ \^_ \^?`.
    "^a" as sequence <> source
    | "^b" as sequence <> source
    | "^c" as sequence <> source
    | "^d" as sequence <> source
    | "^e" as sequence <> source
    | "^f" as sequence <> source
    | "^g" as sequence <> source
    | "^h" as sequence <> source
    | "^i" as sequence <> source
    | "^j" as sequence <> source
    | "^k" as sequence <> source
    | "^l" as sequence <> source
    | "^m" as sequence <> source
    | "^n" as sequence <> source
    | "^o" as sequence <> source
    | "^p" as sequence <> source
    | "^q" as sequence <> source
    | "^r" as sequence <> source
    | "^s" as sequence <> source
    | "^t" as sequence <> source
    | "^u" as sequence <> source
    | "^v" as sequence <> source
    | "^w" as sequence <> source
    | "^x" as sequence <> source
    | "^y" as sequence <> source
    | "^z" as sequence <> source
    | "^A" as sequence <> source
    | "^B" as sequence <> source
    | "^C" as sequence <> source
    | "^D" as sequence <> source
    | "^E" as sequence <> source
    | "^F" as sequence <> source
    | "^G" as sequence <> source
    | "^H" as sequence <> source
    | "^I" as sequence <> source
    | "^J" as sequence <> source
    | "^K" as sequence <> source
    | "^L" as sequence <> source
    | "^M" as sequence <> source
    | "^N" as sequence <> source
    | "^O" as sequence <> source
    | "^P" as sequence <> source
    | "^Q" as sequence <> source
    | "^R" as sequence <> source
    | "^S" as sequence <> source
    | "^T" as sequence <> source
    | "^U" as sequence <> source
    | "^V" as sequence <> source
    | "^W" as sequence <> source
    | "^X" as sequence <> source
    | "^Y" as sequence <> source
    | "^Z" as sequence <> source
    | "^@" as sequence <> source
    | "^[" as sequence <> source
    | "^\\" as sequence <> source
    | "^]" as sequence <> source
    | "^^" as sequence <> source
    | "^_" as sequence <> source
    | "^?" as sequence <> source -> #(advance(lexer, source), sequence)

    // `\x{...}`: a variable-length hexadecimal escape.
    "x{" <> source -> lex_brace_escape_sequence(advance(lexer, source))
    // `\xHH`: a fixed two-digit hexadecimal escape.
    "x" <> source -> lex_hex_escape_sequence(advance(lexer, source))

    // `\O`, `\OO` or `\OOO`: one to three octal digits.
    "0" as char <> source
    | "1" as char <> source
    | "2" as char <> source
    | "3" as char <> source
    | "4" as char <> source
    | "5" as char <> source
    | "6" as char <> source
    | "7" as char <> source ->
      lex_octal_escape_sequence(advance(lexer, source), char)

    // Any other single grapheme escapes itself, e.g. `\n`, `\t`, `\\`.
    _ ->
      case string.pop_grapheme(lexer.source) {
        // `\` at the very end of the input: nothing follows to escape.
        Error(_) -> #(error(lexer, UnterminatedEscapeSequence), "")
        // `pop_grapheme` yields `#(grapheme, rest)`; advance past the
        // grapheme and return it as the escape sequence.
        Ok(#(char, source)) -> #(advance(lexer, source), char)
      }
  }
}

/// Lexes up to two further octal digits after the first digit of a `\OOO`
/// escape. One-, two- and three-digit forms are all valid, so running out
/// of octal digits is not an error.
fn lex_octal_escape_sequence(lexer: Lexer, first: String) -> #(Lexer, String) {
  case extract_octal_digit(lexer) {
    Error(_) -> #(lexer, first)
    Ok(#(lexer, second)) ->
      case extract_octal_digit(lexer) {
        Error(_) -> #(lexer, first <> second)
        Ok(#(lexer, third)) -> #(lexer, first <> second <> third)
      }
  }
}

/// Consumes a single octal digit (`0`-`7`) from the source, if one is next.
fn extract_octal_digit(lexer: Lexer) -> Result(#(Lexer, String), Nil) {
  case lexer.source {
    "0" as char <> source
    | "1" as char <> source
    | "2" as char <> source
    | "3" as char <> source
    | "4" as char <> source
    | "5" as char <> source
    | "6" as char <> source
    | "7" as char <> source -> Ok(#(advance(lexer, source), char))
    _ -> Error(Nil)
  }
}

/// Lexes the two hex digits of a `\xHH` escape. The `x` has already been
/// consumed; both digits are required, so a missing digit is an error.
fn lex_hex_escape_sequence(lexer: Lexer) -> #(Lexer, String) {
  case extract_hex_digit(lexer) {
    Error(_) -> #(error(lexer, UnterminatedEscapeSequence), "x")
    Ok(#(lexer, first)) ->
      case extract_hex_digit(lexer) {
        Error(_) -> #(error(lexer, UnterminatedEscapeSequence), "x" <> first)
        Ok(#(lexer, second)) -> #(lexer, "x" <> first <> second)
      }
  }
}

/// Consumes a single hexadecimal digit (`0`-`9`, `a`-`f`, `A`-`F`) from the
/// source, if one is next.
fn extract_hex_digit(lexer: Lexer) -> Result(#(Lexer, String), Nil) {
  case lexer.source {
    "0" as char <> source
    | "1" as char <> source
    | "2" as char <> source
    | "3" as char <> source
    | "4" as char <> source
    | "5" as char <> source
    | "6" as char <> source
    | "7" as char <> source
    | "8" as char <> source
    | "9" as char <> source
    | "a" as char <> source
    | "b" as char <> source
    | "c" as char <> source
    | "d" as char <> source
    | "e" as char <> source
    | "f" as char <> source
    | "A" as char <> source
    | "B" as char <> source
    | "C" as char <> source
    | "D" as char <> source
    | "E" as char <> source
    | "F" as char <> source -> Ok(#(advance(lexer, source), char))
    _ -> Error(Nil)
  }
}

/// Lexes the contents and closing brace of a `\x{...}` escape. The `x{`
/// prefix has already been consumed. Reaching a newline or the end of the
/// input before the closing `}` leaves the sequence unterminated.
fn lex_brace_escape_sequence(lexer: Lexer) -> #(Lexer, String) {
  case splitter.split(lexer.splitters.brace_escape_sequence, lexer.source) {
    #(before, "}", after) -> #(advance(lexer, after), "x{" <> before <> "}")
    // Hit a newline or the end of the input first. The separator is pushed
    // back into the source so the main loop lexes it normally.
    #(before, separator, after) -> #(
      advance(error(lexer, UnterminatedEscapeSequence), separator <> after),
      "x{" <> before,
    )
  }
}
+2 -2
src/pearl/token.gleam
··· 6 6 ModuleComment(String) 7 7 EndOfFile 8 8 9 - Char(String) 9 + Character(String) 10 10 Integer(String) 11 11 Float(String) 12 12 Atom(name: String, quoted: Bool) ··· 107 107 ModuleComment(contents) -> "%%%" <> contents 108 108 EndOfFile -> "" 109 109 110 - Char(char) -> "$" <> char 110 + Character(char) -> "$" <> char 111 111 Integer(int) -> int 112 112 Float(float) -> float 113 113 Atom(name:, quoted: True) -> "'" <> name <> "'"