Diffstat (limited to 'users/fcuny/exp/monkey')
-rw-r--r--  users/fcuny/exp/monkey/Makefile                |   4 -
-rw-r--r--  users/fcuny/exp/monkey/README.org              |   3 -
-rw-r--r--  users/fcuny/exp/monkey/cmd/repl/main.go        |  12 -
-rw-r--r--  users/fcuny/exp/monkey/go.mod                  |   3 -
-rw-r--r--  users/fcuny/exp/monkey/pkg/lexer/lexer.go      | 152 -
-rw-r--r--  users/fcuny/exp/monkey/pkg/lexer/lexer_test.go | 125 -
-rw-r--r--  users/fcuny/exp/monkey/pkg/repl/repl.go        |  30 -
-rw-r--r--  users/fcuny/exp/monkey/pkg/token/token.go      |  71 -
8 files changed, 0 insertions(+), 400 deletions(-)
diff --git a/users/fcuny/exp/monkey/Makefile b/users/fcuny/exp/monkey/Makefile
deleted file mode 100644
index 61168f3..0000000
--- a/users/fcuny/exp/monkey/Makefile
+++ /dev/null
@@ -1,4 +0,0 @@
-test:
- go test -v ./...
-
-.PHONY: test
diff --git a/users/fcuny/exp/monkey/README.org b/users/fcuny/exp/monkey/README.org
deleted file mode 100644
index d968f4c..0000000
--- a/users/fcuny/exp/monkey/README.org
+++ /dev/null
@@ -1,3 +0,0 @@
-#+TITLE: monkey
-
-Implementation of https://interpreterbook.com/
diff --git a/users/fcuny/exp/monkey/cmd/repl/main.go b/users/fcuny/exp/monkey/cmd/repl/main.go
deleted file mode 100644
index 46b865c..0000000
--- a/users/fcuny/exp/monkey/cmd/repl/main.go
+++ /dev/null
@@ -1,12 +0,0 @@
-package main
-
-import (
- "fmt"
- "monkey/pkg/repl"
- "os"
-)
-
-func main() {
- fmt.Println("Welcome to monkey's REPL.")
- repl.Start(os.Stdin, os.Stdout)
-}
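
For context, the entrypoint above only wired the lexer-backed REPL to stdin/stdout. A hypothetical session (assuming `go run ./cmd/repl` from the module root; not part of the original tree) would have looked like:

    $ go run ./cmd/repl
    Welcome to monkey's REPL.
    >> let five = 5;
    {Type:LET Literal:let}
    {Type:IDENT Literal:five}
    {Type:ASSIGN Literal:=}
    {Type:INT Literal:5}
    {Type:SEMICOLON Literal:;}
    >>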
diff --git a/users/fcuny/exp/monkey/go.mod b/users/fcuny/exp/monkey/go.mod
deleted file mode 100644
index 34c713d..0000000
--- a/users/fcuny/exp/monkey/go.mod
+++ /dev/null
@@ -1,3 +0,0 @@
-module monkey
-
-go 1.12
diff --git a/users/fcuny/exp/monkey/pkg/lexer/lexer.go b/users/fcuny/exp/monkey/pkg/lexer/lexer.go
deleted file mode 100644
index 3e98cf0..0000000
--- a/users/fcuny/exp/monkey/pkg/lexer/lexer.go
+++ /dev/null
@@ -1,152 +0,0 @@
-// Package lexer provides a lexer for the monkey language.
-package lexer
-
-import "monkey/pkg/token"
-
-// Lexer holds the state of the lexer as it walks the input string.
-type Lexer struct {
- input string
- // current position in input
- position int
- // current reading position in input (after a char)
- readPosition int
- // current character under examination
- ch byte
-}
-
-// New returns a new lexer
-func New(input string) *Lexer {
- l := &Lexer{input: input}
- l.readChar()
- return l
-}
-
-// readChar reads the current character and advances our position in the input string.
-func (l *Lexer) readChar() {
- // if we've reached the end of the input, we set the current character to 0,
- // which is the ASCII code for NUL.
- if l.readPosition >= len(l.input) {
- l.ch = 0
- } else {
- l.ch = l.input[l.readPosition]
- }
- l.position = l.readPosition
- l.readPosition++
-}
-
-func (l *Lexer) readIdentifier() string {
- position := l.position
- for isLetter(l.ch) {
- l.readChar()
- }
- return l.input[position:l.position]
-}
-
-func (l *Lexer) readNumber() string {
- position := l.position
- for isDigit(l.ch) {
- l.readChar()
- }
- return l.input[position:l.position]
-}
-
-// skipWhitespace skips whitespace characters, which carry no meaning in monkey.
-func (l *Lexer) skipWhitespace() {
- for l.ch == ' ' || l.ch == '\t' || l.ch == '\n' || l.ch == '\r' {
- l.readChar()
- }
-}
-
-// peekChar returns the character at readPosition (which is the next character)
-// without incrementing `readPosition` or `position`.
-// This is needed to read tokens that are composed of two characters (e.g. `==`).
-func (l *Lexer) peekChar() byte {
- if l.readPosition >= len(l.input) {
- return 0
- }
- return l.input[l.readPosition]
-}
-
-// NextToken consumes and returns the next token from the input.
-func (l *Lexer) NextToken() token.Token {
- var tok token.Token
-
- l.skipWhitespace()
-
- switch l.ch {
- case '=':
- if l.peekChar() == '=' {
- ch := l.ch
- l.readChar()
- literal := string(ch) + string(l.ch)
- tok = token.Token{Type: token.EQ, Literal: literal}
- } else {
- tok = newToken(token.ASSIGN, l.ch)
- }
- case '+':
- tok = newToken(token.PLUS, l.ch)
- case '-':
- tok = newToken(token.MINUS, l.ch)
- case '!':
- if l.peekChar() == '=' {
- ch := l.ch
- l.readChar()
- literal := string(ch) + string(l.ch)
- tok = token.Token{Type: token.NOT_EQ, Literal: literal}
- } else {
- tok = newToken(token.BANG, l.ch)
- }
- case '*':
- tok = newToken(token.ASTERISK, l.ch)
- case '/':
- tok = newToken(token.SLASH, l.ch)
- case '<':
- tok = newToken(token.LT, l.ch)
- case '>':
- tok = newToken(token.GT, l.ch)
-
- case ';':
- tok = newToken(token.SEMICOLON, l.ch)
- case ',':
- tok = newToken(token.COMMA, l.ch)
- case '(':
- tok = newToken(token.LPAREN, l.ch)
- case ')':
- tok = newToken(token.RPAREN, l.ch)
- case '{':
- tok = newToken(token.LBRACE, l.ch)
- case '}':
- tok = newToken(token.RBRACE, l.ch)
- case 0:
- tok.Literal = ""
- tok.Type = token.EOF
- default:
- if isLetter(l.ch) {
- tok.Literal = l.readIdentifier()
- tok.Type = token.LookupIdent(tok.Literal)
- return tok
- } else if isDigit(l.ch) {
- tok.Type = token.INT
- tok.Literal = l.readNumber()
- return tok
- } else {
- tok = newToken(token.ILLEGAL, l.ch)
- }
-
- }
-
- l.readChar()
- return tok
-}
-
-func newToken(tokenType token.TokenType, ch byte) token.Token {
- return token.Token{Type: tokenType, Literal: string(ch)}
-}
-
-func isLetter(ch byte) bool {
- return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_'
-}
-
-func isDigit(ch byte) bool {
- return '0' <= ch && ch <= '9'
-}
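
The subtle part of NextToken is the one-character lookahead: `=` and `!` only become the two-character tokens EQ and NOT_EQ when peekChar sees `=` next. A minimal sketch driving the lexer directly (a hypothetical program, not part of the original tree; it assumes the module layout above):

    package main

    import (
    	"fmt"

    	"monkey/pkg/lexer"
    	"monkey/pkg/token"
    )

    func main() {
    	// A lone '=' lexes as ASSIGN; '==' folds into a single EQ token
    	// because NextToken peeks one character ahead before committing.
    	l := lexer.New("x == 5; y = 1;")
    	for tok := l.NextToken(); tok.Type != token.EOF; tok = l.NextToken() {
    		fmt.Printf("%-10v %q\n", tok.Type, tok.Literal)
    	}
    }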
diff --git a/users/fcuny/exp/monkey/pkg/lexer/lexer_test.go b/users/fcuny/exp/monkey/pkg/lexer/lexer_test.go
deleted file mode 100644
index fdea1d3..0000000
--- a/users/fcuny/exp/monkey/pkg/lexer/lexer_test.go
+++ /dev/null
@@ -1,125 +0,0 @@
-package lexer
-
-import (
- "monkey/pkg/token"
- "testing"
-)
-
-func TestNextToken(t *testing.T) {
- input := `let five = 5;
-let ten = 10;
-
-let add = fn(x, y) {
- x + y
-};
-
-let result = add(five, ten);
-!-/*5;
-10 > 5;
-
-if (5 < 10) {
- return true;
-} else {
- return false;
-}
-
-10 == 10;
-10 != 9;
-`
-
- tests := []struct {
- expectedType token.TokenType
- expectedLiteral string
- }{
- {token.LET, "let"},
- {token.IDENT, "five"},
- {token.ASSIGN, "="},
- {token.INT, "5"},
- {token.SEMICOLON, ";"},
-
- {token.LET, "let"},
- {token.IDENT, "ten"},
- {token.ASSIGN, "="},
- {token.INT, "10"},
- {token.SEMICOLON, ";"},
-
- {token.LET, "let"},
- {token.IDENT, "add"},
- {token.ASSIGN, "="},
- {token.FUNCTION, "fn"},
- {token.LPAREN, "("},
- {token.IDENT, "x"},
- {token.COMMA, ","},
- {token.IDENT, "y"},
- {token.RPAREN, ")"},
- {token.LBRACE, "{"},
- {token.IDENT, "x"},
- {token.PLUS, "+"},
- {token.IDENT, "y"},
- {token.RBRACE, "}"},
- {token.SEMICOLON, ";"},
-
- {token.LET, "let"},
- {token.IDENT, "result"},
- {token.ASSIGN, "="},
- {token.IDENT, "add"},
- {token.LPAREN, "("},
- {token.IDENT, "five"},
- {token.COMMA, ","},
- {token.IDENT, "ten"},
- {token.RPAREN, ")"},
- {token.SEMICOLON, ";"},
-
- {token.BANG, "!"},
- {token.MINUS, "-"},
- {token.SLASH, "/"},
- {token.ASTERISK, "*"},
- {token.INT, "5"},
- {token.SEMICOLON, ";"},
-
- {token.INT, "10"},
- {token.GT, ">"},
- {token.INT, "5"},
- {token.SEMICOLON, ";"},
-
- {token.IF, "if"},
- {token.LPAREN, "("},
- {token.INT, "5"},
- {token.LT, "<"},
- {token.INT, "10"},
- {token.RPAREN, ")"},
- {token.LBRACE, "{"},
- {token.RETURN, "return"},
- {token.TRUE, "true"},
- {token.SEMICOLON, ";"},
- {token.RBRACE, "}"},
- {token.ELSE, "else"},
- {token.LBRACE, "{"},
- {token.RETURN, "return"},
- {token.FALSE, "false"},
- {token.SEMICOLON, ";"},
- {token.RBRACE, "}"},
-
- {token.INT, "10"},
- {token.EQ, "=="},
- {token.INT, "10"},
- {token.SEMICOLON, ";"},
-
- {token.INT, "10"},
- {token.NOT_EQ, "!="},
- {token.INT, "9"},
- {token.SEMICOLON, ";"},
- }
-
- l := New(input)
- for i, tt := range tests {
- tok := l.NextToken()
- if tok.Type != tt.expectedType {
- t.Fatalf("tests[%d] - tokentype wrong. expected=%q, got=%q", i, tt.expectedType, tok.Type)
- }
-
- if tok.Literal != tt.expectedLiteral {
- t.Fatalf("tests[%d] - tokenliteral wrong. expected=%q, got=%q", i, tt.expectedLiteral, tok.Literal)
- }
- }
-}
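
The test above is table-driven: a single lexer instance walks the whole input, and the first mismatch fails with the index of the offending token. It was wired to the Makefile's only target, so either invocation below runs it:

    go test -v ./...
    make test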
diff --git a/users/fcuny/exp/monkey/pkg/repl/repl.go b/users/fcuny/exp/monkey/pkg/repl/repl.go
deleted file mode 100644
index 5e7b1d1..0000000
--- a/users/fcuny/exp/monkey/pkg/repl/repl.go
+++ /dev/null
@@ -1,30 +0,0 @@
-// Package repl provides a REPL for the monkey language.
-package repl
-
-import (
- "bufio"
- "fmt"
- "io"
- lexer "monkey/pkg/lexer"
- token "monkey/pkg/token"
-)
-
-const PROMPT = ">> "
-
-func Start(in io.Reader, out io.Writer) {
- scanner := bufio.NewScanner(in)
- for {
- fmt.Fprint(out, PROMPT)
- scanned := scanner.Scan()
-
- if !scanned {
- return
- }
-
- line := scanner.Text()
- l := lexer.New(line)
- for tok := l.NextToken(); tok.Type != token.EOF; tok = l.NextToken() {
- fmt.Fprintf(out, "%+v\n", tok)
- }
- }
-}
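
Because Start writes to the supplied io.Writer and reads from the supplied io.Reader, the loop can be exercised without a terminal. A hypothetical sketch (not part of the original tree) feeding it a fixed string:

    package main

    import (
    	"bytes"
    	"fmt"
    	"strings"

    	"monkey/pkg/repl"
    )

    func main() {
    	// A fixed string stands in for stdin; the buffer captures the
    	// prompt and the tokens the REPL prints for each line.
    	var out bytes.Buffer
    	repl.Start(strings.NewReader("let five = 5;\n"), &out)
    	fmt.Print(out.String())
    }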
diff --git a/users/fcuny/exp/monkey/pkg/token/token.go b/users/fcuny/exp/monkey/pkg/token/token.go
deleted file mode 100644
index 5eadc5e..0000000
--- a/users/fcuny/exp/monkey/pkg/token/token.go
+++ /dev/null
@@ -1,71 +0,0 @@
-// Package token defines the tokens of the monkey language.
-package token
-
-// TokenType represents the type of the token
-type TokenType string
-
-// Token represents a token, with its type and literal value.
-type Token struct {
- Type TokenType
- Literal string
-}
-
-const (
- ILLEGAL = "ILLEGAL"
- EOF = "EOF"
-
- IDENT = "IDENT"
- INT = "INT"
-
- COMMA = ","
- SEMICOLON = ";"
-
- LPAREN = "("
- RPAREN = ")"
- LBRACE = "{"
- RBRACE = "}"
-
- // The following tokens are keywords
- FUNCTION = "FUNCTION"
- LET = "LET"
- TRUE = "TRUE"
- FALSE = "FALSE"
- IF = "IF"
- ELSE = "ELSE"
- RETURN = "RETURN"
-
- // The following tokens are for operators
- ASSIGN = "="
- PLUS = "+"
- MINUS = "-"
- BANG = "!"
- ASTERISK = "*"
- SLASH = "/"
- LT = "<"
- GT = ">"
-
- EQ = "=="
- NOT_EQ = "!="
-)
-
-// keywords maps the language's reserved words to their TokenType.
-var keywords = map[string]TokenType{
- "fn": FUNCTION,
- "let": LET,
- "true": TRUE,
- "false": FALSE,
- "if": IF,
- "else": ELSE,
- "return": RETURN,
-}
-
-// LookupIdent returns the token type for a given identifier.
-// First we check if the identifier is a keyword. If it is, we return the
-// keyword's TokenType constant. If it isn't, we return token.IDENT, which is
-// the TokenType for all user-defined identifiers.
-func LookupIdent(ident string) TokenType {
- if tok, ok := keywords[ident]; ok {
- return tok
- }
- return IDENT
-}
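
LookupIdent is the single branch point between reserved words and user-defined names, which keeps the lexer's default case small. A quick hypothetical sketch of the distinction:

    package main

    import (
    	"fmt"

    	"monkey/pkg/token"
    )

    func main() {
    	fmt.Println(token.LookupIdent("fn"))     // FUNCTION: a reserved word
    	fmt.Println(token.LookupIdent("result")) // IDENT: a user-defined name
    }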