LINUX.ORG.RU

История изменений

Исправление rtxtxtrx, (текущая версия) :

Забавное утверждение!!! Я свой код выше немного изменил, добавил 100 строчек и получил полноценный интерпретатор:

from __future__ import annotations

import operator
import re
import typing
from dataclasses import dataclass


class Token(typing.NamedTuple):
    """A lexical token: the token-type name plus the matched source text."""

    # Key into Tokenizer.TOKEN_TYPES (e.g. "NUMBER", "ADDSUB", "EOF").
    type: str
    # The exact substring matched by that token type's regex.
    value: str


class TokenizerError(Exception):
    """Raised when the input contains a character no token pattern matches."""

    pass


class Tokenizer:
    """Regex-based lexer for arithmetic expressions.

    Patterns are tried in declaration (dict insertion) order at the current
    position; the first match wins. The zero-width ``EOF`` pattern matches
    only once every other pattern has failed at the end of the input.
    """

    TOKEN_TYPES = {
        # "ID": re.compile(r"[a-z]\w*", re.I),
        "NUMBER": re.compile(r"\d+(\.\d*)?"),
        "SPACE": re.compile(r"\s+"),
        "ADDSUB": re.compile(r"[-+]"),
        "MULDIV": re.compile(r"[*/]"),
        "LPAREN": re.compile(r"\("),
        "RPAREN": re.compile(r"\)"),
        "EOF": re.compile(r"$"),  # zero-width match at end of input
    }

    def get_token(self) -> Token:
        """Return the next token at ``self.pos`` and advance past it.

        Raises:
            TokenizerError: if no pattern matches at the current position.
        """
        for typ, pat in self.TOKEN_TYPES.items():
            if m := pat.match(self.input, self.pos):
                self.pos = m.end()
                return Token(typ, m.group(0))
        # Fixed typo in the original message ("unpexpected" -> "unexpected").
        raise TokenizerError(f"unexpected character at position: {self.pos}")

    def tokenize(self, input_: str) -> list[Token]:
        """Tokenize ``input_`` and return every token, ending with EOF."""
        self.input = input_
        self.pos = 0
        rv = []
        while True:  # idiomatic spelling of the original `while 1`
            t = self.get_token()
            rv.append(t)
            if t.type == "EOF":
                break
        return rv


class Expr:
    """Abstract base class for AST nodes; subclasses must override eval()."""

    def eval(self) -> float:
        # Bug fix: the original wrote `raise NotImplemented`, but
        # NotImplemented is a sentinel value, not an exception class —
        # raising it produces a confusing TypeError instead of the
        # intended "subclass must override" signal.
        raise NotImplementedError


@dataclass
class Const(Expr):
    """Leaf AST node holding a numeric literal."""

    # The parser constructs this with float(...), so the runtime type is
    # float; the original `int` annotation was inaccurate.
    val: float

    def eval(self) -> float:
        """Return the literal value unchanged."""
        return self.val


@dataclass
class BinOp(Expr):
    """Binary operation AST node: applies ``op`` to two sub-expressions."""

    op: str  # one of "+", "-", "*", "/"
    left: Expr
    right: Expr

    def eval(self) -> float:
        """Evaluate both operands, then apply the operator.

        NOTE(review): "/" maps to floordiv, so division truncates even on
        floats (7/2 -> 3.0) — presumably intentional here; confirm.
        """
        return {
            "+": operator.add,
            "-": operator.sub,
            "*": operator.mul,
            "/": operator.floordiv,
        }[self.op](
            self.left.eval(),
            self.right.eval(),
        )


@dataclass
class UnaryOp(Expr):
    """Unary prefix operation AST node: ``+x`` or ``-x``."""

    op: str  # "+" or "-"
    right: Expr

    def eval(self) -> float:
        """Apply unary plus/minus to the evaluated operand."""
        return {
            "+": operator.pos,
            "-": operator.neg,
        }[self.op](
            self.right.eval(),
        )


class ParseError(Exception):
    """Raised when the token stream does not match the grammar."""

    pass


class Parser:
    """Recursive-descent parser building an Expr AST from a token list.

    Grammar (left-associative, usual precedence):
        expr   := addsub
        addsub := muldiv (("+" | "-") muldiv)*
        muldiv := factor (("*" | "/") factor)*
        factor := NUMBER | ("+" | "-") factor | "(" expr ")"
    """

    def advance(self) -> None:
        """Shift the lookahead: cur_tok <- next_tok, next_tok <- next token."""
        self.cur_tok, self.next_tok = self.next_tok, next(self.token_it, None)

    def match(self, token_type: str) -> bool:
        """Consume the lookahead and return True iff it has ``token_type``."""
        if self.next_tok is not None and self.next_tok.type == token_type:
            self.advance()
            return True
        return False

    def expect(self, token_type: str) -> None:
        """Consume a token of ``token_type`` or raise ParseError."""
        if not self.match(token_type):
            # Bug fix: when the stream is exhausted next_tok is None and the
            # original f-string raised AttributeError instead of ParseError.
            got = self.next_tok.type if self.next_tok is not None else "end of input"
            raise ParseError(
                f"Unexpected token: expected {token_type!r}, got {got!r}"
            )

    def factor(self) -> Expr:
        """factor := NUMBER | ("+" | "-") factor | "(" expr ")"."""
        if self.match("LPAREN"):
            rv = self.expr()
            self.expect("RPAREN")
            return rv
        if self.match("ADDSUB"):
            return UnaryOp(op=self.cur_tok.value, right=self.factor())
        self.expect("NUMBER")
        return Const(val=float(self.cur_tok.value))

    def muldiv(self) -> Expr:
        """muldiv := factor (("*" | "/") factor)* — left-associative."""
        rv = self.factor()
        while self.match("MULDIV"):
            rv = BinOp(left=rv, op=self.cur_tok.value, right=self.factor())
        return rv

    def addsub(self) -> Expr:
        """addsub := muldiv (("+" | "-") muldiv)* — left-associative."""
        rv = self.muldiv()
        while self.match("ADDSUB"):
            rv = BinOp(left=rv, op=self.cur_tok.value, right=self.muldiv())
        return rv

    # The lowest-precedence rule doubles as the entry rule for "( expr )".
    expr = addsub

    def parse(self, tokens: list[Token]) -> Expr:
        """Parse a Tokenizer-produced token list into an AST.

        Junk such as whitespace (and, eventually, comments) is filtered out
        here; the trailing EOF token is required so trailing garbage is
        rejected rather than silently ignored.
        """
        self.token_it = filter(lambda t: t.type != "SPACE", tokens)
        self.next_tok = None
        self.advance()
        rv = self.expr()
        self.expect("EOF")
        # Removed leftover debug `print(rv)` — parse() should be free of
        # output side effects; the caller decides what to display.
        return rv


import sys

if __name__ == "__main__":
    # CLI entry point: the expression comes from the command line, e.g.
    #   python calc.py "2 + 2 * 2"
    # Guarded so importing this module no longer crashes on missing argv[1].
    if len(sys.argv) < 2:
        sys.exit("usage: calc.py <expression>")
    tokens = Tokenizer().tokenize(sys.argv[1])
    print(tokens)
    print(Parser().parse(tokens).eval())

Да лишп - это чистая логика:

BinOp(op='+', left=Const(val=2.0), right=BinOp(op='*', left=Const(val=2.0), right=Const(val=2.0)))
(+ 2.0 (* 2.0 2.0))

Но факт в том, что на питоне любые сложные вещи - выглядят как типичное приложение хипстора в сто строк…

И, да, тайпхинты тут видны только потому что VSCode довольно убог и часто не может определить возвращаемый тип. Осознай это и страдай

Исправление rtxtxtrx, :

Забавное утверждение!!! Я свой коды выше немного изменил, добавил 100 строчек и получил полноценный интерпретатор:

from __future__ import annotations

import operator
import re
import typing
from dataclasses import dataclass


class Token(typing.NamedTuple):
    type: str
    value: str


class TokenizerError(Exception):
    pass


class Tokenizer:
    TOKEN_TYPES = {
        # "ID": re.compile(r"[a-z]\w*", re.I),
        "NUMBER": re.compile(r"\d+(\.\d*)?"),
        "SPACE": re.compile(r"\s+"),
        "ADDSUB": re.compile(r"[-+]"),
        "MULDIV": re.compile(r"[*/]"),
        "LPAREN": re.compile(r"\("),
        "RPAREN": re.compile(r"\)"),
        "EOF": re.compile(r"$"),
    }

    def get_token(self) -> Token:
        for typ, pat in self.TOKEN_TYPES.items():
            if m := pat.match(self.input, self.pos):
                self.pos = m.end()
                return Token(typ, m.group(0))
        raise TokenizerError(f"unpexpected character at position: {self.pos}")

    def tokenize(self, input_: str) -> list[Token]:
        self.input = input_
        self.pos = 0
        rv = []
        while 1:
            t = self.get_token()
            rv.append(t)
            if t.type == "EOF":
                break
        return rv


class Expr:
    def eval(self) -> int:
        raise NotImplemented


@dataclass
class Const(Expr):
    val: int

    def eval(self) -> int:
        return self.val


@dataclass
class BinOp(Expr):
    op: str
    left: Expr
    right: Expr

    def eval(self) -> int:
        return {
            "+": operator.add,
            "-": operator.sub,
            "*": operator.mul,
            "/": operator.floordiv,
        }[self.op](
            self.left.eval(),
            self.right.eval(),
        )


@dataclass
class UnaryOp(Expr):
    op: str
    right: Expr

    def eval(self) -> int:
        return {
            "+": operator.pos,
            "-": operator.neg,
        }[self.op](
            self.right.eval(),
        )


class ParseError(Exception):
    pass


class Parser:
    def advance(self) -> None:
        self.cur_tok, self.next_tok = self.next_tok, next(self.token_it, None)

    def match(self, token_type: str) -> bool:
        if self.next_tok is not None and self.next_tok.type == token_type:
            self.advance()
            return True
        return False

    def expect(self, token_type: str) -> None:
        if not self.match(token_type):
            raise ParseError(
                f"Unexpected token: expected {token_type!r}, got {self.next_tok.type!r}"
            )

    def factor(self) -> Expr:
        if self.match("LPAREN"):
            rv = self.expr()
            self.expect("RPAREN")
            return rv
        if self.match("ADDSUB"):
            return UnaryOp(op=self.cur_tok.value, right=self.factor())
        self.expect("NUMBER")
        return Const(val=float(self.cur_tok.value))

    def muldiv(self) -> Expr:
        rv = self.factor()
        while self.match("MULDIV"):
            rv = BinOp(left=rv, op=self.cur_tok.value, right=self.factor())
        return rv

    def addsub(self) -> Expr:
        rv = self.muldiv()
        while self.match("ADDSUB"):
            rv = BinOp(left=rv, op=self.cur_tok.value, right=self.muldiv())
        return rv

    expr = addsub

    def parse(self, tokens: list[Token]) -> Expr:
        # Всякий мусор типа пробелов и комментариев фильтруют
        self.token_it = filter(lambda t: t.type != "SPACE", tokens)
        self.next_tok = None
        self.advance()
        rv = self.expr()
        self.expect("EOF")
        print(rv)
        return rv


import sys

tokens = Tokenizer().tokenize(sys.argv[1])
print(tokens)
print(Parser().parse(tokens).eval())

Да лишп - это чистая логика:

BinOp(op='+', left=Const(val=2.0), right=BinOp(op='*', left=Const(val=2.0), right=Const(val=2.0)))
(+ 2.0 (* 2.0 2.0))

Но факт в том, что на питоне любые сложные вещи - выглядят как типичное приложение хипстора в сто строк

Исправление rtxtxtrx, :

Забавное утверждение!!! Я свой коды выше немного изменил, добавил 100 строчек и получил полноценный интерпретатор:

from __future__ import annotations

import operator
import re
import typing
from dataclasses import dataclass


class Token(typing.NamedTuple):
    type: str
    value: str


class TokenizerError(Exception):
    pass


class Tokenizer:
    TOKEN_TYPES = {
        # "ID": re.compile(r"[a-z]\w*", re.I),
        "NUMBER": re.compile(r"\d+(\.\d*)?"),
        "SPACE": re.compile(r"\s+"),
        "ADDSUB": re.compile(r"[-+]"),
        "MULDIV": re.compile(r"[*/]"),
        "LPAREN": re.compile(r"\("),
        "RPAREN": re.compile(r"\)"),
        "EOF": re.compile(r"$"),
    }

    def get_token(self) -> Token:
        for typ, pat in self.TOKEN_TYPES.items():
            if m := pat.match(self.input, self.pos):
                self.pos = m.end()
                return Token(typ, m.group(0))
        raise TokenizerError(f"unpexpected character at position: {self.pos}")

    def tokenize(self, input_: str) -> list[Token]:
        self.input = input_
        self.pos = 0
        rv = []
        while 1:
            t = self.get_token()
            rv.append(t)
            if t.type == "EOF":
                break
        return rv


class Expr:
    def eval(self) -> int:
        raise NotImplemented


@dataclass
class Const(Expr):
    val: int

    def eval(self) -> int:
        return self.val


@dataclass
class BinOp(Expr):
    op: str
    left: Expr
    right: Expr

    def eval(self) -> int:
        return {
            "+": operator.add,
            "-": operator.sub,
            "*": operator.mul,
            "/": operator.floordiv,
        }[self.op](
            self.left.eval(),
            self.right.eval(),
        )


@dataclass
class UnaryOp(Expr):
    op: str
    right: Expr

    def eval(self) -> int:
        return {
            "-": operator.pos,
            "+": operator.neg,
        }[self.op](
            self.right.eval(),
        )


class ParseError(Exception):
    pass


class Parser:
    def advance(self) -> None:
        self.cur_tok, self.next_tok = self.next_tok, next(self.token_it, None)

    def match(self, token_type: str) -> bool:
        if self.next_tok is not None and self.next_tok.type == token_type:
            self.advance()
            return True
        return False

    def expect(self, token_type: str) -> None:
        if not self.match(token_type):
            raise ParseError(
                f"Unexpected token: expected {token_type!r}, got {self.next_tok.type!r}"
            )

    def factor(self) -> Expr:
        if self.match("LPAREN"):
            rv = self.expr()
            self.expect("RPAREN")
            return rv
        if self.match("ADDSUB"):
            return UnaryOp(op=self.cur_tok.value, val=self.factor())
        self.expect("NUMBER")
        return Const(val=float(self.cur_tok.value))

    def muldiv(self) -> Expr:
        rv = self.factor()
        while self.match("MULDIV"):
            rv = BinOp(left=rv, op=self.cur_tok.value, right=self.factor())
        return rv

    def addsub(self) -> Expr:
        rv = self.muldiv()
        while self.match("ADDSUB"):
            rv = BinOp(left=rv, op=self.cur_tok.value, right=self.muldiv())
        return rv

    expr = addsub

    def parse(self, tokens: list[Token]) -> Expr:
        # Всякий мусор типа пробелов и комментариев фильтруют
        self.token_it = filter(lambda t: t.type != "SPACE", tokens)
        self.next_tok = None
        self.advance()
        rv = self.expr()
        self.expect("EOF")
        return rv


tokens = Tokenizer().tokenize("2 + 2 * 2")
print(tokens)
print(Parser().parse(tokens).eval()) # 6.0

Да лишп - это чистая логика:

BinOp(op='+', left=Const(val=2.0), right=BinOp(op='*', left=Const(val=2.0), right=Const(val=2.0)))
(+ 2.0 (* 2.0 2.0))

Но факт в том, что на питоне любые сложные вещи - выглядят как типичное приложение хипстора в сто строк

Исправление rtxtxtrx, :

Забавное утверждение!!! Я свой коды выше немного изменил, добавил 100 строчек и получил полноценный интерпретатор:

from __future__ import annotations

import operator
import re
import typing
from dataclasses import dataclass


class Token(typing.NamedTuple):
    type: str
    value: str


class TokenizerError(Exception):
    pass


class Tokenizer:
    TOKEN_TYPES = {
        # "ID": re.compile(r"[a-z]\w*", re.I),
        "NUMBER": re.compile(r"\d+(\.\d*)?"),
        "SPACE": re.compile(r"\s+"),
        "ADDSUB": re.compile(r"[-+]"),
        "MULDIV": re.compile(r"[*/]"),
        "LPAREN": re.compile(r"\("),
        "RPAREN": re.compile(r"\)"),
        "EOF": re.compile(r"$"),
    }

    def get_token(self) -> Token:
        for typ, pat in self.TOKEN_TYPES.items():
            if m := pat.match(self.input, self.pos):
                self.pos = m.end()
                return Token(typ, m.group(0))
        raise TokenizerError(f"unpexpected character at position: {self.pos}")

    def tokenize(self, input_: str) -> list[Token]:
        self.input = input_
        self.pos = 0
        rv = []
        while 1:
            t = self.get_token()
            rv.append(t)
            if t.type == "EOF":
                break
        return rv


class Expr:
    def eval(self) -> int:
        raise NotImplemented


@dataclass
class Const(Expr):
    val: int

    def eval(self) -> int:
        return self.val


@dataclass
class BinOp(Expr):
    op: str
    left: Expr
    right: Expr

    def eval(self) -> int:
        return {
            "+": operator.add,
            "-": operator.sub,
            "*": operator.mul,
            "/": operator.floordiv,
        }[self.op](
            self.left.eval(),
            self.right.eval(),
        )


@dataclass
class UnaryOp(Expr):
    op: str
    right: Expr

    def eval(self) -> int:
        return {
            "-": operator.pos,
            "+": operator.neg,
        }[self.op](
            self.right.eval(),
        )


class ParseError(Exception):
    pass


class Parser:
    def advance(self) -> None:
        self.cur_tok, self.next_tok = self.next_tok, next(self.token_it, None)

    def match(self, token_type: str) -> bool:
        if self.next_tok is not None and self.next_tok.type == token_type:
            self.advance()
            return True
        return False

    def expect(self, token_type: str) -> None:
        if not self.match(token_type):
            raise ParseError(
                f"Unexpected token: expected {token_type!r}, got {self.next_tok.type!r}"
            )

    def factor(self) -> Expr:
        if self.match("LPAREN"):
            rv = self.expr()
            self.expect("RPAREN")
            return rv
        if self.match("ADDSUB"):
            return UnaryOp(op=self.cur_tok.value, val=self.factor())
        self.expect("NUMBER")
        return Const(val=float(self.cur_tok.value))

    def muldiv(self) -> Expr:
        rv = self.factor()
        while self.match("MULDIV"):
            rv = BinOp(left=rv, op=self.cur_tok.value, right=self.factor())
        return rv

    def addsub(self) -> Expr:
        rv = self.muldiv()
        while self.match("ADDSUB"):
            rv = BinOp(left=rv, op=self.cur_tok.value, right=self.muldiv())
        return rv

    expr = addsub

    def parse(self, tokens: list[Token]) -> Expr:
        # Всякий мусор типа пробелов и комментариев фильтруют
        self.token_it = filter(lambda t: t.type != "SPACE", tokens)
        self.next_tok = None
        self.advance()
        rv = self.expr()
        self.expect("EOF")
        return rv


tokens = Tokenizer().tokenize("2 + 2 * 2")
print(tokens)
print(Parser().parse(tokens).eval()) # 6.0

Да лишп - это чистая логика:

BinOp(op='+', left=Const(val=2.0), right=BinOp(op='*', left=Const(val=2.0), right=Const(val=2.0)))
(+ 2.0 (* 2.0 2.0))

Исходная версия rtxtxtrx, :

Забавное утверждение!!! Я свой коды выше немного изменил, добавил 100 строчек и получил полноценный интерпретатор:

from __future__ import annotations

import operator
import re
import typing
from dataclasses import dataclass


class Token(typing.NamedTuple):
    type: str
    value: str


class TokenizerError(Exception):
    pass


class Tokenizer:
    TOKEN_TYPES = {
        # "ID": re.compile(r"[a-z]\w*", re.I),
        "NUMBER": re.compile(r"\d+(\.\d*)?"),
        "SPACE": re.compile(r"\s+"),
        "ADDSUB": re.compile(r"[-+]"),
        "MULDIV": re.compile(r"[*/]"),
        "LPAREN": re.compile(r"\("),
        "RPAREN": re.compile(r"\)"),
        "EOF": re.compile(r"$"),
    }

    def get_token(self) -> Token:
        for typ, pat in self.TOKEN_TYPES.items():
            if m := pat.match(self.input, self.pos):
                self.pos = m.end()
                return Token(typ, m.group(0))
        raise TokenizerError(f"unpexpected character at position: {self.pos}")

    def tokenize(self, input_: str) -> list[Token]:
        self.input = input_
        self.pos = 0
        rv = []
        while 1:
            t = self.get_token()
            rv.append(t)
            if t.type == "EOF":
                break
        return rv


class Expr:
    def eval(self) -> int:
        raise NotImplemented


@dataclass
class Const(Expr):
    val: int

    def eval(self) -> int:
        return self.val


@dataclass
class BinOp(Expr):
    op: str
    left: Expr
    right: Expr

    def eval(self) -> int:
        return {
            "+": operator.add,
            "-": operator.sub,
            "*": operator.mul,
            "/": operator.floordiv,
        }[self.op](
            self.left.eval(),
            self.right.eval(),
        )


@dataclass
class UnaryOp(Expr):
    op: str
    right: Expr

    def eval(self) -> int:
        return {
            "-": operator.pos,
            "+": operator.neg,
        }[self.op](
            self.right.eval(),
        )


class ParseError(Exception):
    pass


class Parser:
    def advance(self) -> None:
        self.cur_tok, self.next_tok = self.next_tok, next(self.token_it, None)

    def match(self, token_type: str) -> bool:
        if self.next_tok is not None and self.next_tok.type == token_type:
            self.advance()
            return True
        return False

    def expect(self, token_type: str) -> None:
        if not self.match(token_type):
            raise ParseError(
                f"Unexpected token: expected {token_type!r}, got {self.next_tok.type!r}"
            )

    def factor(self) -> Expr:
        if self.match("LPAREN"):
            rv = self.expr()
            self.expect("RPAREN")
            return rv
        if self.match("ADDSUB"):
            return UnaryOp(op=self.cur_tok.value, val=self.factor())
        self.expect("NUMBER")
        return Const(val=float(self.cur_tok.value))

    def muldiv(self) -> Expr:
        rv = self.factor()
        while self.match("MULDIV"):
            rv = BinOp(left=rv, op=self.cur_tok.value, right=self.factor())
        return rv

    def addsub(self) -> Expr:
        rv = self.muldiv()
        while self.match("ADDSUB"):
            rv = BinOp(left=rv, op=self.cur_tok.value, right=self.muldiv())
        return rv

    expr = addsub

    def parse(self, tokens: list[Token]) -> Expr:
        # Всякий мусор типа пробелов и комментариев фильтруют
        self.token_it = filter(lambda t: t.type != "SPACE", tokens)
        self.next_tok = None
        self.advance()
        rv = self.expr()
        self.expect("EOF")
        return rv


tokens = Tokenizer().tokenize("2 + 2 * 2")
print(tokens)
print(Parser().parse(tokens).eval()) # 6.0