diff --git a/src/core/tokenizer.py b/src/core/tokenizer.py
index 764308f..844b9e2 100644
--- a/src/core/tokenizer.py
+++ b/src/core/tokenizer.py
@@ -44,6 +44,6 @@ def _space_removal(original: str) -> str:
 
     @staticmethod
     def _is_number(exp: str) -> bool:
-        return exp.endswith(tuple([str(i) for i in range(1, 10)] + ['.', 'x']))
+        return exp.endswith(tuple([str(i) for i in range(0, 10)] + ['.', 'x']))
 
     @staticmethod
@@ -84,7 +84,7 @@ def _unary_minus_convert(exp: str):
             if i < len(tokens):
                 tokens [i] = '( 0 u'
                 tokens [i + 1] = tokens [i + 1] + " )"
-            elif tokens [i - 1] == ')' or tokens [i - 1] == 'x' or Tokenizer._is_number (tokens [i - 1]):
+            elif tokens [i - 1] == ')' or tokens [i - 1] == 'x' or Tokenizer._is_number (tokens [i - 1].strip()):
                 tokens [i] = '-'
             else:
                 if i < len(tokens):
@@ -275,7 +275,7 @@ def st_tokenize(input:str) -> list[str]:
 
         # Intelligently convert expressions based on common rules
         exp = Tokenizer._algebraic_parenthesis_product_convert(exp)
-        exp = Tokenizer._unary_minus_convert(exp)
         exp = Tokenizer._relative_percentage_convert(exp)
+        exp = Tokenizer._unary_minus_convert(exp)
         exp = Tokenizer._space_removal(exp.strip())
         return exp.split(' ')