Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account-related emails.

Already on GitHub? Sign in to your account

Removed Future Warnings from pd.df.fillna. #3

Merged
merged 10 commits into the base branch from the source branch
Oct 3, 2024
9 changes: 7 additions & 2 deletions src/vtlengine/API/__init__.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
from pathlib import Path
from typing import Any, Union, List, Optional

import pandas as pd
from antlr4 import CommonTokenStream, InputStream
from antlr4.error.ErrorListener import ErrorListener

Expand All @@ -16,6 +17,8 @@
from vtlengine.files.output import TimePeriodRepresentation, \
format_time_period_external_representation

pd.options.mode.chained_assignment = None


class __VTLSingleErrorListener(ErrorListener):
"""
Expand Down Expand Up @@ -125,7 +128,8 @@ class takes all of this information and checks it with the ast generated to
interpreter = InterpreterAnalyzer(datasets=structures, value_domains=vd,
external_routines=ext_routines,
only_semantic=True)
result = interpreter.visit(ast)
with pd.option_context('future.no_silent_downcasting', True):
result = interpreter.visit(ast)
return result


Expand Down Expand Up @@ -253,7 +257,8 @@ def run(script: Union[str, Path], data_structures: Union[dict, Path, List[Union[
datapoints_paths=path_dict,
output_path=output_folder,
time_period_representation=time_period_representation)
result = interpreter.visit(ast)
with pd.option_context('future.no_silent_downcasting', True):
result = interpreter.visit(ast)

# Applying time period output format
if output_folder is None:
Expand Down
3 changes: 3 additions & 0 deletions src/vtlengine/AST/Grammar/lexer.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,9 @@
import sys
from io import StringIO

import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)

from antlr4 import *
from typing.io import TextIO

Expand Down
2 changes: 2 additions & 0 deletions src/vtlengine/files/parser/__init__.py
Original file line number Diff line number Diff line change
@@ -1,3 +1,4 @@
import warnings
from csv import DictReader
from pathlib import Path
# from time import time
Expand Down Expand Up @@ -132,6 +133,7 @@ def _parse_boolean(value: str):

def _validate_pandas(components: Dict[str, Component], data: pd.DataFrame,
dataset_name: str) -> pd.DataFrame:
warnings.filterwarnings("ignore", category=FutureWarning)
# Identifier checking
id_names = [comp_name for comp_name, comp in components.items() if comp.role == Role.IDENTIFIER]

Expand Down
5 changes: 5 additions & 0 deletions tests/Additional/test_additional.py
Original file line number Diff line number Diff line change
@@ -1,3 +1,4 @@
import warnings
from pathlib import Path
from typing import Union

Expand All @@ -20,6 +21,8 @@ class AdditionalHelper(TestHelper):

ds_input_prefix = "DS_"

warnings.filterwarnings("ignore", category=FutureWarning)

@classmethod
def BaseScalarTest(cls, text: str, code: str, reference_value: Union[int, float, str]):
'''
Expand All @@ -42,6 +45,8 @@ class StringOperatorsTest(AdditionalHelper):

maxDiff = None

warnings.filterwarnings("ignore", category=FutureWarning)

def test_1(self):
'''
Basic behaviour for two datasets.
Expand Down
12 changes: 11 additions & 1 deletion tests/Additional/test_additional_scalars.py
Original file line number Diff line number Diff line change
@@ -1,3 +1,4 @@
import warnings
from pathlib import Path

import pytest
Expand All @@ -8,7 +9,6 @@
from vtlengine.Exceptions import SemanticError
from vtlengine.Interpreter import InterpreterAnalyzer


class AdditionalScalarsTests(TestHelper):
base_path = Path(__file__).parent
filepath_json = base_path / "data" / "DataStructure" / "input"
Expand All @@ -18,6 +18,8 @@ class AdditionalScalarsTests(TestHelper):

ds_input_prefix = "DS_"

warnings.filterwarnings("ignore", category=FutureWarning)


string_params = [
("substr(null, null, null)", ""),
Expand Down Expand Up @@ -229,6 +231,7 @@ class AdditionalScalarsTests(TestHelper):

@pytest.mark.parametrize("text, reference", string_params)
def test_string_operators(text, reference):
warnings.filterwarnings("ignore", category=FutureWarning)
expression = f"DS_r := {text};"
ast = create_ast(expression)
interpreter = InterpreterAnalyzer({})
Expand All @@ -239,6 +242,7 @@ def test_string_operators(text, reference):

@pytest.mark.parametrize("text, reference", instr_op_params)
def test_instr_op_test(text, reference):
warnings.filterwarnings("ignore", category=FutureWarning)
expression = f"DS_r := {text};"
ast = create_ast(expression)
interpreter = InterpreterAnalyzer({})
Expand All @@ -249,6 +253,7 @@ def test_instr_op_test(text, reference):

@pytest.mark.parametrize('text, exception_message', string_exception_param)
def test_exception_string_op(text, exception_message):
warnings.filterwarnings("ignore", category=FutureWarning)
expression = f"DS_r := {text};"
ast = create_ast(expression)
interpreter = InterpreterAnalyzer({})
Expand All @@ -258,6 +263,7 @@ def test_exception_string_op(text, exception_message):

@pytest.mark.parametrize('text, reference', numeric_params)
def test_numeric_operators(text, reference):
warnings.filterwarnings("ignore", category=FutureWarning)
expression = f"DS_r := {text};"
ast = create_ast(expression)
interpreter = InterpreterAnalyzer({})
Expand All @@ -271,6 +277,7 @@ def test_numeric_operators(text, reference):

@pytest.mark.parametrize('text, exception_message', numeric_exception_param)
def test_exception_numeric_op(text, exception_message):
warnings.filterwarnings("ignore", category=FutureWarning)
expression = f"DS_r := {text};"
ast = create_ast(expression)
interpreter = InterpreterAnalyzer({})
Expand All @@ -280,6 +287,7 @@ def test_exception_numeric_op(text, exception_message):

@pytest.mark.parametrize('code, text', ds_param)
def test_datasets_params(code, text):
warnings.filterwarnings("ignore", category=FutureWarning)
datasets = AdditionalScalarsTests.LoadInputs(code, 1)
reference = AdditionalScalarsTests.LoadOutputs(code, ["DS_r"])
expression = f"DS_r := {text};"
Expand All @@ -291,6 +299,7 @@ def test_datasets_params(code, text):

@pytest.mark.parametrize('text, reference', boolean_params)
def test_bool_op_test(text, reference):
warnings.filterwarnings("ignore", category=FutureWarning)
expression = f"DS_r := {text};"
ast = create_ast(expression)
interpreter = InterpreterAnalyzer({})
Expand All @@ -300,6 +309,7 @@ def test_bool_op_test(text, reference):

@pytest.mark.parametrize('text, reference', comparison_params)
def test_comp_op_test(text, reference):
warnings.filterwarnings("ignore", category=FutureWarning)
expression = f"DS_r := {text};"
ast = create_ast(expression)
interpreter = InterpreterAnalyzer({})
Expand Down
2 changes: 1 addition & 1 deletion tests/Bugs/test_bugs.py
Original file line number Diff line number Diff line change
Expand Up @@ -819,7 +819,7 @@ def test_GL_169_11(self):
def test_GL_169_12(self):
"""
Status: BUG
Expression: DS_r := DS_1 [ calc m1 := match_characters(DS_1#Me_1, "^\d{4}\-(0[1-9]|1[012])\-(0[1-9]|[12][0-9]|3[01])") ];
Expression: DS_r := DS_1 [ calc m1 := match_characters(DS_1#Me_1, r'^\\d{4}\\-(0[1-9]|1[012])\\-(0[1-9]|[12][0-9]|3[01])') ];
Description: Check unicode regex.
Git Issue: bug-185-match-unicode.
Goal: Check Result.
Expand Down
4 changes: 4 additions & 0 deletions tests/Helper.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,8 @@
format_time_period_external_representation
from vtlengine.files.parser import load_datapoints

import warnings


class TestHelper(TestCase):
"""
Expand Down Expand Up @@ -120,6 +122,7 @@ def BaseTest(cls, code: str, number_inputs: int, references_names: List[str],
'''

'''
warnings.filterwarnings("ignore", category=FutureWarning)
if text is None:
text = cls.LoadVTL(code)
ast = create_ast(text)
Expand Down Expand Up @@ -183,6 +186,7 @@ def NewSemanticExceptionTest(cls, code: str, number_inputs: int, exception_code:
vd_names: List[str] = None, sql_names: List[str] = None,
text: Optional[str] = None, scalars: Dict[str, Any] = None):
# Data Loading.--------------------------------------------------------
warnings.filterwarnings("ignore", category=FutureWarning)
if text is None:
text = cls.LoadVTL(code)
input_datasets = cls.LoadInputs(code=code, number_inputs=number_inputs)
Expand Down
2 changes: 1 addition & 1 deletion tests/IfThenElse/test_if_then_else.py
Original file line number Diff line number Diff line change
Expand Up @@ -473,7 +473,7 @@ def test_GL_424_1(self):
Status: OK
Expression: INPUT_CHECK_REGEX :=
if
(match_characters(BIS_LOC_STATS # OBS_VALUE,"[0-9]*[.,]?[0-9]*\Z"))
(match_characters(BIS_LOC_STATS # OBS_VALUE,r'[0-9]*[.,]?[0-9]*\\Z'))
then
length(BIS_LOC_STATS # OBS_VALUE) > 0 and length(BIS_LOC_STATS # OBS_VALUE) < 20
else
Expand Down
5 changes: 5 additions & 0 deletions tests/ReferenceManual/test_reference_manual.py
Original file line number Diff line number Diff line change
Expand Up @@ -42,6 +42,8 @@
from vtlengine.Model import Component, Role, Dataset, ValueDomain
from vtlengine.files.parser import load_datapoints

import warnings

base_path = Path(__file__).parent
input_dp_dir = base_path / 'data/DataSet/input'
reference_dp_dir = base_path / 'data/DataSet/output'
Expand Down Expand Up @@ -190,6 +192,7 @@ def load_dataset(dataPoints, dataStructures, dp_dir, param):
@pytest.mark.parametrize('param', params)
def test_reference(input_datasets, reference_datasets, ast, param, value_domains):
# try:
warnings.filterwarnings("ignore", category=FutureWarning)
input_datasets = load_dataset(*input_datasets, dp_dir=input_dp_dir, param=param)
reference_datasets = load_dataset(*reference_datasets, dp_dir=reference_dp_dir, param=param)
interpreter = InterpreterAnalyzer(input_datasets, value_domains=value_domains)
Expand All @@ -202,6 +205,7 @@ def test_reference(input_datasets, reference_datasets, ast, param, value_domains
@pytest.mark.parametrize('param', params)
def test_reference_defined_operators(input_datasets, reference_datasets,
ast_defined_operators, param, value_domains):
warnings.filterwarnings("ignore", category=FutureWarning)
input_datasets = load_dataset(*input_datasets, dp_dir=input_dp_dir, param=param)
reference_datasets = load_dataset(*reference_datasets, dp_dir=reference_dp_dir, param=param)
interpreter = InterpreterAnalyzer(input_datasets, value_domains=value_domains)
Expand All @@ -212,6 +216,7 @@ def test_reference_defined_operators(input_datasets, reference_datasets,
@pytest.mark.parametrize('param', exceptions_tests)
def test_reference_exceptions(input_datasets, reference_datasets, ast, param):
# try:
warnings.filterwarnings("ignore", category=FutureWarning)
input_datasets = load_dataset(*input_datasets, dp_dir=input_dp_dir, param=param)
interpreter = InterpreterAnalyzer(input_datasets)
with pytest.raises(Exception, match="Operation not allowed for multimeasure datasets"):
Expand Down