Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Fix the counting constraints in the primal dual #398

Open
wants to merge 16 commits into
base: develop
Choose a base branch
from
Open
Changes from 1 commit
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Prev Previous commit
Next Next commit
tidy up the code
DariusNafar committed Sep 23, 2024
commit 4a45a76f1e296c6e09e50b6594097a810b120ddb
30 changes: 8 additions & 22 deletions test_regr/examples/PMDExistL/main.py
Original file line number Diff line number Diff line change
@@ -1,37 +1,27 @@
import sys
import argparse
import time
from typing import List, Dict, Any
import itertools

from typing import Any
import numpy as np
import torch
from torch import nn
from tqdm import tqdm

from domiknows.sensor.pytorch.sensors import ReaderSensor
from domiknows.sensor.pytorch.relation_sensors import EdgeSensor, FunctionalSensor
from domiknows.sensor.pytorch.learners import ModuleLearner, TorchLearner
from domiknows.program import SolverPOIProgram
from domiknows.program.model.base import Mode
from domiknows.sensor.pytorch.learners import ModuleLearner
from domiknows.sensor import Sensor
from domiknows.program.metric import MacroAverageTracker
from domiknows.program.loss import NBCrossEntropyLoss
from domiknows.program.lossprogram import PrimalDualProgram
from domiknows.program.model.pytorch import SolverModel

from utils import DummyLearner,TestTrainLearner,return_contain,create_dataset,evaluate_model,train_model
from utils import TestTrainLearner,return_contain,create_dataset,evaluate_model,train_model
sys.path.append('../../../../domiknows/')
from graph import get_graph

# Clear existing sensors
Sensor.clear()


def parse_arguments() -> argparse.Namespace:
"""Parse command-line arguments."""

parser = argparse.ArgumentParser(description="Machine Learning Experiment")
parser.add_argument("--counting_tnorm", choices=["G", "P", "L", "SP", "LSE"], default="G", help="The tnorm method to use for the counting constraints")
parser.add_argument("--counting_tnorm", choices=["G", "P", "L", "SP"], default="G", help="The tnorm method to use for the counting constraints")
parser.add_argument("--atLeastL", default=False, type=bool, help="Use at least L constraint")
parser.add_argument("--atMostL", default=False, type=bool, help="Use at most L constraint")
parser.add_argument("--epoch", default=500, type=int, help="Number of training epochs")
@@ -44,24 +34,21 @@ def parse_arguments() -> argparse.Namespace:



def setup_graph(args: argparse.Namespace, graph: Any, a: Any, b: Any, a_contain_b: Any, b_answer: Any) -> None:
"""Set up the graph structure and sensors."""
def setup_graph(args: argparse.Namespace, a: Any, b: Any, a_contain_b: Any, b_answer: Any) -> None:
a["index"] = ReaderSensor(keyword="a")
b["index"] = ReaderSensor(keyword="b")
b["temp_answer"] = ReaderSensor(keyword="label")
b[a_contain_b] = EdgeSensor(b["index"], a["index"], relation=a_contain_b, forward=return_contain)
b[b_answer] = ModuleLearner(a_contain_b, "index", module=TestTrainLearner(args.N), device="cpu")
b[b_answer] = FunctionalSensor(a_contain_b, "temp_answer", forward=lambda _, label: label, label=True)



def main(args: argparse.Namespace) -> bool:
def main(args: argparse.Namespace):
np.random.seed(0)
torch.manual_seed(0)

graph, a, b, a_contain_b, b_answer = get_graph(args)
dataset = create_dataset(args.N, args.M)
setup_graph(args, graph, a, b, a_contain_b, b_answer)
setup_graph(args, a, b, a_contain_b, b_answer)
program = PrimalDualProgram(
graph, SolverModel, poi=[a, b, b_answer],
inferTypes=['local/argmax'],
@@ -88,7 +75,6 @@ def main(args: argparse.Namespace) -> bool:
print(f"expected_value, before_count, actual_count,pass_test_case): {expected_value, before_count, actual_count,pass_test_case}")
return pass_test_case, before_count, actual_count


if __name__ == "__main__":
args = parse_arguments()
main(args)
21 changes: 1 addition & 20 deletions test_regr/examples/PMDExistL/utils.py
Original file line number Diff line number Diff line change
@@ -1,32 +1,17 @@
import sys
import argparse
import time
from typing import List, Dict, Any
import itertools

import numpy as np
import torch
from torch import nn
from tqdm import tqdm

from domiknows.sensor.pytorch.sensors import ReaderSensor
from domiknows.sensor.pytorch.relation_sensors import EdgeSensor, FunctionalSensor
from domiknows.sensor.pytorch.learners import ModuleLearner, TorchLearner
from domiknows.program import SolverPOIProgram
from domiknows.sensor.pytorch.learners import TorchLearner
from domiknows.program.model.base import Mode
from domiknows.sensor import Sensor
from domiknows.program.metric import MacroAverageTracker
from domiknows.program.loss import NBCrossEntropyLoss
from domiknows.program.lossprogram import PrimalDualProgram
from domiknows.program.model.pytorch import SolverModel

class DummyLearner(TorchLearner):
    """A stand-in learner whose output is constant regardless of input values."""

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # Emit a fixed two-logit row (4.0, 6.0) for every element of x,
        # so downstream argmax always selects the second class.
        n = len(x)
        first_logit = torch.full((n,), 4.0)
        second_logit = torch.full((n,), 6.0)
        return torch.stack((first_logit, second_logit), dim=-1)

class TestTrainLearner(nn.Module):
"""A test learner with a simple neural network."""
def __init__(self, input_size: int):
super().__init__()
self.layers = nn.Sequential(
@@ -38,19 +23,16 @@ def forward(self, _, x: torch.Tensor) -> torch.Tensor:
return self.layers(x)

def return_contain(b: torch.Tensor, _: Any) -> torch.Tensor:
    """Return a column vector of ones with one row per element of `b`.

    The second argument is accepted only to satisfy the edge-sensor calling
    convention and is ignored.
    """
    # Shape (len(b), 1): equivalent to ones(len(b)).unsqueeze(-1).
    return torch.ones((len(b), 1))

def create_dataset(N: int, M: int) -> List[Dict[str, Any]]:
    """Build a single-example dataset: one 'a' node containing M 'b' rows.

    Each 'b' row holds N features drawn as the difference of two uniform
    samples (values in (-1, 1), centered on 0); every label is 0.
    """
    feature_rows = [
        (np.random.rand(N) - np.random.rand(N)).tolist()
        for _ in range(M)
    ]
    return [{"a": [0], "b": feature_rows, "label": [0] * M}]

def train_model(program: PrimalDualProgram, dataset: List[Dict[str, Any]], num_epochs: int) -> None:
"""Train the model using the provided program and dataset."""
program.model.train()
program.model.reset()
program.cmodel.train()
@@ -77,7 +59,6 @@ def train_model(program: PrimalDualProgram, dataset: List[Dict[str, Any]], num_e
copt.step()

def evaluate_model(program: PrimalDualProgram, dataset: List[Dict[str, Any]], b_answer: Any) -> Dict[int, int]:
"""Evaluate the model and return the prediction counts."""
program.model.eval()
program.model.reset()
program.cmodel.eval()