
Reconcile the ILP code with Tanawan fixes
DariusNafar committed Nov 30, 2024
1 parent a88980f commit 275260e
Showing 7 changed files with 181 additions and 126 deletions.
84 changes: 39 additions & 45 deletions domiknows/graph/logicalConstrain.py
@@ -1,7 +1,7 @@
from collections import namedtuple
from domiknows.solver.ilpConfig import ilpConfig
from domiknows.graph import Concept

from domiknows.solver.lcLossSampleBooleanMethods import lcLossSampleBooleanMethods
import logging
import torch
myLogger = logging.getLogger(ilpConfig['log_name'])
@@ -363,61 +363,55 @@ def createILPConstrains(self, lcName, lcFun, model, v, headConstrain):
# None if headConstrain is True or no ILP constraint created, ILP variable representing the value of ILP constraint, loss calculated
return rVars

def createILPCount(self, model, myIlpBooleanProcessor, v, headConstrain, cOperation, cLimit, integrate,
logicMethodName="COUNT"):
def createILPCount(self, model, myIlpBooleanProcessor, v, headConstrain, cOperation, cLimit, integrate, logicMethodName="COUNT"):
try:
lcVariableNames = [e for e in iter(v)]
except StopIteration:
pass

if cLimit == None:
cLimit = 1

lcVariableName0 = lcVariableNames[0] # First variable
lcVariableSet0 = v[lcVariableName0]

zVars = [] # Output ILP variables
# for i, _ in enumerate(lcVariableSet0):
# varsSetup = []
#
# var = []
# for currentV in iter(v):
# var.extend(v[currentV][i])
#
# if len(var) == 0:
# if not (headConstrain or integrate):
# zVars.append([None])
#
# continue
#
# if headConstrain or integrate:
# varsSetup.extend(var)
# else:
# varsSetup.append(var)
varsSetup = []

var = [currentV[0] for currentV in iter(lcVariableSet0)]

if headConstrain or integrate:
varsSetup.extend(var)
else:
varsSetup.append(var)
if type(myIlpBooleanProcessor)==lcLossSampleBooleanMethods:

# -- Use ILP variable setup to create constrains
if headConstrain or integrate:
zVars.append([myIlpBooleanProcessor.countVar(model, *varsSetup, onlyConstrains=headConstrain,
limitOp=cOperation, limit=cLimit,
logicMethodName=logicMethodName)])
zVars,varsSetup = [],[]
var = [currentV[0] for currentV in iter(lcVariableSet0)]
if headConstrain or integrate:
varsSetup.extend(var)
else:
varsSetup.append(var)
if headConstrain or integrate:
zVars.append([myIlpBooleanProcessor.countVar(model, *varsSetup, onlyConstrains=headConstrain,limitOp=cOperation, limit=cLimit,logicMethodName=logicMethodName)])
else:
for current_var in varsSetup:
zVars.append([myIlpBooleanProcessor.countVar(model, *current_var, onlyConstrains=headConstrain,limitOp=cOperation, limit=cLimit,logicMethodName=logicMethodName)])
if model is not None:
model.update()
return zVars
else:
for current_var in varsSetup:
zVars.append([myIlpBooleanProcessor.countVar(model, *current_var, onlyConstrains=headConstrain,
limitOp=cOperation, limit=cLimit,
logicMethodName=logicMethodName)])

if model is not None:
model.update()

return zVars
zVars = [] # Output ILP variables
for i, _ in enumerate(lcVariableSet0):
varsSetup = []
var = []
for currentV in iter(v):
var.extend(v[currentV][i])
if len(var) == 0:
if not (headConstrain or integrate):
zVars.append([None])
continue
if headConstrain or integrate:
varsSetup.extend(var)
else:
varsSetup.append(var)
if headConstrain or integrate:
zVars.append([myIlpBooleanProcessor.countVar(model, *varsSetup, onlyConstrains=headConstrain,limitOp=cOperation, limit=cLimit,logicMethodName=logicMethodName)])
else:
for current_var in varsSetup:
zVars.append([myIlpBooleanProcessor.countVar(model, *current_var, onlyConstrains=headConstrain,limitOp=cOperation, limit=cLimit,logicMethodName=logicMethodName)])
if model is not None:
model.update()
return zVars
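Note on semantics: each countVar call above encodes a cardinality constraint over the ILP variables it receives. A minimal conceptual sketch (not part of this commit; the comparison-operator strings for limitOp are assumed for illustration):

# Count constraint over binary indicator variables: sum(x_i) <limitOp> limit.
# With ">=" and limit=2 this plays the role of atLeastL(2); "<=" corresponds
# to atMostL and "==" to exactL.
def count_holds(indicators, limit_op, limit):
    total = sum(indicators)
    return {"<=": total <= limit, ">=": total >= limit, "==": total == limit}[limit_op]

print(count_holds([1, 0, 1, 1], ">=", 2))  # True: at least two indicators are on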

def createILPAccumulatedCount(self, model, myIlpBooleanProcessor, v, headConstrain, cOperation, cLimit, integrate, logicMethodName = "COUNT"):

Expand Down
2 changes: 1 addition & 1 deletion test_regr/examples/PMDExistL/graph.py
@@ -28,4 +28,4 @@ def get_graph(args):
else:
exactL(expected_value, args.expected_atLeastL)

return graph, a, b, a_contain_b, b_answer
return graph, a, b, a_contain_b, b_answer
47 changes: 32 additions & 15 deletions test_regr/examples/PMDExistL/main.py
@@ -9,19 +9,20 @@
from domiknows.sensor import Sensor
from domiknows.program.metric import MacroAverageTracker
from domiknows.program.loss import NBCrossEntropyLoss
from domiknows.program.lossprogram import PrimalDualProgram
from domiknows.program.lossprogram import PrimalDualProgram, SampleLossProgram
from domiknows.program.model.pytorch import SolverModel

from utils import TestTrainLearner,return_contain,create_dataset,evaluate_model,train_model
from utils import TestTrainLearner, return_contain, create_dataset, evaluate_model, train_model

sys.path.append('../../../../domiknows/')
from graph import get_graph

Sensor.clear()

def parse_arguments() -> argparse.Namespace:

def parse_arguments() -> argparse.Namespace:
parser = argparse.ArgumentParser(description="Machine Learning Experiment")
parser.add_argument("--counting_tnorm", choices=["G", "P", "L", "SP"], default="G", help="The tnorm method to use for the counting constraints")
parser.add_argument("--counting_tnorm", choices=["G", "P", "L", "SP"], default="SP", help="The tnorm method to use for the counting constraints")
parser.add_argument("--atLeastL", default=False, type=bool, help="Use at least L constraint")
parser.add_argument("--atMostL", default=False, type=bool, help="Use at most L constraint")
parser.add_argument("--epoch", default=500, type=int, help="Number of training epochs")
@@ -30,10 +31,11 @@ def parse_arguments() -> argparse.Namespace:
parser.add_argument("--expected_value", default=0, type=int, help="Expected value")
parser.add_argument("--N", default=10, type=int, help="N parameter")
parser.add_argument("--M", default=8, type=int, help="M parameter")
parser.add_argument("--model", default="sampling", type=str, help="Model Types [Sampling/PMD]")
parser.add_argument("--sample_size", default=-1, type=int, help="Sample size for sampling program")
return parser.parse_args()



def setup_graph(args: argparse.Namespace, a: Any, b: Any, a_contain_b: Any, b_answer: Any) -> None:
a["index"] = ReaderSensor(keyword="a")
b["index"] = ReaderSensor(keyword="b")
@@ -42,28 +44,41 @@ def setup_graph(args: argparse.Namespace, a: Any, b: Any, a_contain_b: Any, b_an
b[b_answer] = ModuleLearner(a_contain_b, "index", module=TestTrainLearner(args.N), device="cpu")
b[b_answer] = FunctionalSensor(a_contain_b, "temp_answer", forward=lambda _, label: label, label=True)


def main(args: argparse.Namespace):
np.random.seed(0)
torch.manual_seed(0)

graph, a, b, a_contain_b, b_answer = get_graph(args)
dataset = create_dataset(args.N, args.M)
setup_graph(args, a, b, a_contain_b, b_answer)
program = PrimalDualProgram(
graph, SolverModel, poi=[a, b, b_answer],
inferTypes=['local/argmax'],
loss=MacroAverageTracker(NBCrossEntropyLoss()),
beta=10, device='cpu', tnorm="L", counting_tnorm=args.counting_tnorm
)
if args.model == "sampling":
# print("sampling")
program = SampleLossProgram(
graph, SolverModel, poi=[a, b, b_answer],
inferTypes=['local/argmax'],
loss=MacroAverageTracker(NBCrossEntropyLoss()),
sample=True,
sampleSize=args.sample_size,
sampleGlobalLoss=False,
beta=1, device='cpu', tnorm="L", counting_tnorm=args.counting_tnorm
)
else:
program = PrimalDualProgram(
graph, SolverModel, poi=[a, b, b_answer],
inferTypes=['local/argmax'],
loss=MacroAverageTracker(NBCrossEntropyLoss()),
beta=10, device='cpu', tnorm="L", counting_tnorm=args.counting_tnorm)

expected_value = args.expected_value
train_model(program, dataset, num_epochs=2)

before_count = evaluate_model(program, dataset, b_answer).get(expected_value, 0)
train_model(program, dataset, args.epoch)
train_model(program, dataset, args.epoch, constr_loss_only=True)

pass_test_case = True
actual_count = evaluate_model(program, dataset, b_answer).get(expected_value, 0)


if args.atLeastL:
pass_test_case &= (actual_count >= args.expected_atLeastL)
if args.atMostL:
@@ -72,9 +87,11 @@ def main(args: argparse.Namespace):
pass_test_case &= (actual_count == args.expected_atLeastL)

print(f"Test case {'PASSED' if pass_test_case else 'FAILED'}")
print(f"expected_value, before_count, actual_count,pass_test_case): {expected_value, before_count, actual_count,pass_test_case}")
print(
f"expected_value, before_count, actual_count,pass_test_case): {expected_value, before_count, actual_count, pass_test_case}")
return pass_test_case, before_count, actual_count


if __name__ == "__main__":
args = parse_arguments()
main(args)
main(args)
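For reference, a hypothetical way to drive the new sampling path programmatically rather than from the command line; the Namespace fields mirror parse_arguments(), and the concrete values (including sample_size=50) are illustrative only, not defaults from this commit:

from argparse import Namespace

import main  # test_regr/examples/PMDExistL/main.py; the __main__ guard means importing does not launch a run

args = Namespace(
    counting_tnorm="G", atLeastL=True, atMostL=False, epoch=500,
    expected_atLeastL=2, expected_atMostL=5, expected_value=0,
    N=10, M=8, model="sampling", sample_size=50,
)
passed, before_count, actual_count = main.main(args)
print(passed, before_count, actual_count)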
52 changes: 36 additions & 16 deletions test_regr/examples/PMDExistL/testcase.py
@@ -6,11 +6,12 @@
from concurrent.futures import ProcessPoolExecutor, as_completed
from collections import defaultdict


def run_test(params):
# Convert params to command-line arguments
args = []
for key, value in params.items():
if not str(value)=="False":
if not str(value) == "False":
args.extend([f'--{key}', str(value)])

# Get the path to the Python interpreter in the current virtual environment
@@ -27,23 +28,12 @@ def run_test(params):
except subprocess.CalledProcessError as e:
return params, False, e.stderr

def run_tests():
"""Run tests with different combinations of input arguments."""
# Define the parameter combinations to test
param_combinations = {
'counting_tnorm': ["G" , "P","SP","L" ],
'atLeastL': [True, False],
'atMostL': [True, False],
'epoch': [1000],
'expected_atLeastL': [2],
'expected_atMostL': [5],
'expected_value': [0,1],
'N': [10],
'M': [8]
}

def run_tests(param_combinations):
"""Run tests with different combinations of input arguments."""
# Generate all combinations of parameters
keys, values = zip(*param_combinations.items())
print(keys, values)
combinations = [dict(zip(keys, v)) for v in itertools.product(*values)]

# Run tests for each combination using ProcessPoolExecutor with max_workers=4
@@ -74,5 +64,35 @@ def run_tests():
print(f"Parameters: {params}")
print(f"Error output: {output}")


if __name__ == "__main__":
run_tests()
# Define the parameter combinations to test PMD
PMD_combinations = {
'counting_tnorm': ["G", "P", "SP", "L"],
'atLeastL': [True, False],
'atMostL': [True, False],
'epoch': [1000],
'expected_atLeastL': [2],
'expected_atMostL': [5],
'expected_value': [0, 1],
'N': [10],
'M': [8],
'model': ["PMD"],
}
# run_tests(PMD_combinations)

# Define the parameter combinations to test sampling model
sampling_combinations = {
'counting_tnorm': ["G"],
'atLeastL': [True, False],
'atMostL': [True, False],
'epoch': [1000],
'expected_atLeastL': [1, 2, 3],
'expected_atMostL': [3, 4, 5],
'expected_value': [0, 1],
'N': [10],
'M': [8],
'model': ["sampling"],
"sample_size": [10, 20, 50, 100, 200, -1] # maximum 2^8 = 256
}
run_tests(sampling_combinations)
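run_tests expands whichever grid it receives into individual configurations via itertools.product; a minimal standalone sketch of that expansion with a toy grid (not one of the grids above):

import itertools

grid = {"counting_tnorm": ["G", "P"], "expected_value": [0, 1]}
keys, values = zip(*grid.items())
combos = [dict(zip(keys, v)) for v in itertools.product(*values)]
print(len(combos))  # 4 configurations
print(combos[0])    # {'counting_tnorm': 'G', 'expected_value': 0}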
17 changes: 14 additions & 3 deletions test_regr/examples/PMDExistL/utils.py
@@ -7,10 +7,12 @@
from domiknows.program.model.base import Mode
from domiknows.program.lossprogram import PrimalDualProgram


class DummyLearner(TorchLearner):
def forward(self, x: torch.Tensor) -> torch.Tensor:
return torch.stack((torch.ones(len(x)) * 4, torch.ones(len(x)) * 6), dim=-1)


class TestTrainLearner(nn.Module):
def __init__(self, input_size: int):
super().__init__()
@@ -22,17 +24,21 @@ def __init__(self, input_size: int):
def forward(self, _, x: torch.Tensor) -> torch.Tensor:
return self.layers(x)


def return_contain(b: torch.Tensor, _: Any) -> torch.Tensor:
return torch.ones(len(b)).unsqueeze(-1)


def create_dataset(N: int, M: int) -> List[Dict[str, Any]]:
return [{
"a": [0],
"b": [((np.random.rand(N) - np.random.rand(N))).tolist() for _ in range(M)],
"label": [0] * M
"label": [1] * M
}]

def train_model(program: PrimalDualProgram, dataset: List[Dict[str, Any]], num_epochs: int) -> None:

def train_model(program: PrimalDualProgram, dataset: List[Dict[str, Any]],
num_epochs: int, constr_loss_only: bool = False) -> None:
program.model.train()
program.model.reset()
program.cmodel.train()
@@ -49,7 +55,11 @@ def train_model(program: PrimalDualProgram, dataset: List[Dict[str, Any]], num_e
mloss, _, *output = program.model(data)
closs, *_ = program.cmodel(output[1])

loss = mloss * 0 + (closs if torch.is_tensor(closs) else 0)
if constr_loss_only:
loss = mloss * 0 + (closs if torch.is_tensor(closs) else 0)
else:
loss = mloss

if loss.item() < 0:
print("Negative loss", loss.item())
break
Expand All @@ -58,6 +68,7 @@ def train_model(program: PrimalDualProgram, dataset: List[Dict[str, Any]], num_e
opt.step()
copt.step()


def evaluate_model(program: PrimalDualProgram, dataset: List[Dict[str, Any]], b_answer: Any) -> Dict[int, int]:
program.model.eval()
program.model.reset()
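One detail of the utils.py change worth spelling out: with constr_loss_only=True only the constraint loss drives the update, while the model loss is multiplied by zero, presumably so the result stays a tensor on the existing graph even when closs is not a tensor. A standalone sketch of that selection with dummy tensors (illustrative values only):

import torch

mloss = torch.tensor(0.7, requires_grad=True)  # stand-in for the model loss
closs = torch.tensor(0.3, requires_grad=True)  # stand-in for the constraint loss
constr_loss_only = True

if constr_loss_only:
    loss = mloss * 0 + (closs if torch.is_tensor(closs) else 0)
else:
    loss = mloss

loss.backward()
print(loss.item())  # 0.3 when constr_loss_only is True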
