max_clique_bnb_evaluator.py
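"""Benchmark runner for maximum-clique solvers.

For every graph listed in the input benchmarks file, runs the selected
solver (branch-and-bound "BnB" or branch-and-cut "BnC"), logs progress,
writes a per-graph JSON summary, and dumps an aggregate CSV report.
"""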
import argparse
import json

from tqdm import tqdm

from graph import MCPGraph
from algorithms.branch_and_bound import BNBSolver
from algorithms.branch_and_cut import BNCSolver

# The wildcard import is kept from the original; it is expected to provide
# the helpers used below: os, osp (os.path), namedtuple, logger, timeit,
# read_benchmarks, dump_results_to_csv, and RESULTS_DIR.
from utils import *

def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--solver",
        "-s",
        type=str,
        help="solver to benchmark",
        choices=["BnB", "BnC"],
        default="BnB",
    )
    parser.add_argument(
        "--input_data_file",
        "-i",
        type=str,
        help="path to the file with input benchmark data",
        default="medium.txt",
    )
    # NOTE: this flag is parsed but never consumed; main() currently dumps
    # the CSV report via dump_results_to_csv("report", results).
    parser.add_argument(
        "--output_results_dump",
        "-o",
        type=str,
        help="path to the output file",
        default="results.csv",
    )
    return parser.parse_args()
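

# `timeit` is pulled in via the wildcard import from utils. It is assumed to
# wrap the function so that the call site receives a (result, elapsed_time)
# pair; main() unpacks `graph, work_time = benchmark(graph, args.solver)`
# accordingly.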
@timeit
def benchmark(graph: namedtuple, solver_name: str):
    """Build the MCPGraph, run the selected solver, and record its results."""
    graph = MCPGraph(data=graph)
    graph.independent_sets_generation()
    graph.filter_covered_not_connected()
    solver = (
        BNBSolver(graph=graph)
        if solver_name == "BnB"
        else BNCSolver(graph=graph)
    )
    solver.solve()
    graph.maximum_clique_size_found = solver.maximum_clique_size
    graph.is_solution_is_clique = solver.is_solution_is_clique
    return graph


def main():
    args = parse_args()
    benchmark_graphs = read_benchmarks(args.input_data_file)
    column_names = [
        "Graph Name",
        "Correct Max Clique",
        "Graph Complexity",
        "Found Max Clique",
        "Is Clique",
        "Consumed Time",
    ]
    results = [column_names]

    # One log file per benchmark set; the ".txt" extension is dropped
    # from the input file name.
    logger_output_path = osp.join(
        osp.dirname(__file__),
        "benchmark_logs",
        f"{args.input_data_file[:-4]}.log",
    )
    if os.path.exists(logger_output_path):
        os.remove(logger_output_path)
    logger.add(logger_output_path)

    for graph in tqdm(benchmark_graphs):
        # Drop the file extension from the benchmark graph name.
        graph_name = graph.GraphName[:-4]
        logger.info(f"{args.solver} started for {graph_name}!")
        graph, work_time = benchmark(graph, args.solver)
        results.append(
            [
                str(graph.name),
                str(graph.maximum_clique_size_gt),
                str(graph.complexity_type),
                str(graph.maximum_clique_size_found),
                str(graph.is_solution_is_clique),
                str(work_time),
            ],
        )
        curr_result = {
            "Right Maximum Clique Size": str(graph.maximum_clique_size_gt),
            "Found Maximum Clique Size": str(graph.maximum_clique_size_found),
            "Consumed Time": str(work_time),
            "Is Clique": str(graph.is_solution_is_clique),
            "Graph Complexity": str(graph.complexity_type),
        }
        per_graph_result_dir = osp.join(
            RESULTS_DIR,
            "per_graph_results",
            args.solver,
        )
        if not osp.exists(per_graph_result_dir):
            os.makedirs(per_graph_result_dir)
        with open(
            osp.join(per_graph_result_dir, f"{graph_name}.json"),
            "w",
        ) as file:
            json.dump(curr_result, file, indent=4)
        logger.info(f"{args.solver} finished for {graph_name}!")

    dump_results_to_csv("report", results)


if __name__ == "__main__":
    main()
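
# Example invocation, using the argparse defaults shown above (file names are
# illustrative; adjust paths to your checkout):
#   python max_clique_bnb_evaluator.py --solver BnB --input_data_file medium.txt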