-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathgen-wiki.py
218 lines (179 loc) · 7.68 KB
/
gen-wiki.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
import argparse
import json
import os
import shutil
import subprocess
from collections import defaultdict

import boto3
import yaml
MODULES_NAME = []
def compare_directory_names(directory_names):
# Create a defaultdict to store matching directory groups
grouped_directories = defaultdict(list)
# Iterate through each directory name and extract the base directory name
for directory in directory_names:
parts = directory.split('-')
base_name = parts[0] # Assume the base directory name is the last part
grouped_directories[base_name].append(directory)
# Create the result list in the required format
matching_directories = [{base_name: dirs} for base_name, dirs in grouped_directories.items()]
return matching_directories
def process_files(module_dir, directory):
files = os.listdir(f"{module_dir}/{directory}")
for file in files:
if file.endswith('.md'):
return f"{module_dir}/{directory}/{file}"
def remove_module_from_list(module_list, module_to_remove):
clean_least = [i for i in module_list if i not in module_to_remove]
return clean_least
def create_str_for_multiple_modules(paths, module_name, module_full_name):
content = ""
print(module_name)
if any(paths):
content += f"## {module_name.capitalize()}\n\n"
for index, path in enumerate(paths):
if path != None:
with open(path, 'r') as f:
tmp = f.read()
if tmp.strip():
content += f"### {module_full_name[index].capitalize()}\n\n"
tmp = tmp.replace("# ", "#### ",)
content += tmp
MODULES_NAME.append(module_name)
return content
def create_modules_documentation(output_dir):
modules_dir = '../modules'
markdown_content = ""
output_file = os.path.join(output_dir, f"Modules-documentation.md")
os.makedirs(output_dir, exist_ok=True)
directory_names = [d for d in os.listdir(modules_dir) if os.path.isdir(os.path.join(modules_dir, d))]
multiple_directory = compare_directory_names(directory_names)
for modules in multiple_directory:
paths = []
for module, directories in modules.items():
for directory in directories:
paths.append(process_files(modules_dir, directory))
if paths.__len__() > 1:
print(paths, module, directories)
markdown_content += create_str_for_multiple_modules(paths, module, directories)
with open(output_file, 'w') as f:
f.write(markdown_content)
print(f"Markdown file generated for module: {module}")
def load_config(config_file):
with open(config_file, 'r') as f:
config = yaml.safe_load(f)
return config
def read_json_file(file_path):
try:
with open(file_path, 'r') as file:
data = json.load(file)
return data
except Exception as e:
print(f"Error reading file: {e}")
return None
def extract_attributes(data, resource_type, attributes):
extracted_data = []
for resource in data.get('resources', []):
if resource.get('type') == resource_type:
for instance in resource.get('instances', []):
extracted_item = {}
if not attributes:
extracted_item['id'] = instance.get('attributes', {}).get('id')
for attr in attributes:
if attr:
extracted_item[attr] = instance.get('attributes', {}).get(attr)
extracted_data.append(extracted_item)
return extracted_data
def generate_markdown_table(header, data):
if not data:
return ""
headers = list(data[0].keys())
table = f"### {header}\n\n"
table += "| " + " | ".join(headers) + " |\n"
table += "| " + " | ".join(['---'] * len(headers)) + " |\n"
for item in data:
row = "| " + " | ".join(f"`{str(item.get(h, ''))}`" for h in headers) + " |\n"
table += row
table += "\n"
return table
def extract_repo_name(file_path):
with open(file_path, 'r') as file:
for line in file:
if "customer_name" in line:
customer_name = line.split('=', 1)[1].strip().strip('"')
return customer_name
def download_bucket(directory):
s3 = boto3.client('s3')
bucket_name = "terraform-remote-state-" + extract_repo_name("root.hcl")
try:
s3.download_file(bucket_name, directory, "tmp_file.json")
return "tmp_file.json"
except Exception as e:
print(f"Error: {e}")
return None
def process_directory(directory, config):
json_content = download_bucket(directory)
if not json_content:
return None
data = read_json_file(json_content)
markdown_content = ""
for resource_type, settings in config.get('resources', {}).items():
header = settings.get('header', resource_type)
attributes = settings.get('attributes', [])
extracted_data = extract_attributes(data, resource_type, attributes)
table = generate_markdown_table(header, extracted_data)
if table:
markdown_content += table
if markdown_content:
directory = directory.split('/', 1)[1]
markdown_content = f"## {directory.capitalize()}\n\n" + markdown_content
return markdown_content
def process_environment(environment, config, output_dir):
markdown_content = ""
for root, dirs, files in os.walk(f"{environment}"):
if '.terragrunt-cache' in root:
continue
content = process_directory(root, config)
if content:
markdown_content += content
for file in files:
if file.endswith('.md'):
with open(os.path.join(root, file), 'r') as f:
content = f.read()
if content.strip():
markdown_content += f"\n### Docs\n\n"
markdown_content += content
if markdown_content.strip() != f"# {environment.capitalize()} Environment\n\n":
print(os.getcwd())
output_file = f"{output_dir}/{environment.capitalize()}-environment.md"
os.makedirs(output_dir, exist_ok=True)
with open(output_file, 'w') as f:
f.write(markdown_content)
print(f"Markdown file generated for {environment}: {output_file}")
def list_md_files(directory):
"""List all .md files in a given directory."""
md_files = []
for root, _, files in os.walk(directory):
for file in files:
if file.endswith('.md'):
md_files.append(os.path.join(root, file))
return md_files
def copy_wiki(md_files):
for md_file in md_files:
subprocess.run(['cp', "terraform/live/" + md_file, 'temp_wiki'], check=True)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Extract secrets using terragrunt state pull based on a YAML configuration.')
parser.add_argument('config_file', help='Path to the YAML configuration file')
parser.add_argument('output_dir', help='Path to the output directory')
parser.add_argument('--repo_name', default=os.getenv("REPO_NAME"), help='Name of the repo you want to create a doc for')
parser.add_argument('--shared_dir', default='shared', help='Directory for shared resources')
args = parser.parse_args()
config = load_config(args.config_file)
os.chdir("terraform/live")
directories = [d for d in os.listdir(os.getcwd()) if os.path.isdir(os.path.join(os.getcwd(), d))]
for directory in directories:
process_environment(directory, config, args.output_dir)
md_files = list_md_files(args.output_dir)
create_modules_documentation(args.output_dir)
md_files += list_md_files(args.output_dir)
os.chdir("../..")
copy_wiki(md_files)