-
Notifications
You must be signed in to change notification settings - Fork 31
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
PR - Add import results and schedule history tools, update headers an…
…d inventory.md (#225) * update headers with correct names Signed-off-by: Greg Wootton <[email protected]> * add output that we are deleting the file Signed-off-by: Greg Wootton <[email protected]> * add import results and schedule history tools Signed-off-by: Greg Wootton <[email protected]> * update inventory.md listing Signed-off-by: Greg Wootton <[email protected]> * add tool deleteorphanedfoldermembers.py Signed-off-by: Greg Wootton <[email protected]> --------- Signed-off-by: Greg Wootton <[email protected]>
- Loading branch information
1 parent
1f0b308
commit b5eca27
Showing
8 changed files
with
396 additions
and
4 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,110 @@ | ||
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# deleteorphanedfoldermembers.py December 9 2024
#
# Tool to identify and remove folder members that no longer exist.
#
# Change History
#
# 09DEC2024 Initial commit
#
# Copyright © 2024, SAS Institute Inc., Cary, NC, USA. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the License); you may not use this file except in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing permissions and limitations under the License.

import argparse

from sharedfunctions import callrestapi, getfolderid


def _collect_child_members(results, members):
    """Append a [uri, delete_href] pair to *members* for every 'child' item in one result page.

    The 'delete' link may be absent from an item's links, in which case
    delete_href is recorded as None and handled later.
    """
    for item in results.get("items", []):
        if item["type"] == "child":
            uri = item["uri"]
            href = next((link["href"] for link in item["links"] if link["rel"] == "delete"), None)
            members.append([uri, href])


def _find_next_link(results):
    """Return the href of the pagination 'next' link, or None on the last page."""
    for link in results.get("links", []):
        if link.get("rel") == "next":
            return link.get("href")
    return None


parser = argparse.ArgumentParser()
parser.add_argument("-f","--folder", help="Specify the folder to search for orphaned memberships.",required=True)
parser.add_argument("-d","--delete", help="Delete the members found.",action="store_true",default=False)
parser.add_argument("-l","--limit", type=int,help="Specify the number of records to pull in each REST API call. Default is 10.",default=10)
parser.add_argument("-p","--pagelimit", type=int,help="Specify the number of pages to pull before stopping. Default is 10.",default=10)
args = parser.parse_args()
folder = args.folder
write = args.delete
limit = args.limit
pagelimit = args.pagelimit

# Upper bound on objects we can examine given the page size and page cap.
objectlimit = limit * pagelimit

# getfolderid expects a trailing slash on the folder path.
if not folder.endswith('/'):
    folder = folder + '/'
print("Analyzing Folder:", folder)
fid = getfolderid(folder)
reqtype = 'get'
reqval = fid[1] + '/members?recursive=true&limit=' + str(limit)
print('Calling REST endpoint: ', reqval)
results = callrestapi(reqval, reqtype)

count = results['count']
print('Found', count, 'matching our query.')
if count > objectlimit:
    print("WARN: The configured page size (--limit) and maximum pages (--pagelimit) will only allow processing of", objectlimit, "objects of the total", count)

# Gather [uri, delete_href] pairs, following paged output up to pagelimit pages.
members = []
_collect_child_members(results, members)

pages = 1
next_link = _find_next_link(results)

while pages < pagelimit and next_link is not None:
    pages += 1
    print('Calling REST endpoint from the "next" link:', next_link)
    results = callrestapi(next_link, reqtype)
    _collect_child_members(results, members)
    next_link = _find_next_link(results)

print('Pages traversed:', pages)
print('After filtering non-child objects, found members to check:', len(members))

# Probe each member URI with a HEAD request; a 404 means the referenced
# object no longer exists and the membership is orphaned.
# NOTE: callrestapi is assumed to return (result, httpcode) for 'head' requests,
# matching the original usage — confirm against sharedfunctions if it changes.
for member in members:
    results, httpcode = callrestapi(member[0], 'head')
    if httpcode == 404:
        print("WARN: Found orphaned member", member[0])
        if member[1] is None:
            # No 'delete' link was present on this item; we cannot remove it here.
            print("WARN: No delete link available for member", member[0])
        elif write:
            print("Delete option set, attempting to delete member.")
            results = callrestapi(member[1], 'delete')
        else:
            print("Rerun with --delete to remove this object or call this URI with the DELETE HTTP method", member[1])
    elif httpcode == 200:
        print("Member", member[0], "exists. HTTP response code:", httpcode)
    else:
        print("Member", member[0], "returned unexpected return code:", httpcode)
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,139 @@ | ||
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# getimportresults.py November 2024
#
# Tool to get detailed results from import history
#
#
# Change History
#
# 27NOV2024 Initial commit
#
#
# Copyright © 2024, SAS Institute Inc., Cary, NC, USA. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the License); you may not use this file except in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing permissions and limitations under the License.
#

import argparse

from sharedfunctions import callrestapi

parser = argparse.ArgumentParser()
parser.add_argument("-i","--id", help="Specify the package ID being imported.")
parser.add_argument("-f","--filter", help="Set a custom filter for objects, for example eq(createdBy,sasdemo).")
parser.add_argument("-l","--limit", type=int,help="Specify the number of records to pull in each REST API call. Default is 10.",default=10)
parser.add_argument("-p","--pagelimit", type=int,help="Specify the number of pages to pull before stopping. Default is 10.",default=10)
args = parser.parse_args()
packageid = args.id
# Renamed from 'filter' to avoid shadowing the builtin.
customfilter = args.filter
limit = args.limit
pagelimit = args.pagelimit

# Build our filter. Combine the custom filter and the package-id filter with
# and() when both are supplied; reqval is assigned on every path so a custom
# filter without a package id still produces a valid request.
packagefilter = None
if packageid is not None:
    packagefilter = 'eq(packageUri,"/transfer/packages/%s")' % (packageid)

if customfilter is not None and packagefilter is not None:
    combinedfilter = 'and(%s,%s)' % (customfilter, packagefilter)
elif customfilter is not None:
    combinedfilter = customfilter
else:
    combinedfilter = packagefilter

if combinedfilter is None:
    reqval = '/transfer/importJobs?limit=' + str(limit)
else:
    reqval = '/transfer/importJobs?filter=' + combinedfilter + '&limit=' + str(limit)

# Step 1: Get the import jobs
reqtype = 'get'
print('Calling REST endpoint:', reqval)

# Make the rest call using the callrestapi function.
results = callrestapi(reqval, reqtype)

count = results['count']
print('Found', count, 'matching our query.')

# Step 2: Pull each importJob ID into an array
ids = [item.get("id") for item in results.get("items", [])]

# Step 3: If we have multiple pages, traverse them.
next_link = None
pages = 1

for link in results.get("links", []):
    if link.get("rel") == "next":
        next_link = link.get("href")
        break

while pages < pagelimit and next_link is not None:
    pages += 1
    print('Calling REST endpoint:', next_link)
    # Call the next link
    results = callrestapi(next_link, reqtype)

    # Pull the ids into the array
    for item in results.get("items", []):
        ids.append(item.get("id"))

    # Check for a next link again.
    next_link = None
    for link in results.get("links", []):
        if link.get("rel") == "next":
            next_link = link.get("href")
            break

print('Pages traversed:', pages)
print('Captured ids:', len(ids))
if len(ids) != count:
    print('WARN: Captured IDs does not match total count:', count, 'Increase page or limit settings to capture all results.')

# We now have an array "ids" of all the importJob ids we need to process.

# Step 4: Iterate through the ids to display the import job details and the
# results of each task.

# Create an array we can use for a table of results
data = [["Task Name", "Resource Type", "State", "Message"]]

for jobid in ids:
    # Get the details of the import job
    reqtype = 'get'
    reqval = '/transfer/importJobs/' + jobid
    results = callrestapi(reqval, reqtype)
    print()
    print("------------------------------------")
    print("Import Job ID: ", results.get("id"))
    print("Import Job Name: ", results.get("name"))
    print("Import Job State: ", results.get("state"))
    # Get the details on the individual tasks
    reqval = '/transfer/importJobs/' + jobid + '/tasks'
    results = callrestapi(reqval, reqtype)
    # Each task's results are in the items array of this response.
    print()
    for task in results.get("items", []):
        # Coerce every field to a string: a missing value (e.g. no "message")
        # comes back as None, which would crash the len() width calculation.
        row = [task.get("name"), task.get("resourceType"), task.get("state"), task.get("message")]
        data.append(["" if value is None else str(value) for value in row])

# Compute each column's width from the longest cell, plus two spaces padding,
# so the output forms a readable table.
column_length = [max(len(row[i]) for row in data) + 2 for i in range(4)]

# Print each row using our max column length values.
for row in data:
    formatted_row = "".join("%-*s" % (width, value) for value, width in zip(row, column_length))
    print(formatted_row)
Oops, something went wrong.