Commit
PR - Add import results and schedule history tools, update headers and inventory.md (#225)

* update headers with correct names

Signed-off-by: Greg Wootton <[email protected]>

* add output that we are deleting the file

Signed-off-by: Greg Wootton <[email protected]>

* add import results and schedule history tools

Signed-off-by: Greg Wootton <[email protected]>

* update inventory.md listing

Signed-off-by: Greg Wootton <[email protected]>

* add tool deleteorphanedfoldermembers.py

Signed-off-by: Greg Wootton <[email protected]>

---------

Signed-off-by: Greg Wootton <[email protected]>
greg-wootton authored Dec 11, 2024
1 parent 1f0b308 commit b5eca27
Showing 8 changed files with 396 additions and 4 deletions.
7 changes: 7 additions & 0 deletions INVENTORY.md
@@ -45,6 +45,13 @@
| Management | deletefolder.py | Deletes a folder | |
| Management | deletefolderandcontent.py | Deletes a folder, any sub-folders and content | |
| Management | deletepublishdest.py | Deletes a publishing destination | |
| Management | deletejobhistory.py | Removes historical job execution data and logs | |
| Management | deleteorphanedfiles.py | Removes files with a parentUri that does not exist | |
| Management | setjobrequestexpire.py | Defines the expiresAfter parameter for existing job requests | |
| Management | setjobrequestsfolder.py | Sets the folder in which job requests are stored | |
| Management | getimportresults.py | Retrieves results of each import task from an import job | |
| Management | getschedulehistory.py | Retrieves the most recent execution result from scheduled jobs | |
| Management | deleteorphanedfoldermembers.py | Identifies and optionally deletes broken folder members | |
| Configuration | createdomain.py | Create Viya domain | |
| Configuration | createcryptdomain.py | Create an encryption domain | |
| Configuration | modifydomain.py | Modify an existing Viya domain | |
2 changes: 1 addition & 1 deletion deletejobhistory.py
@@ -1,7 +1,7 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
-# jobdelete.py September 2024
+# deletejobhistory.py September 2024
#
# Tool to delete jobExecution jobs (execution history displayed in the "Monitoring" tab of the Jobs and Flows page in Env Mgr)
# without an expiration timestamp set, or that match a given filter.
3 changes: 2 additions & 1 deletion deleteorphanedfiles.py
@@ -1,7 +1,7 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
-# filedelete.py September 2024
+# deleteorphanedfiles.py September 2024
#
# Tool to delete orphaned job files (Files whose parentUri references an undefined object)
#
@@ -117,6 +117,7 @@
    if (httpcode == 404):
        print("Found orphaned file:",id,"with non-existent parentUri:",parenturi)
        if write:
+           print("Deleting file...")
            results = None
            reqtype = 'delete'
            results = callrestapi(reqval,reqtype)
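For context, the surrounding code in deleteorphanedfiles.py probes each file's parentUri and deletes the file only when that probe comes back 404. A minimal sketch of the probe-then-delete pattern, assuming callrestapi returns a (body, status) pair for 'head' requests as it does in deleteorphanedfoldermembers.py below; both URIs here are hypothetical placeholders:

from sharedfunctions import callrestapi

parent_uri = '/folders/folders/0000-hypothetical-parent'  # hypothetical parentUri recorded on the file
file_uri = '/files/files/0000-hypothetical-file'          # hypothetical file object to clean up

_, httpcode = callrestapi(parent_uri, 'head')  # probe existence without fetching a body
if httpcode == 404:                            # parent is gone, so the file is orphaned
    print("Deleting file...")
    callrestapi(file_uri, 'delete')            # remove the orphaned file object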
110 changes: 110 additions & 0 deletions deleteorphanedfoldermembers.py
@@ -0,0 +1,110 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# deleteorphanedfoldermembers.py December 9 2024
#
# Tool to identify and remove folder members that no longer exist.
#
# Change History
#
# 09DEC2024 Initial commit
#
# Copyright © 2024, SAS Institute Inc., Cary, NC, USA. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the License); you may not use this file except in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing permissions and limitations under the License.

import argparse
from sharedfunctions import callrestapi,getfolderid

parser = argparse.ArgumentParser()
parser.add_argument("-f","--folder", help="Specify the folder to search for orphaned memberships.",required=True)
parser.add_argument("-d","--delete", help="Delete the members found.",action="store_true",default=False)
parser.add_argument("-l","--limit", type=int,help="Specify the number of records to pull in each REST API call. Default is 10.",default=10)
parser.add_argument("-p","--pagelimit", type=int,help="Specify the number of pages to pull before stopping. Default is 10.",default=10)
args = parser.parse_args()
folder=args.folder
write=args.delete
limit=args.limit
pagelimit=args.pagelimit

objectlimit=limit * pagelimit

if not folder.endswith('/'):
    folder=folder+'/'
print("Analyzing Folder:",folder)
fid = getfolderid(folder)
reqtype = 'get'
reqval = fid[1]+'/members?recursive=true&limit='+str(limit)
print('Calling REST endpoint: ',reqval)
results=callrestapi(reqval,reqtype)

count=results['count']
print('Found',count,'matching our query.')
if count > objectlimit:
    print("WARN: The configured page size (--limit) and maximum pages (--pagelimit) will only allow processing of",objectlimit,"objects of the total",count)
# Write the URIs to an array.
members = []
for item in results["items"]:
    if item["type"] == "child":
        uri = item["uri"]
        href = next((link["href"] for link in item["links"] if link["rel"] == "delete"), None)
        members.append([uri,href])

# Paged output handling: follow "next" links so paged responses are fully traversed.
next_link = None
pages = 1

for link in results.get("links",[]):
    if link.get("rel") == "next":
        next_link = link.get("href")
        break

while pages < pagelimit and next_link is not None:

    pages += 1

    print('Calling REST endpoint from the "next" link:',next_link)
    # Call the next link
    results=callrestapi(next_link,reqtype)

    for item in results["items"]:
        if item["type"] == "child":
            uri = item["uri"]
            href = next((link["href"] for link in item["links"] if link["rel"] == "delete"), None)
            members.append([uri,href])

    # Check for a next link again.
    next_link = None
    for link in results.get("links",[]):
        if link.get("rel") == "next":
            next_link = link.get("href")
            break
print('Pages traversed:',pages)
print('After filtering non-child objects, found members to check:',len(members))

# Iterate through the URIs to see if they exist

for member in members:
    reqtype = 'head'
    results = None
    results,httpcode = callrestapi(member[0],reqtype)
    # If the member's target does not exist, optionally delete the member.
    if (httpcode == 404):
        print("WARN: Found orphaned member",member[0])
        if write:
            print("Delete option set, attempting to delete member.")
            results = None
            reqtype = 'delete'
            results = callrestapi(member[1],reqtype)
        else:
            print("Rerun with --delete to remove this object, or call this URI with the DELETE HTTP method:",member[1])
    elif (httpcode == 200):
        print("Member",member[0],"exists. HTTP response code:",httpcode)
    else:
        print("Member",member[0],"returned unexpected return code:",httpcode)
139 changes: 139 additions & 0 deletions getimportresults.py
@@ -0,0 +1,139 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# getimportresults.py November 2024
#
# Tool to get detailed results from import history
#
#
# Change History
#
# 27NOV2024 Initial commit
#
#
# Copyright © 2024, SAS Institute Inc., Cary, NC, USA. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the License); you may not use this file except in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing permissions and limitations under the License.
#

import argparse
from sharedfunctions import callrestapi

parser = argparse.ArgumentParser()
parser.add_argument("-i","--id", help="Specify the package ID being imported.")
parser.add_argument("-f","--filter", help="Set a custom filter for objects, for example eq(createdBy,sasdemo).")
parser.add_argument("-l","--limit", type=int,help="Specify the number of records to pull in each REST API call. Default is 10.",default=10)
parser.add_argument("-p","--pagelimit", type=int,help="Specify the number of pages to pull before stopping. Default is 10.",default=10)
args = parser.parse_args()
packageid=args.id
filter=args.filter
limit=args.limit
pagelimit=args.pagelimit

# Build our filter:
if filter is None:
    if packageid is None:
        reqval = '/transfer/importJobs?limit='+str(limit)
    else:
        filter = 'eq(packageUri,"/transfer/packages/%s")' % (packageid)
        reqval = '/transfer/importJobs?filter='+filter+'&limit='+str(limit)
else:
    if packageid is not None:
        filter = 'and(%s,eq(packageUri,"/transfer/packages/%s"))' % (filter,packageid)
    reqval = '/transfer/importJobs?filter='+filter+'&limit='+str(limit)

# Step 1: Get the import jobs
reqtype = 'get'
print('Calling REST endpoint:',reqval)

# Make the rest call using the callrestapi function.
results=callrestapi(reqval,reqtype)

count=results['count']
print('Found',count,'matching our query.')

# Step 2: Pull each importJob ID into an array
ids = []
for item in results.get("items",[]):
    ids.append(item.get("id"))

# Step 3: If we have multiple pages, traverse them.
next_link = None
pages = 1

for link in results.get("links",[]):
    if link.get("rel") == "next":
        next_link = link.get("href")
        break

while pages < pagelimit and next_link is not None:

    pages += 1

    print('Calling REST endpoint:',next_link)
    # Call the next link
    results=callrestapi(next_link,reqtype)

    # Pull the ids into the array
    for item in results.get("items",[]):
        ids.append(item.get("id"))

    # Check for a next link again.
    next_link = None
    for link in results.get("links",[]):
        if link.get("rel") == "next":
            next_link = link.get("href")
            break
print('Pages traversed:',pages)
print('Captured ids:',len(ids))
if len(ids) != count:
    print('WARN: Number of captured IDs does not match the total count:',count,'Increase --limit or --pagelimit to capture all results.')

# We now have an array "ids" of all the importJob ids we need to process.

# Step 4: Iterate through the Ids to display the import job details and the results of each task

# Create an array we can use for a table of results
data = [["Task Name","Resource Type","State","Message"]]

for id in ids:
    # Get the details of the import job
    reqtype = 'get'
    reqval = '/transfer/importJobs/'+id
    results=callrestapi(reqval,reqtype)
    print()
    print("------------------------------------")
    print("Import Job ID: ",results.get("id"))
    print("Import Job Name: ",results.get("name"))
    print("Import Job State: ",results.get("state"))
    # Get the details on the individual tasks
    reqval = '/transfer/importJobs/'+id+'/tasks'
    results=callrestapi(reqval,reqtype)
    # Each task's results are in the items array of this response.
    print()
    for task in results.get("items",[]):
        name = task.get("name")
        rtype = task.get("resourceType")
        state = task.get("state")
        message = task.get("message") or ""  # guard against a null message so len() works when sizing columns
        data.append([name,rtype,state,message])

# Define a length for each column
column_length = [0,0,0,0]

# Update the length based on the maximum length for each column.
for row in data:
    for i in range(4):
        if len(row[i])+2 > column_length[i]:
            column_length[i] = len(row[i])+2

# Print each row using our max column length values to make a readable table.
for row in data:
    formatted_row="".join("%-*s" % (width,value) for value, width in zip(row, column_length))
    print(formatted_row)
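A hypothetical invocation of getimportresults.py, using the flags defined above; the package ID is a placeholder, and the createdBy filter mirrors the example given in the --filter help text:

# Results for a single import package, 20 jobs per REST call, up to 5 pages.
python getimportresults.py -i 0c1b2a3d-hypothetical-package -l 20 -p 5

# Or restrict the report to import jobs created by a given user.
python getimportresults.py -f 'eq(createdBy,sasdemo)'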