diff --git a/Resources/Cloud/CloudDirector.py b/Resources/Cloud/CloudDirector.py
index 454fe62b..7c3762d9 100644
--- a/Resources/Cloud/CloudDirector.py
+++ b/Resources/Cloud/CloudDirector.py
@@ -37,7 +37,7 @@ def configureFromSection( self, mySection ):
"""
VMDirector.configureFromSection( self, mySection )
- def _submitInstance( self, imageName, workDir, endpoint, instanceID ):
+ def _submitInstance( self, imageName, endpoint, CPUTime, instanceID ):
"""
Real backend method to submit a new Instance of a given Image
It implements the submission decision logic over the multiple endpoints available for a given imageName; first approach: FirstFit (see the sketch after this file's diff)
@@ -66,11 +66,11 @@ def _submitInstance( self, imageName, workDir, endpoint, instanceID ):
if ( driver == 'occi-0.9' or driver == 'occi-0.8'):
- instanceType = gConfig.getValue( "%s/%s/%s" % ( endpointsPath, endpoint, 'instanceType' ), "" )
- imageDriver = gConfig.getValue( "%s/%s/%s" % ( endpointsPath, endpoint, 'imageDriver' ), "" )
-
oima = OcciImage( imageName, endpoint )
- result = oima.startNewInstance( instanceType, imageDriver )
+ connOcci = oima.connectOcci()
+ if not connOcci[ 'OK' ]:
+ return connOcci
+ result = oima.startNewInstance( CPUTime )
if not result[ 'OK' ]:
return result
idInstance = result['Value']
@@ -82,7 +82,6 @@ def _submitInstance( self, imageName, workDir, endpoint, instanceID ):
if not connNova[ 'OK' ]:
return connNova
result = nima.startNewInstance( instanceID )
-
if not result[ 'OK' ]:
return result
return S_OK( result['Value'] )
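The docstring above says endpoint selection follows a FirstFit approach over the endpoints configured for the image. A minimal sketch of what first-fit means here, with a hypothetical `endpointHasFreeSlot` helper standing in for the real CS / VirtualMachineDB checks (names are illustrative, not the actual VMDIRAC API):

```python
from DIRAC import S_OK, S_ERROR

def firstFitEndpoint( endpoints, endpointHasFreeSlot ):
  """ Return the first endpoint able to accept a new VM.
      endpoints is the ordered list configured for the image in the CS.
  """
  for endpoint in endpoints:
    # first endpoint with spare capacity wins; no load balancing is attempted
    if endpointHasFreeSlot( endpoint ):
      return S_OK( endpoint )
  return S_ERROR( 'No endpoint with a free slot for this image' )
```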
diff --git a/Resources/Cloud/VMDirector.py b/Resources/Cloud/VMDirector.py
index a892bc9c..33a763b4 100644
--- a/Resources/Cloud/VMDirector.py
+++ b/Resources/Cloud/VMDirector.py
@@ -88,7 +88,7 @@ def configureFromSection( self, mySection ):
self.runningPods[runningPodName]['Priority'] = int( runningPodDict['Priority'] )
self.runningPods[runningPodName]['CloudEndpoints'] = runningPodDict['CloudEndpoints']
- def submitInstance( self, imageName, workDir, endpoint, numVMsToSubmit, runningPodName ):
+ def submitInstance( self, imageName, endpoint, numVMsToSubmit, runningPodName ):
"""
"""
# warning: instanceID is the DIRAC instance id, while uniqueID is unique for a particular endpoint
@@ -111,7 +111,12 @@ def submitInstance( self, imageName, workDir, endpoint, numVMsToSubmit, runningP
return newInstance
instanceID = newInstance[ 'Value' ]
- dictVMSubmitted = self._submitInstance( imageName, workDir, endpoint, instanceID )
+ runningRequirementsDict = self.runningPods[runningPodName]['RequirementsDict']
+ cpuTime = runningRequirementsDict['CPUTime']
+ if not cpuTime:
+ return S_ERROR( 'Unknown CPUTime in Requirements of the RunningPod %s' % runningPodName )
+
+ dictVMSubmitted = self._submitInstance( imageName, endpoint, cpuTime, instanceID )
if not dictVMSubmitted[ 'OK' ]:
return dictVMSubmitted
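Both `submitInstance` and the `_submitInstance` backends follow the standard DIRAC return-value convention, which is why every call site above checks `result[ 'OK' ]` before touching `result[ 'Value' ]`. For reference:

```python
from DIRAC import S_OK, S_ERROR

result = S_OK( 42 )         # {'OK': True, 'Value': 42}
failed = S_ERROR( 'boom' )  # {'OK': False, 'Message': 'boom'}

if not result[ 'OK' ]:
  print result[ 'Message' ]
else:
  print result[ 'Value' ]
```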
diff --git a/Security/VmProperties.py b/Security/VmProperties.py
new file mode 100644
index 00000000..92c6a818
--- /dev/null
+++ b/Security/VmProperties.py
@@ -0,0 +1,10 @@
+# $HeadURL$
+__RCSID__ = "$Id$"
+#
+#
+VM_WEB_OPERATION = "VmWebOperation"
+#
+VM_RPC_OPERATION = "VmRpcOperation"
+
+#...............................................................................
+#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF
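These two properties are matched against the `properties` list of the remote credentials, exactly as `VirtualMachineManagerHandler` does further down in this patch. A minimal sketch of the gating pattern:

```python
from DIRAC import S_OK, S_ERROR
from VMDIRAC.Security import VmProperties

def guardedOperation( credDict ):
  # credDict comes from RequestHandler.getRemoteCredentials()
  if VmProperties.VM_RPC_OPERATION not in credDict[ 'properties' ]:
    return S_ERROR( 'Unauthorized RPC' )
  return S_OK()
```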
diff --git a/Security/__init__.py b/Security/__init__.py
new file mode 100644
index 00000000..1c0c52a4
--- /dev/null
+++ b/Security/__init__.py
@@ -0,0 +1,17 @@
+# $HeadURL$
+__RCSID__ = "$Id$"
+
+import GSI
+
+requiredGSIVersion = "0.3.9"
+if GSI.version.__version__ < requiredGSIVersion:
+ raise Exception( "pyGSI is not the latest version (installed %s required %s)" % ( GSI.version.__version__, requiredGSIVersion ) )
+
+GSI.SSL.set_thread_safe()
+
+nid = GSI.crypto.create_oid( "1.2.42.42", "diracGroup", "DIRAC group" )
+GSI.crypto.add_x509_extension_alias( nid, 78 ) #Alias to netscape comment, text based extension
+nid = GSI.crypto.create_oid( "1.3.6.1.4.1.8005.100.100.5", "vomsExtensions", "VOMS extension" )
+GSI.crypto.add_x509_extension_alias( nid, 78 ) #Alias to netscape comment, text based extension
+
+import VMDIRAC.Security.VmProperties
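One caveat on the version check above: it compares version strings lexicographically, which mis-orders double-digit components. Whether pyGSI releases ever reach such numbers is an assumption, but a tuple-based comparison, sketched below, would be robust either way:

```python
def versionTuple( version ):
  # '0.3.9' -> (0, 3, 9), so (0, 3, 10) > (0, 3, 9) compares correctly
  return tuple( int( part ) for part in version.split( '.' ) )

assert versionTuple( '0.3.10' ) > versionTuple( '0.3.9' )
assert '0.3.10' < '0.3.9'  # the plain string comparison gets this wrong
```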
diff --git a/Web/controllers/systems/virtualmachines.py b/Web/controllers/systems/virtualmachines.py
index 0fb8955b..2ab2b7a7 100644
--- a/Web/controllers/systems/virtualmachines.py
+++ b/Web/controllers/systems/virtualmachines.py
@@ -187,6 +187,19 @@ def getRunningInstancesBEPHistory( self ):
data.append( rL )
return S_OK( data )
+ @jsonify
+ def checkVmWebOperation( self ):
+ try:
+ operation = str( request.params[ 'operation' ] )
+ except Exception, e:
+ print e
+ return S_ERROR( "Oops! Couldn't understand the request" )
+ rpcClient = getRPCClient( "WorkloadManagement/VirtualMachineManager" )
+ result = rpcClient.checkVmWebOperation( operation )
+ if not result[ 'OK' ]:
+ return S_ERROR( result[ 'Message' ] )
+ data = result[ 'Value' ]
+ return S_OK( data )
@jsonify
def declareInstancesStopping( self ):
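On the wire, `@jsonify` serializes the returned S_OK / S_ERROR dictionary straight to the browser, so the JavaScript in the next file can branch on `retVal.OK` and read `retVal.Value`. The three possible payload shapes, for reference:

```python
authorized   = { 'OK' : True,  'Value'   : 'Auth' }    # caller holds VmWebOperation
unauthorized = { 'OK' : True,  'Value'   : 'Unauth' }  # RPC succeeded, property missing
failure      = { 'OK' : False, 'Message' : 'some error text' }
```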
diff --git a/Web/public/javascripts/systems/virtualmachines/vmBrowser.js b/Web/public/javascripts/systems/virtualmachines/vmBrowser.js
index e3cfabbf..3ac5053f 100644
--- a/Web/public/javascripts/systems/virtualmachines/vmBrowser.js
+++ b/Web/public/javascripts/systems/virtualmachines/vmBrowser.js
@@ -1,7 +1,10 @@
var gMainGrid = false;
var gVMMenu = false;
+var auth_response = false;
function initVMBrowser(){
+ auth_response = '';
+ checkVmWebOperation();
Ext.onReady(function(){
renderPage();
});
@@ -40,6 +43,19 @@ function generateBrowseGrid( config )
vmCond : config.vmFilter
});
+ if( auth_response == "Auth" )
+ aux_tbar= [
+ { handler:function(){ toggleAll(true) }, text:'Select all', width:150, tooltip:'Click to select all rows' },
+ { handler:function(){ toggleAll(false) }, text:'Select none', width:150, tooltip:'Click to deselect all rows' },
+ '->',
+ { handler:function(){ cbStopSelected() }, text:'Stop', width:150, tooltip:'Click to stop all selected VMs' },
+ ];
+ else
+ aux_tbar= [
+ { handler:function(){ toggleAll(true) }, text:'Select all', width:150, tooltip:'Click to select all rows' },
+ { handler:function(){ toggleAll(false) }, text:'Select none', width:150, tooltip:'Click to deselect all rows' },
+ ];
+
gMainGrid = new Ext.grid.GridPanel( {
store : store,
/*view: new Ext.grid.GroupingView({
@@ -49,36 +65,31 @@ function generateBrowseGrid( config )
}),*/
columns: [
{ id : 'check', header : '', width : 30, dataIndex: 'inst_InstanceID', renderer : renderSelect },
- { header: "Image", width: 150, sortable: true, dataIndex: 'img_Name'},
- { header: "EndPoint", width: 100, sortable: true, dataIndex: 'img_CloudEndpoints'},
- { header: "Status", width: 100, sortable: true, dataIndex: 'inst_Status'},
- { header: "Endpoint VM ID", width: 220, sortable: true, dataIndex: 'inst_UniqueID'},
- { header: "IP", width: 100, sortable: true, dataIndex: 'inst_PublicIP'},
- { header: "Load", width: 50, sortable: true, dataIndex: 'inst_Load', renderer : renderLoad },
- { header: "Uptime", width: 75, sortable: true, dataIndex: 'inst_Uptime', renderer : renderUptime },
- { header: "Jobs", width: 50, sortable: true, dataIndex: 'inst_Jobs' },
- { header: "Last Update (UTC)", width: 125, sortable: true, dataIndex: 'inst_LastUpdate' },
- { header: "Error", width: 350, sortable: true, dataIndex: 'inst_ErrorMessage'},
- ],
- region : 'center',
- tbar : [
- { handler:function(){ toggleAll(true) }, text:'Select all', width:150, tooltip:'Click to select all rows' },
- { handler:function(){ toggleAll(false) }, text:'Select none', width:150, tooltip:'Click to select all rows' },
- '->',
- { handler:function(){ cbStopSelected() }, text:'Stop', width:150, tooltip:'Click to stop all selected VMs' },
- ],
- bbar: new Ext.PagingToolbar({
- pageSize: 50,
- store: store,
- displayInfo: true,
- displayMsg: 'Displaying entries {0} - {1} of {2}',
- emptyMsg: "No entries to display",
- items:[ '-',
- 'Items displaying per page: ', createNumItemsSelector(),
- '-',
- 'Show VMs in status: ', createStatusSelector() ],
- }),
- listeners : { sortchange : cbMainGridSortChange },
+ { header: "Image", width: 150, sortable: true, dataIndex: 'img_Name'},
+ { header: "EndPoint", width: 100, sortable: true, dataIndex: 'img_CloudEndpoints'},
+ { header: "Status", width: 100, sortable: true, dataIndex: 'inst_Status'},
+ { header: "Endpoint VM ID", width: 220, sortable: true, dataIndex: 'inst_UniqueID'},
+ { header: "IP", width: 100, sortable: true, dataIndex: 'inst_PublicIP'},
+ { header: "Load", width: 50, sortable: true, dataIndex: 'inst_Load', renderer : renderLoad },
+ { header: "Uptime", width: 75, sortable: true, dataIndex: 'inst_Uptime', renderer : renderUptime },
+ { header: "Jobs", width: 50, sortable: true, dataIndex: 'inst_Jobs' },
+ { header: "Last Update (UTC)", width: 125, sortable: true, dataIndex: 'inst_LastUpdate' },
+ { header: "Error", width: 350, sortable: true, dataIndex: 'inst_ErrorMessage'},
+ ],
+ region : 'center',
+ tbar : aux_tbar,
+ bbar: new Ext.PagingToolbar({
+ pageSize: 50,
+ store: store,
+ displayInfo: true,
+ displayMsg: 'Displaying entries {0} - {1} of {2}',
+ emptyMsg: "No entries to display",
+ items:[ '-',
+ 'Items displaying per page: ', createNumItemsSelector(),
+ '-',
+ 'Show VMs in status: ', createStatusSelector() ],
+ }),
+ listeners : { sortchange : cbMainGridSortChange },
} );
if( config.title )
gMainGrid.setTitle( config.title );
@@ -252,9 +263,38 @@ function cbShowVMHistory( a,b,c )
}
/*
- * Stopping VMs (stopping mandative management when vmStopPolicy=never, optional if vmStopPolocy=elastic):
+ * Ask the server whether the current user may run VM web operations; the callback stores "Auth" or "Unauth"
*/
+function checkVmWebOperation()
+{
+ Ext.Ajax.request({
+ url : "checkVmWebOperation",
+ cache: false,
+ async: false,
+ success : ajaxReturn,
+ failure : ajaxFailure,
+ params : { operation : 'Web' }
+ });
+}
+
+
+function ajaxReturn( ajaxResponse, reqArguments )
+{
+ var retVal = Ext.util.JSON.decode( ajaxResponse.responseText );
+ if( ! retVal.OK )
+ {
+ alert( "Failed to checkVmWebOperation: " + retVal.Message );
+ return
+ }
+ auth_response = retVal.Value
+}
+
+/*
+ * Stopping VMs (stopping mandatory management when vmStopPolicy=never, optional if vmStopPolicy=elastic):
+ */
function cbStopSelected()
{
diff --git a/WorkloadManagementSystem/Agent/VirtualMachineConfigUpdater.py b/WorkloadManagementSystem/Agent/VirtualMachineConfigUpdater.py
index 9df2413a..ec72d369 100644
--- a/WorkloadManagementSystem/Agent/VirtualMachineConfigUpdater.py
+++ b/WorkloadManagementSystem/Agent/VirtualMachineConfigUpdater.py
@@ -33,7 +33,6 @@ def __init__( self, *args, **kwargs ):
self.stopAgentPath = ''
self.cfgToUpdate = ''
-
def initialize( self ):
""" initialize
@@ -144,4 +143,4 @@ def touchStopAgents( self ):
return S_OK()
#...............................................................................
-#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF
\ No newline at end of file
+#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF
diff --git a/WorkloadManagementSystem/Agent/VirtualMachineScheduler.py b/WorkloadManagementSystem/Agent/VirtualMachineScheduler.py
index 1f3361e0..7cd5e594 100644
--- a/WorkloadManagementSystem/Agent/VirtualMachineScheduler.py
+++ b/WorkloadManagementSystem/Agent/VirtualMachineScheduler.py
@@ -270,7 +270,7 @@ def execute( self ):
numVMs = jobsToSubmitDict['NumVMsToSubmit']
ret = pool.generateJobAndQueueIt( director.submitInstance,
- args = ( imageName, self.workDir, endpoint, numVMs, runningPodName ),
+ args = ( imageName, endpoint, numVMs, runningPodName ),
oCallback = self.callBack,
oExceptionCallback = director.exceptionCallBack,
blocking = False )
diff --git a/WorkloadManagementSystem/Client/Nova11.py b/WorkloadManagementSystem/Client/Nova11.py
index ae7493cc..73d423ba 100644
--- a/WorkloadManagementSystem/Client/Nova11.py
+++ b/WorkloadManagementSystem/Client/Nova11.py
@@ -473,8 +473,8 @@ def contextualise( self, imageConfig, endpointConfig, **kwargs ):
vmContextualizeScriptPath = contextConfig[ 'vmContextualizeScriptPath' ]
vmRunJobAgentURL = contextConfig[ 'vmRunJobAgentURL' ]
vmRunVmMonitorAgentURL = contextConfig[ 'vmRunVmMonitorAgentURL' ]
- vmRunLogJobAgentURL = contextConfig[ 'vmRunLogJobAgentURL' ]
- vmRunLogVmMonitorAgentURL = contextConfig[ 'vmRunLogVmMonitorAgentURL' ]
+ vmRunVmUpdaterAgentURL = contextConfig[ 'vmRunVmUpdaterAgentURL' ]
+ vmRunLogAgentURL = contextConfig[ 'vmRunLogAgentURL' ]
vmCvmfsContextURL = contextConfig[ 'vmCvmfsContextURL' ]
vmDiracContextURL = contextConfig[ 'vmDiracContextURL' ]
cpuTime = contextConfig[ 'cpuTime' ]
@@ -493,8 +493,8 @@ def contextualise( self, imageConfig, endpointConfig, **kwargs ):
vmContextualizeScriptPath = vmContextualizeScriptPath,
vmRunJobAgentURL = vmRunJobAgentURL,
vmRunVmMonitorAgentURL = vmRunVmMonitorAgentURL,
- vmRunLogJobAgentURL = vmRunLogJobAgentURL,
- vmRunLogVmMonitorAgentURL = vmRunLogVmMonitorAgentURL,
+ vmRunVmUpdaterAgentURL = vmRunVmUpdaterAgentURL,
+ vmRunLogAgentURL = vmRunLogAgentURL,
vmCvmfsContextURL = vmCvmfsContextURL,
vmDiracContextURL = vmDiracContextURL,
siteName = siteName,
@@ -524,8 +524,8 @@ def __sshContextualise( self,
vmContextualizeScriptPath,
vmRunJobAgentURL,
vmRunVmMonitorAgentURL,
- vmRunLogJobAgentURL,
- vmRunLogVmMonitorAgentURL,
+ vmRunVmUpdaterAgentURL,
+ vmRunLogAgentURL,
vmCvmfsContextURL,
vmDiracContextURL,
siteName,
@@ -578,7 +578,7 @@ def __sshContextualise( self,
try:
remotecmd = "/bin/bash /root/contextualize-script.bash \'%s\' \'%s\' \'%s\' \'%s\' \'%s\' \'%s\' \'%s\' \'%s\' \'%s\' \'%s\' \'%s\' \'%s\' \'%s\' \'%s\'"
remotecmd = remotecmd % ( uniqueId, putCertPath, putKeyPath, vmRunJobAgentURL,
- vmRunVmMonitorAgentURL, vmRunLogJobAgentURL, vmRunLogVmMonitorAgentURL,
+ vmRunVmMonitorAgentURL, vmRunVmUpdaterAgentURL, vmRunLogAgentURL,
vmCvmfsContextURL, vmDiracContextURL, cvmfs_http_proxy, siteName, cloudDriver, cpuTime, vmStopPolicy )
print "remotecmd"
print remotecmd
diff --git a/WorkloadManagementSystem/Client/Occi09.py b/WorkloadManagementSystem/Client/Occi09.py
index 66c998f8..1d830f84 100644
--- a/WorkloadManagementSystem/Client/Occi09.py
+++ b/WorkloadManagementSystem/Client/Occi09.py
@@ -11,6 +11,9 @@
from subprocess import Popen, PIPE, STDOUT
+# DIRAC
+from DIRAC import gLogger, S_OK, S_ERROR
+
__RCSID__ = '$Id: $'
# Classes
@@ -70,11 +73,30 @@ def exec_no_wait(self, cmd, timelife = 10):
class OcciClient:
- def __init__(self, URI = None, User = None, Passwd = None):
- self.id = None
- self.URI = URI
- self.user = User
- self.passwd = Passwd
+ def __init__( self, user, secret, endpointConfig, imageConfig):
+ """
+ Constructor: uses user / secret authentication for the time being.
+ Copies the endpointConfig and imageConfig dictionaries to the OcciClient.
+
+ :Parameters:
+ **user** - `string`
+ username that will be used on the authentication
+ **secret** - `string`
+ password used on the authentication
+ **endpointConfig** - `dict`
+ dictionary with the endpoint configuration ( WMS.Utilities.Configuration.OcciConfiguration )
+ **imageConfig** - `dict`
+ dictionary with the image configuration ( WMS.Utilities.Configuration.ImageConfiguration )
+
+ """
+
+ # logger
+ self.log = gLogger.getSubLogger( self.__class__.__name__ )
+
+ self.endpointConfig = endpointConfig
+ self.imageConfig = imageConfig
+ self.__user = user
+ self.__password = secret
def check_connection(self, timelife = 5):
"""
@@ -83,7 +105,7 @@ def check_connection(self, timelife = 5):
"""
request = Request()
- command = 'occi-storage' + ' -U ' + self.user + ' -P ' + self.passwd + ' -R ' + self.URI + ' list'
+ command = 'occi-storage' + ' -U ' + self.__user + ' -P ' + self.__password + ' -R ' + self.endpointConfig['occiURI'] + ' list'
request.exec_and_wait(command, timelife)
return request
@@ -94,7 +116,7 @@ def get_image_id(self, imageName):
"""
request = Request()
- command = 'occi-storage' + ' -U ' + self.user + ' -P ' + self.passwd + ' -R ' + self.URI + ' list '
+ command = 'occi-storage' + ' -U ' + self.__user + ' -P ' + self.__password + ' -R ' + self.endpointConfig['occiURI'] + ' list '
request.exec_no_wait(command)
first = request.stdout.find("name='"+imageName+"'")
if first < 0:
@@ -114,7 +136,7 @@ def get_image_ids_of_instance(self, instanceId):
"""
request = Request()
- command = 'occi-compute' + ' -U ' + self.user + ' -P ' + self.passwd + ' -R ' + self.URI + ' show ' + instanceId
+ command = 'occi-compute' + ' -U ' + self.__user + ' -P ' + self.__password + ' -R ' + self.endpointConfig['occiURI'] + ' show ' + instanceId
request.exec_no_wait(command)
first = request.stdout.find("/storage/")
if first < 0:
@@ -136,24 +158,69 @@ def get_image_ids_of_instance(self, instanceId):
return request
- def create_VMInstance( self, bootImageName, hdcImageName, instanceType, imageDriver,
- bootOII, hdcOII, iface, occiDNS1, occiDNS2, Domain, CVMFS_HTTP_PROXY,
- occiURLcontextfiles, occiNetId, siteName, cloudDriver, cpuTime, vmStopPolicy):
+ def create_VMInstance(self, cpuTime):
"""
- This creates a VM instance for the given boot image and hdc image, and
- also de OCCI context on-the-fly image, taken the given parameters.
- Before the VM instance creation is doing a XML composition
+ This creates a VM instance for the given boot image
+ if the context method is adhoc then the boot image is created in Submitted status
+ if the context method is ssh then the boot image is created in Wait_ssh_context status (for the contextualization agent)
+ if the context method is occi_opennebula the context goes in the hdc image, plus an OCCI on-the-fly context image built from the given parameters
Successful creation returns instance id and the IP
"""
+ #Coming from the running pod requirements:
+ strCpuTime = str(cpuTime)
+
+ #DIRAC image context:
+ bootImageName = self.imageConfig[ 'bootImageName' ]
+ flavorName = self.imageConfig[ 'flavorName' ]
+ contextMethod = self.imageConfig[ 'contextMethod' ]
+ hdcImageName = self.imageConfig[ 'contextConfig' ].get( 'hdcImageName' , None )
+ context_files_url = self.imageConfig[ 'contextConfig' ].get( 'context_files_url' , None )
+
+ # endpoint context:
+ siteName = self.endpointConfig[ 'siteName' ]
+ cloudDriver = self.endpointConfig[ 'cloudDriver' ]
+ occiURI = self.endpointConfig[ 'occiURI' ]
+ imageDriver = self.endpointConfig[ 'imageDriver' ]
+ vmStopPolicy = self.endpointConfig[ 'vmStopPolicy' ]
+ netId = self.endpointConfig[ 'netId' ]
+ cvmfs_http_proxy = self.endpointConfig[ 'cvmfs_http_proxy' ]
+ iface = self.endpointConfig[ 'iface' ]
+ if iface == 'static':
+ dns1 = self.endpointConfig[ 'dns1' ]
+ dns2 = self.endpointConfig[ 'dns2' ]
+ domain = self.endpointConfig[ 'domain' ]
+
+ #Get the boot Occi Image Id (OII) from URI server
+ request = self.get_image_id( bootImageName )
+ if request.returncode != 0:
+ self.__errorStatus = "Can't get the boot image id for %s from server %s\n%s" % (bootImageName, occiURI, request.stdout)
+ self.log.error( self.__errorStatus )
+ return request
+ bootOII = request.stdout
+
+ if contextMethod == 'occi_opennebula':
+ #Get the hdc Occi Image Id (OII) from URI server
+ request = self.get_image_id( hdcImageName )
+ if request.returncode != 0:
+ self.__errorStatus = "Can't get the contextual image id for %s from server %s\n%s" % (self.__hdcImageName, self.__occiURI, request.stdout)
+ self.log.error( self.__errorStatus )
+ return request
+ hdcOII = request.stdout
+
+ if contextMethod == 'occi_opennebula':
+ vm_name = bootImageName + '_' + hdcImageName + '_' + str( time.time() )[0:10]
+ else:
+ vm_name = bootImageName + '_' + contextMethod + '_' + str( time.time() )[0:10]
+
tempXMLname = '/tmp/computeOCCI.%s.xml' % os.getpid()
tempXML = open(tempXMLname, 'w')
tempXML.write('<COMPUTE>\n')
- tempXML.write(' <NAME>' + bootImageName + '+' + hdcImageName + '+' + str(time.time())[0:10] + '</NAME>\n')
- tempXML.write(' <INSTANCE_TYPE>' + instanceType + '</INSTANCE_TYPE>\n')
+ tempXML.write(' <NAME>' + vm_name + '</NAME>\n')
+ tempXML.write(' <INSTANCE_TYPE>' + flavorName + '</INSTANCE_TYPE>\n')
tempXML.write(' <DISK id="0">\n')
- tempXML.write('  <STORAGE href="' + self.URI + '/storage/' + bootOII + '"/>\n')
+ tempXML.write('  <STORAGE href="' + occiURI + '/storage/' + bootOII + '"/>\n')
tempXML.write('  <TYPE>OS</TYPE>\n')
tempXML.write('  <TARGET>hda</TARGET>\n')
if not imageDriver == 'default':
@@ -164,37 +231,43 @@ def create_VMInstance( self, bootImageName, hdcImageName, instanceType, imageDri
else:
tempXML.write('  <DRIVER>' + imageDriver + '</DRIVER>\n')
tempXML.write(' </DISK>\n')
- if not hdcImageName == 'NO_CONTEXT':
+
+ if contextMethod == 'occi_opennebula':
tempXML.write(' <DISK id="1">\n')
- tempXML.write('  <STORAGE href="' + self.URI + '/storage/' + hdcOII + '"/>\n')
+ tempXML.write('  <STORAGE href="' + occiURI + '/storage/' + hdcOII + '"/>\n')
tempXML.write('  <TYPE>CDROM</TYPE>\n')
if not imageDriver == 'default':
if imageDriver == 'qcow2-one-3.2.1':
tempXML.write('  <DRIVER>qcow2</DRIVER>\n')
- # elif imageDriver == 'qcow2-one-3.2.0':
- # tempXML.write('  <DRIVER type="qcow2"/>\n')
+ elif imageDriver == 'qcow2-one-3.2.0':
+ tempXML.write('  <DRIVER type="qcow2"/>\n')
else:
tempXML.write('  <DRIVER>' + imageDriver + '</DRIVER>\n')
tempXML.write(' </DISK>\n')
+
tempXML.write(' <NIC>\n')
- tempXML.write('  <NETWORK href="' + self.URI + '/network/' + occiNetId + '"/>\n')
+ tempXML.write('  <NETWORK href="' + occiURI + '/network/' + netId + '"/>\n')
tempXML.write(' </NIC>\n')
- tempXML.write(' <CONTEXT>\n')
- tempXML.write('  <VMID>$VMID</VMID>\n')
- tempXML.write('  <IP>$NIC[IP]</IP>\n')
- tempXML.write('  <MAC>$NIC[MAC]</MAC>\n')
- tempXML.write('  <IFACE>' + iface + '</IFACE>\n')
- if iface == 'static':
- tempXML.write('  <DOMAIN>' + Domain + '</DOMAIN>\n')
- tempXML.write('  <DNS1>' + occiDNS1 + '</DNS1>\n')
- tempXML.write('  <DNS2>' + occiDNS2 + '</DNS2>\n')
- tempXML.write('  <CVMFS_HTTP_PROXY>' + CVMFS_HTTP_PROXY + '</CVMFS_HTTP_PROXY>\n')
- tempXML.write('  <SITE_NAME>' + siteName + '</SITE_NAME>\n')
- tempXML.write('  <CLOUD_DRIVER>' + cloudDriver + '</CLOUD_DRIVER>\n')
- tempXML.write('  <CPU_TIME>' + cpuTime + '</CPU_TIME>\n')
- tempXML.write('  <VM_STOP_POLICY>' + vmStopPolicy + '</VM_STOP_POLICY>\n')
- tempXML.write('  <URL_CONTEXTFILES>' + occiURLcontextfiles + '</URL_CONTEXTFILES>\n')
- tempXML.write(' </CONTEXT>\n')
+ if contextMethod == 'occi_opennebula':
+ tempXML.write(' <CONTEXT>\n')
+ tempXML.write('  <VMID>$VMID</VMID>\n')
+ tempXML.write('  <IP>$NIC[IP]</IP>\n')
+ tempXML.write('  <MAC>$NIC[MAC]</MAC>\n')
+ tempXML.write('  <IFACE>' + iface + '</IFACE>\n')
+
+ if iface == 'static':
+ tempXML.write('  <DOMAIN>' + domain + '</DOMAIN>\n')
+ tempXML.write('  <DNS1>' + dns1 + '</DNS1>\n')
+ tempXML.write('  <DNS2>' + dns2 + '</DNS2>\n')
+
+ tempXML.write('  <CVMFS_HTTP_PROXY>' + cvmfs_http_proxy + '</CVMFS_HTTP_PROXY>\n')
+ tempXML.write('  <SITE_NAME>' + siteName + '</SITE_NAME>\n')
+ tempXML.write('  <CLOUD_DRIVER>' + cloudDriver + '</CLOUD_DRIVER>\n')
+ tempXML.write('  <CPU_TIME>' + strCpuTime + '</CPU_TIME>\n')
+ tempXML.write('  <VM_STOP_POLICY>' + vmStopPolicy + '</VM_STOP_POLICY>\n')
+ tempXML.write('  <URL_CONTEXTFILES>' + context_files_url + '</URL_CONTEXTFILES>\n')
+ tempXML.write(' </CONTEXT>\n')
+
tempXML.write('</COMPUTE>\n')
tempXML.close()
@@ -207,7 +280,7 @@ def create_VMInstance( self, bootImageName, hdcImageName, instanceType, imageDri
request = Request()
- command = 'occi-compute' + ' -U ' + self.user + ' -P ' + self.passwd + ' -R ' + self.URI + ' create ' + tempXMLname
+ command = 'occi-compute' + ' -U ' + self.__user + ' -P ' + self.__password + ' -R ' + occiURI + ' create ' + tempXMLname
request.exec_no_wait(command)
#os.remove(tempXMLname)
first = request.stdout.find("")
@@ -226,7 +299,7 @@ def terminate_VMinstance( self, instanceId ):
Terminate a VM instance corresponding to the instanceId parameter
"""
request = Request()
- command = 'occi-compute' + ' -U ' + self.user + ' -P ' + self.passwd + ' -R ' + self.URI + ' delete ' + instanceId
+ command = 'occi-compute' + ' -U ' + self.__user + ' -P ' + self.__password + ' -R ' + self.endpointConfig['occiURI'] + ' delete ' + instanceId
request.exec_no_wait(command)
if request.stdout == "nil":
request.returncode = 0
@@ -242,7 +315,7 @@ def get_all_VMinstances( self, bootImageName ):
request = Request()
pattern = "name=\\'" + bootImageName + "+"
- command = 'occi-compute' + ' -U ' + self.user + ' -P ' + self.passwd + ' -R ' + self.URI + ' list '
+ command = 'occi-compute' + ' -U ' + self.__user + ' -P ' + self.__password + ' -R ' + self.endpointConfig['occiURI'] + ' list '
request.exec_no_wait(command)
auxstart = request.stdout.find(pattern)
@@ -269,7 +342,7 @@ def get_running_VMinstances( self, bootImageName ):
request = Request()
auxreq = Request()
pattern = "name=\\'" + bootImageName + "+"
- command = 'occi-compute' + ' -U ' + self.user + ' -P ' + self.passwd + ' -R ' + self.URI + ' list '
+ command = 'occi-compute' + ' -U ' + self.__user + ' -P ' + self.__password + ' -R ' + self.endpointConfig['occiURI'] + ' list '
request.exec_no_wait(command)
auxstart = request.stdout.find(pattern)
@@ -296,7 +369,7 @@ def get_status_VMinstance( self, VMinstanceId ):
Get the status VM instance for a given VMinstanceId
"""
request = Request()
- command = 'occi-compute' + ' -U ' + self.user + ' -P ' + self.passwd + ' -R ' + self.URI + ' show ' + VMinstanceId
+ command = 'occi-compute' + ' -U ' + self.__user + ' -P ' + self.__password + ' -R ' + self.endpointConfig['occiURI'] + ' show ' + VMinstanceId
request.exec_no_wait(command)
first = request.stdout.find("")
if first < 0:
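A sketch of how the reworked OcciClient is meant to be driven, with made-up configuration values; in production both dictionaries come from `OcciConfiguration.config()` and `ImageConfiguration.config()` rather than being written by hand, and the `occi-storage` / `occi-compute` CLI tools must be available:

```python
from VMDIRAC.WorkloadManagementSystem.Client.Occi09 import OcciClient

endpointConfig = { 'occiURI'          : 'https://one.example.org:4567',   # made-up endpoint
                   'siteName'         : 'CLOUD.Example.org',
                   'cloudDriver'      : 'occi-0.9',
                   'imageDriver'      : 'default',
                   'vmStopPolicy'     : 'elastic',
                   'netId'            : '0',
                   'cvmfs_http_proxy' : 'http://proxy.example.org:3128',
                   'iface'            : 'auto' }

imageConfig = { 'bootImageName' : 'CernVM',
                'flavorName'    : 'small',
                'contextMethod' : 'occi_opennebula',
                'contextConfig' : { 'hdcImageName'      : 'CernVM-hdc',
                                    'context_files_url' : 'http://cs.example.org/context' } }

client  = OcciClient( 'oneadmin', 'secret', endpointConfig, imageConfig )
request = client.check_connection()
if request.returncode == 0:
  request = client.create_VMInstance( 1800 )  # cpuTime from the RunningPod requirements
```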
diff --git a/WorkloadManagementSystem/Client/OcciImage.py b/WorkloadManagementSystem/Client/OcciImage.py
index d8ff02c5..cef8c65f 100644
--- a/WorkloadManagementSystem/Client/OcciImage.py
+++ b/WorkloadManagementSystem/Client/OcciImage.py
@@ -4,194 +4,104 @@
# Author : Victor Mendez ( vmendez.tic@gmail.com )
########################################################################
+# DIRAC
from DIRAC import gLogger, gConfig, S_OK, S_ERROR
-#VMInstance operations is from VirtualMachineDB.Instances, instead of endpoint interfaced
-# no more OcciVMInstnaces
-#from VMDIRAC.WorkloadManagementSystem.Client.OcciVMInstance import OcciVMInstance
-#occiClient dynamically below, depending on the driver
-#from VMDIRAC.WorkloadManagementSystem.Client.OcciClient import OcciClient
+# VMDIRAC
+from VMDIRAC.WorkloadManagementSystem.Utilities.Configuration import OcciConfiguration, ImageConfiguration
+
__RCSID__ = '$Id: $'
class OcciImage:
- def __init__( self, DIRACImageName, endpoint):
+ def __init__( self, imageName, endPoint):
"""
- The OCCI Image Interface provides the functionality required to use
- a standard occi cloud infrastructure.
- Authentication is provided by an occi user/password attributes
+ The OCCI Image Interface provides the functionality required to use a standard occi cloud infrastructure.
+ Constructor: uses OcciConfiguration to parse the endPoint CS configuration
+ and ImageConfiguration to parse the imageName CS configuration.
+
+ :Parameters:
+ **imageName** - `string`
+ imageName as defined on CS:/Resources/VirtualMachines/Images
+
+ **endPoint** - `string`
+ endPoint as defined on CS:/Resources/VirtualMachines/CloudEndpoint
+
"""
- self.__DIRACImageName = DIRACImageName
- self.__bootImageName = self.__getCSImageOption( "bootImageName" )
- self.__hdcImageName = self.__getCSImageOption( "hdcImageName" )
- self.log = gLogger.getSubLogger( "Image %s(%s,%s): " % ( DIRACImageName, self.__bootImageName, self.__hdcImageName ) )
-# __instances list not used now
- self.__instances = []
- self.__errorStatus = ""
- #Get the image cpuTime to put on the VM /LocalSite/CPUTime
- self.__cpuTime = self.__getCSImageOption( "cpuTime" )
- if not self.__cpuTime:
- self.__cpuTime = 1800
- #Get CloudEndpoint on submission time
- self.__endpoint = endpoint
- if not self.__endpoint:
- self.__errorStatus = "Can't find endpoint for image %s" % self.__DIRACImageName
- self.log.error( self.__errorStatus )
- return
- #Get OCCI URI endpoint
- self.__occiURI = self.__getCSCloudEndpointOption( "occiURI" )
- if not self.__occiURI:
- self.__errorStatus = "Can't find the server occiURI for endpoint %s" % self.__endpoint
- self.log.error( self.__errorStatus )
- return
- #Get OCCI user/password
- # user
- self.__occiUser = self.__getCSCloudEndpointOption( "occiUser" )
- if not self.__occiUser:
- self.__errorStatus = "Can't find the occiUser for endpoint %s" % self.__endpoint
- self.log.error( self.__errorStatus )
- return
- # password
- self.__occiPasswd = self.__getCSCloudEndpointOption( "occiPasswd" )
- if not self.__occiPasswd:
- self.__errorStatus = "Can't find the occiPasswd for endpoint %s" % self.__endpoint
- self.log.error( self.__errorStatus )
- return
- # scheduling policy of the endpoint elastic/static
- self.__vmPolicy = self.__getCSCloudEndpointOption( "vmPolicy" )
- if not ( self.__vmPolicy == 'elastic' or self.__vmPolicy == 'static' ):
- self.__errorStatus = "Can't find valid vmPolicy (elastic/static) for endpoint %s" % self.__endpoint
- self.log.error( self.__errorStatus )
- return
- # stoppage policy of the endpoint elastic/never
- self.__vmStopPolicy = self.__getCSCloudEndpointOption( "vmStopPolicy" )
- if not ( self.__vmStopPolicy == 'elastic' or self.__vmStopPolicy == 'never' ):
- self.__errorStatus = "Can't find valid vmStopPolicy (elastic/never) for endpoint %s" % self.__endpoint
- self.log.error( self.__errorStatus )
- return
- # get the driver
- self.__cloudDriver = self.__getCSCloudEndpointOption( "cloudDriver" )
- if not self.__cloudDriver:
- self.__errorStatus = "Can't find the driver for endpoint %s" % self.__endpoint
- self.log.error( self.__errorStatus )
- return
- # check connection, depending on the driver
- if self.__cloudDriver == "occi-0.8":
- from VMDIRAC.WorkloadManagementSystem.Client.Occi08 import OcciClient
- self.__cliocci = OcciClient(self.__occiURI, self.__occiUser, self.__occiPasswd)
- elif self.__cloudDriver == "occi-0.9":
- from VMDIRAC.WorkloadManagementSystem.Client.Occi09 import OcciClient
- self.__cliocci = OcciClient(self.__occiURI, self.__occiUser, self.__occiPasswd)
- else:
- self.__errorStatus = "Driver %s not supported" % self.__cloudDriver
- self.log.error( self.__errorStatus )
- return
- request = self.__cliocci.check_connection()
- if request.returncode != 0:
- self.__errorStatus = "Can't connect to OCCI server %s\n%s" % (self.__occiURI, request.stdout)
- self.log.error( self.__errorStatus )
- return
+ # logger
+ self.log = gLogger.getSubLogger( 'OcciImage %s: ' % imageName )
- if not self.__errorStatus:
- self.log.info( "Available OCCI server %s" % self.__occiURI )
+ self.imageName = imageName
+ self.endPoint = endPoint
- #Get the boot Occi Image Id (OII) from URI server
- request = self.__cliocci.get_image_id( self.__bootImageName )
- if request.returncode != 0:
- self.__errorStatus = "Can't get the boot image id for %s from server %s\n%s" % (self.__bootImageName, self.__occiURI, request.stdout)
- self.log.error( self.__errorStatus )
- return
- self.__bootOII = request.stdout
-
- #Get the hdc Occi Image Id (OII) from URI server
- if not self.__hdcImageName == 'NO_CONTEXT':
- request = self.__cliocci.get_image_id( self.__hdcImageName )
- if request.returncode != 0:
- self.__errorStatus = "Can't get the contextual image id for %s from server %s\n%s" % (self.__hdcImageName, self.__occiURI, request.stdout)
- self.log.error( self.__errorStatus )
- return
- self.__hdcOII = request.stdout
- else:
- self.__hdcOII = 'NO_CONTEXT'
+ # their config() method returns a dictionary with the parsed configuration
+ # they also provide a validate() method to make sure it is correct
+ self.__imageConfig = ImageConfiguration( imageName )
+ self.__occiConfig = OcciConfiguration( endPoint )
- # iface static or auto (DHCP image)
- self.__iface = self.__getCSCloudEndpointOption( "iface" )
- if not self.__iface:
- self.__errorStatus = "Can't find the iface (static/auto) for endpoint %s" % self.__endpoint
- self.log.error( self.__errorStatus )
- return
+ # this object will connect to the server. Better keep it private.
+ self.__cliocci = None
+ # error status of the image with the associated context (endpoint + image), basically for check_connection propagation
+ self.__errorStatus = None
- if self.__iface == 'static':
-
- # dns1
- self.__DNS1 = self.__getCSCloudEndpointOption( "DNS1" )
- if not self.__DNS1:
- self.__errorStatus = "Can't find the DNS1 for endpoint %s" % self.__endpoint
- self.log.error( self.__errorStatus )
- return
-
- # dns2
- self.__DNS2 = self.__getCSCloudEndpointOption( "DNS2" )
- if not self.__DNS2:
- self.__errorStatus = "Can't find the DNS2 for endpoint %s" % self.__endpoint
- self.log.error( self.__errorStatus )
- return
-
- # domain
- self.__Domain = self.__getCSCloudEndpointOption( "Domain" )
- if not self.__Domain:
- self.__errorStatus = "Can't find the Domain for endpoint %s" % self.__endpoint
- self.log.error( self.__errorStatus )
- return
-
- # cvmfs http proxy:
- self.__CVMFS_HTTP_PROXY = self.__getCSCloudEndpointOption( "CVMFS_HTTP_PROXY" )
- if not self.__CVMFS_HTTP_PROXY:
- self.__errorStatus = "Can't find the CVMFS_HTTP_PROXY for endpoint %s" % self.__endpoint
- self.log.error( self.__errorStatus )
- return
+ def connectOcci( self ):
+ """
+ Method that issues the connection to the OCCI server. It first validates the
+ CS configurations; for the time being we authenticate with user / password,
+ which are passed, together with the parsed configurations, to the OcciClient
+ that checks the connection.
- # URL context files:
- self.__URLcontextfiles = self.__getCSImageOption( "URLcontextfiles" )
- if not self.__URLcontextfiles:
- self.__URLcontextfiles = "http://lhcweb.pic.es/vmendez/context/root.pub"
+ :return: S_OK | S_ERROR
+ """
- # Network id
- self.__NetId = self.__getCSCloudEndpointOption( "NetId" )
- if not self.__NetId:
- self.__errorStatus = "Can't find the NetId for endpoint %s" % self.__endpoint
+ # Before doing anything, make sure the configurations make sense
+ # ImageConfiguration
+ validImage = self.__imageConfig.validate()
+ if not validImage[ 'OK' ]:
+ return validImage
+ # EndpointConfiguration
+ validOcci = self.__occiConfig.validate()
+ if not validOcci[ 'OK' ]:
+ return validOcci
+
+ # Get authentication configuration
+ user, secret = self.__occiConfig.authConfig()
+
+ # Create the occiclient objects in OcciClient:
+ if self.__occiConfig.cloudDriver() == "occi-0.8":
+ from VMDIRAC.WorkloadManagementSystem.Client.Occi08 import OcciClient
+ self.__cliocci = OcciClient(user, secret, self.__occiConfig.config(), self.__imageConfig.config())
+ elif self.__occiConfig.cloudDriver() == "occi-0.9":
+ from VMDIRAC.WorkloadManagementSystem.Client.Occi09 import OcciClient
+ self.__cliocci = OcciClient(user, secret, self.__occiConfig.config(), self.__imageConfig.config())
+ else:
+ self.__errorStatus = "Driver %s not supported" % self.__cloudDriver
self.log.error( self.__errorStatus )
- return
+ return S_ERROR( self.__errorStatus )
- # Site name (temporaly at Endpoint, but this sould be get it from Resources LHCbDIRAC like scheme)
- self.__siteName = self.__getCSCloudEndpointOption( "siteName" )
- if not self.__siteName:
- self.__errorStatus = "Can't find the siteName for endpoint %s" % self.__endpoint
+ # Check connection to the server
+ request = self.__cliocci.check_connection()
+ if request.returncode != 0:
+ self.__errorStatus = "Can't connect to OCCI URI %s\n%s" % (self.__occiConfig.occiURI(), request.stdout)
self.log.error( self.__errorStatus )
- return
-
- def __getCSImageOption( self, option, defValue = "" ):
- """
- Following we can see that every CSImageOption are related with the booting
- image, the contextualized hdc image has no asociated options
- """
- return gConfig.getValue( "/Resources/VirtualMachines/Images/%s/%s" % ( self.__DIRACImageName, option ), defValue )
+ return S_ERROR( self.__errorStatus )
+ else:
+ self.log.info( "Successful connection check to %s" % (self.__occiConfig.occiURI()) )
- def __getCSCloudEndpointOption( self, option, defValue = "" ):
- return gConfig.getValue( "/Resources/VirtualMachines/CloudEndpoints/%s/%s" % ( self.__endpoint, option ), defValue )
+ return S_OK( request.stdout )
- def startNewInstance( self, instanceType = "small", imageDriver = "default" ):
+ def startNewInstance( self, cpuTime ):
"""
- Prior to use, virtual machine images are uploaded to the OCCI cloud manager
- assigned an id (OII in a URI).
+ Prior to use, virtual machine images are uploaded to the OCCI cloud manager and assigned an id (OII in a URI).
"""
if self.__errorStatus:
return S_ERROR( self.__errorStatus )
- self.log.info( "Starting new instance for image (boot,hdc): %s,%s; to endpoint %s, and driver %s" % ( self.__bootImageName, self.__hdcImageName, self.__endpoint, self.__cloudDriver ) )
- request = self.__cliocci.create_VMInstance( self.__bootImageName, self.__hdcImageName, instanceType, imageDriver, self.__bootOII, self.__hdcOII, self.__iface, self.__DNS1, self.__DNS2, self.__Domain, self.__CVMFS_HTTP_PROXY, self.__URLcontextfiles, self.__NetId, self.__siteName, self.__cloudDriver, self.__cpuTime, self.__vmStopPolicy)
+
+ self.log.info( "Starting new instance for DIRAC image (boot,hdc): %s; to endpoint %s" % ( self.imageName, self.endPoint) )
+ request = self.__cliocci.create_VMInstance(cpuTime)
if request.returncode != 0:
- self.__errorStatus = "Can't create instance for boot image (boot,hdc): %s/%s at server %s\n%s" % (self.__bootImageName, self.__hdcImageName, self.__occiURI, request.stdout)
+ self.__errorStatus = "Can't create instance for DIRAC image (boot,hdc): %s; to endpoint %s" % ( self.imageName, self.endPoint)
self.log.error( self.__errorStatus )
return S_ERROR( self.__errorStatus )
@@ -204,42 +114,9 @@ def stopInstance( self, VMinstanceId ):
request = self.__cliocci.terminate_VMinstance( VMinstanceId )
if request.returncode != 0:
- self.__errorStatus = "Can't delete VM instance ide %s from server %s\n%s" % (VMinstanceId, self.__occiURI, request.stdout)
+ self.__errorStatus = "Can't delete VM instance ide %s from endpoint URL %s\n%s" % (VMinstanceId, self.__occiConfig.occiURI(), request.stdout)
self.log.error( self.__errorStatus )
return S_ERROR( self.__errorStatus )
return S_OK( request.stdout )
-
-#VMInstance operations is from VirtualMachineDB.Instances, instead of endpoint interfaced
-# no more OcciVMInstances
-# def getAllInstances( self ):
-# """
-# Get all instances for this image
-# """
-# instances = []
-# request = self.__cliocci.get_all_VMinstances( self.__bootImageName )
-# if request.returncode != 0:
-# self.__errorStatus = "Error while get all instances of %s from server %s\n%s" % (self.__bootImage, self.__occiURI, request.stdout)
-# self.log.error( self.__errorStatus )
-# return S_ERROR( self.__errorStatus )
-#
-# for instanceId in request.rlist:
-# instances.append( OcciVMInstance ( instanceId, self.__occiURI, self.__occiUser, self.__occiPasswd ) )
-# return instances
-
-# def getAllRunningInstances( self ):
-# """
-# Get all running instances for this image
-# """
-# instances = []
-# request = self.__cliocci.get_running_VMinstances( self.__bootImageName )
-# if request.returncode != 0:
-# self.__errorStatus = "Error while get the running instances of %s from server %s\n%s" % (self.__bootImage, self.__occiURI, request.stdout)
-# self.log.error( self.__errorStatus )
-# return S_ERROR( self.__errorStatus )
-#
-# for instanceId in request.rlist:
-# instances.append( OcciVMInstance ( instanceId, self.__occiURI, self.__occiUser, self.__occiPasswd ) )
-# return instances
-#
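End to end, the split between configuration parsing and the client means the directors earlier in this patch drive an image in three steps: construct, connect, start. A condensed sketch ('MyImage' and 'MyCloudEndpoint' are placeholders for CS-defined names):

```python
from VMDIRAC.WorkloadManagementSystem.Client.OcciImage import OcciImage

oima = OcciImage( 'MyImage', 'MyCloudEndpoint' )

connOcci = oima.connectOcci()  # validates the CS config and checks the server
if not connOcci[ 'OK' ]:
  print connOcci[ 'Message' ]
else:
  result = oima.startNewInstance( 1800 )  # cpuTime from the RunningPod requirements
  if result[ 'OK' ]:
    uniqueID = result[ 'Value' ]
```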
diff --git a/WorkloadManagementSystem/Service/VirtualMachineManagerHandler.py b/WorkloadManagementSystem/Service/VirtualMachineManagerHandler.py
index b875f46e..69e4b7d6 100644
--- a/WorkloadManagementSystem/Service/VirtualMachineManagerHandler.py
+++ b/WorkloadManagementSystem/Service/VirtualMachineManagerHandler.py
@@ -24,6 +24,7 @@
from VMDIRAC.WorkloadManagementSystem.Client.NovaImage import NovaImage
from VMDIRAC.WorkloadManagementSystem.Client.OcciImage import OcciImage
from VMDIRAC.WorkloadManagementSystem.DB.VirtualMachineDB import VirtualMachineDB
+from VMDIRAC.Security import VmProperties
__RCSID__ = '$Id: $'
@@ -47,16 +48,15 @@ class VirtualMachineManagerHandler( RequestHandler ):
def initialize( self ):
-# FIXME: is all this actually used ????
-# credDict = self.getRemoteCredentials()
-# self.ownerDN = credDict[ 'DN' ]
-# self.ownerGroup = credDict[ 'group' ]
-# self.userProperties = credDict[ 'properties' ]
-# self.owner = credDict[ 'username' ]
-# self.peerUsesLimitedProxy = credDict[ 'isLimitedProxy' ]
+ credDict = self.getRemoteCredentials()
+ self.rpcProperties = credDict[ 'properties' ]
+
+# self.ownerDN = credDict[ 'DN' ]
+# self.ownerGroup = credDict[ 'group' ]
+# self.owner = credDict[ 'username' ]
+# self.peerUsesLimitedProxy = credDict[ 'isLimitedProxy' ]
#
-# self.diracSetup = self.serviceInfoDict[ 'clientSetup' ]
- pass
+# self.diracSetup = self.serviceInfoDict[ 'clientSetup' ]
@staticmethod
def __logResult( methodName, result ):
@@ -66,6 +66,14 @@ def __logResult( methodName, result ):
if not result[ 'OK' ]:
gLogger.error( '%s%s' % ( methodName, result[ 'Message' ] ) )
+ types_checkVmWebOperation = [ StringType ]
+ def export_checkVmWebOperation( self, operation ):
+ """
+ Returns 'Auth' if the RPC credentials carry VM_WEB_OPERATION, 'Unauth' otherwise
+ """
+ if VmProperties.VM_WEB_OPERATION in self.rpcProperties:
+ return S_OK( 'Auth' )
+ return S_OK( 'Unauth' )
types_insertInstance = [ StringType, ( StringType, UnicodeType ), ]
def export_insertInstance( self, imageName, instanceName, endpoint, runningPodName ):
@@ -109,6 +117,9 @@ def export_declareInstanceRunning( self, uniqueID, privateIP ):
- instanceName does not have a "Submitted" entry
- uniqueID is not unique
"""
+ if not VmProperties.VM_RPC_OPERATION in self.rpcProperties:
+ return S_ERROR( "Unauthorized declareInstanceRunning RPC" )
+
publicIP = self.getRemoteAddress()[ 0 ]
res = gVirtualMachineDB.declareInstanceRunning( uniqueID, publicIP, privateIP )
@@ -127,6 +138,9 @@ def export_instanceIDHeartBeat( self, uniqueID, load, jobs,
Declares "Running" the instance and the image
It returns S_ERROR if the status is not OK
"""
+ if not VmProperties.VM_RPC_OPERATION in self.rpcProperties:
+ return S_ERROR( "Unauthorized declareInstanceIDHeartBeat RPC" )
+
#FIXME: do we really need the try / except. The type is fixed to int / long.
try:
uptime = int( uptime )
@@ -147,6 +161,9 @@ def export_declareInstancesStopping( self, instanceIdList ):
When the next instanceID heartbeat arrives with stopping status on the DB, the VM will stop the job agent and terminate orderly
It returns S_ERROR if the status is not OK
"""
+ if not VmProperties.VM_WEB_OPERATION in self.rpcProperties:
+ return S_ERROR( "Unauthorized VM Stopping" )
+
for instanceID in instanceIdList:
gLogger.info( 'Stopping DIRAC instanceID: %s' % ( instanceID ) )
result = gVirtualMachineDB.getInstanceStatus( instanceID )
@@ -172,13 +189,11 @@ def export_declareInstancesStopping( self, instanceIdList ):
msg = 'Could not find driver option in the Endpoint %s value %s' % (endpoint, cloudDriver)
return S_ERROR( msg )
result = self.export_declareInstanceHalting( uniqueID, 0, cloudDriver )
-
- if state == 'New':
- result = gVirtualMachineDB.recordDBHalt( instanceID, 0 )
- self.__logResult( 'declareInstanceHalted', result )
-
+ elif state == 'New':
+ result = gVirtualMachineDB.recordDBHalt( instanceID, 0 )
+ self.__logResult( 'declareInstanceHalted', result )
else:
- # this is only aplied to Running, while the rest of trasitions are not allowed and declareInstanceStopping do nothing
+ # this is only applied to allowed transitions
result = gVirtualMachineDB.declareInstanceStopping( instanceID )
self.__logResult( 'declareInstancesStopping: on declareInstanceStopping call: ', result )
@@ -192,6 +207,9 @@ def export_declareInstanceHalting( self, uniqueID, load, cloudDriver ):
Declares "Halted" the instance and the image
It returns S_ERROR if the status is not OK
"""
+ if not VmProperties.VM_RPC_OPERATION in self.rpcProperties:
+ return S_ERROR( "Unauthorized declareInstanceHalting RPC" )
+
endpoint = gVirtualMachineDB.getEndpointFromInstance( uniqueID )
if not endpoint[ 'OK' ]:
self.__logResult( 'declareInstanceHalting', endpoint )
@@ -210,7 +228,12 @@ def export_declareInstanceHalting( self, uniqueID, load, cloudDriver ):
return imageName
imageName = imageName[ 'Value' ]
+ gLogger.info( 'Declare instance halting: %s, endpoint: %s, imageName: %s' % ( str( uniqueID ), endpoint, imageName ) )
oima = OcciImage( imageName, endpoint )
+ connOcci = oima.connectOcci()
+ if not connOcci[ 'OK' ]:
+ return connOcci
+
result = oima.stopInstance( uniqueID )
elif cloudDriver == 'nova-1.1':
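The web controller earlier in the patch already shows one client of the new export; the same call from any DIRAC client would look like the sketch below, assuming the proxy carries the relevant group properties:

```python
from DIRAC.Core.DISET.RPCClient import RPCClient

rpcClient = RPCClient( 'WorkloadManagement/VirtualMachineManager' )
result = rpcClient.checkVmWebOperation( 'Web' )
if result[ 'OK' ]:
  print result[ 'Value' ]  # 'Auth' if the proxy carries VmWebOperation, else 'Unauth'
```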
diff --git a/WorkloadManagementSystem/Utilities/Configuration.py b/WorkloadManagementSystem/Utilities/Configuration.py
index f12c1819..c5587981 100644
--- a/WorkloadManagementSystem/Utilities/Configuration.py
+++ b/WorkloadManagementSystem/Utilities/Configuration.py
@@ -51,6 +51,127 @@ def validate( self ):
#...............................................................................
+class OcciConfiguration( EndpointConfiguration ):
+ """
+ OcciConfiguration Class parses a CloudEndpoint section
+ and builds a configuration if possible, with the information obtained from the CS.
+ """
+
+ # Keys that MUST be present on ANY Occi CloudEndpoint configuration in the CS
+ MANDATORY_KEYS = [ 'cloudDriver', 'vmPolicy', 'vmStopPolicy', 'siteName', 'occiURI', 'maxEndpointInstances' ]
+
+ def __init__( self, occiEndpoint ):
+ """
+ Constructor
+
+ :Parameters:
+ **occiEndpoint** - `string`
+ string with the name of the CloudEndpoint defined on the CS
+ """
+ super( OcciConfiguration, self ).__init__()
+
+ occiOptions = gConfig.getOptionsDict( '%s/%s' % ( self.ENDPOINT_PATH, occiEndpoint ) )
+ if not occiOptions[ 'OK' ]:
+ self.log.error( occiOptions[ 'Message' ] )
+ occiOptions = {}
+ else:
+ occiOptions = occiOptions[ 'Value' ]
+
+ # FIXME: make it generic !
+
+ # Purely endpoint configuration ............................................
+ # These two are passed as arguments, not keyword arguments
+ self.__user = occiOptions.get( 'user' , None )
+ self.__password = occiOptions.get( 'password' , None )
+
+ self.__cloudDriver = occiOptions.get( 'cloudDriver' , None )
+ self.__vmStopPolicy = occiOptions.get( 'vmStopPolicy' , None )
+ self.__vmPolicy = occiOptions.get( 'vmPolicy' , None )
+ self.__siteName = occiOptions.get( 'siteName' , None )
+ self.__maxEndpointInstances = occiOptions.get( 'maxEndpointInstances' , None )
+
+ self.__occiURI = occiOptions.get( 'occiURI' , None )
+ self.__imageDriver = occiOptions.get( 'imageDriver' , None )
+ self.__netId = occiOptions.get( 'netId' , None )
+ self.__iface = occiOptions.get( 'iface' , None )
+ self.__dns1 = occiOptions.get( 'DNS1' , None )
+ self.__dns2 = occiOptions.get( 'DNS2' , None )
+ self.__domain = occiOptions.get( 'domain' , None )
+ self.__cvmfs_http_proxy = occiOptions.get( 'CVMFS_HTTP_PROXY' , None )
+
+ def config( self ):
+
+ config = {}
+
+ config[ 'user' ] = self.__user
+ config[ 'password' ] = self.__password
+
+ config[ 'cloudDriver' ] = self.__cloudDriver
+ config[ 'vmPolicy' ] = self.__vmPolicy
+ config[ 'vmStopPolicy' ] = self.__vmStopPolicy
+ config[ 'siteName' ] = self.__siteName
+ config[ 'maxEndpointInstances' ] = self.__maxEndpointInstances
+
+ config[ 'occiURI' ] = self.__occiURI
+ config[ 'imageDriver' ] = self.__imageDriver
+ config[ 'netId' ] = self.__netId
+ config[ 'iface' ] = self.__iface
+ config[ 'dns1' ] = self.__dns1
+ config[ 'dns2' ] = self.__dns2
+ config[ 'domain' ] = self.__domain
+ config[ 'cvmfs_http_proxy' ] = self.__cvmfs_http_proxy
+
+ # Do not return dictionary with None values
+ for key, value in config.items():
+ if value is None:
+ del config[ key ]
+
+ return config
+
+ def validate( self ):
+
+ endpointConfig = self.config()
+
+ missingKeys = set( self.MANDATORY_KEYS ).difference( set( endpointConfig.keys() ) )
+ if missingKeys:
+ return S_ERROR( 'Missing mandatory keys on endpointConfig %s' % str( missingKeys ) )
+
+ # on top of the MANDATORY_KEYS, we make sure the user & password are set
+ if self.__user is None:
+ return S_ERROR( 'User is None' )
+ if self.__password is None:
+ return S_ERROR( 'Password is None' )
+
+ self.log.info( '*' * 50 )
+ self.log.info( 'Displaying endpoint info' )
+ for key, value in endpointConfig.iteritems():
+ if key == 'user':
+ self.log.info( '%s : *********' % ( key ) )
+ elif key == 'password':
+ self.log.info( '%s : *********' % ( key ) )
+ else:
+ self.log.info( '%s : %s' % ( key, value ) )
+ self.log.info( 'User and Password are NOT printed.')
+ self.log.info( '*' * 50 )
+
+ return S_OK()
+
+ def authConfig( self ):
+
+ return ( self.__user, self.__password )
+
+ def cloudDriver( self ):
+
+ return ( self.__cloudDriver )
+
+ def occiURI( self ):
+
+ return ( self.__occiURI )
+
+#...............................................................................
+
class NovaConfiguration( EndpointConfiguration ):
"""
NovaConfiguration Class parses a CloudEndpoint section
@@ -58,7 +179,7 @@ class NovaConfiguration( EndpointConfiguration ):
"""
# Keys that MUST be present on ANY Nova CloudEndpoint configuration in the CS
- MANDATORY_KEYS = [ 'ex_force_auth_url', 'ex_force_service_region', 'ex_tenant_name' ]
+ MANDATORY_KEYS = [ 'ex_force_auth_url', 'ex_force_service_region', 'ex_tenant_name', 'vmPolicy', 'vmStopPolicy', 'cloudDriver', 'siteName', 'maxEndpointInstances' ]
def __init__( self, novaEndpoint ):
"""
@@ -81,13 +202,14 @@ def __init__( self, novaEndpoint ):
# Purely endpoint configuration ............................................
# These two are passed as arguments, not keyword arguments
- self.__user = novaOptions.get( 'user' , None )
- self.__password = novaOptions.get( 'password' , None )
+ self.__user = novaOptions.get( 'user' , None )
+ self.__password = novaOptions.get( 'password' , None )
- self.__cloudDriver = novaOptions.get( 'cloudDriver' , None )
- self.__vmStopPolicy = novaOptions.get( 'vmStopPolicy' , None )
- self.__vmPolicy = novaOptions.get( 'vmPolicy' , None )
- self.__siteName = novaOptions.get( 'siteName' , None )
+ self.__cloudDriver = novaOptions.get( 'cloudDriver' , None )
+ self.__vmStopPolicy = novaOptions.get( 'vmStopPolicy' , None )
+ self.__vmPolicy = novaOptions.get( 'vmPolicy' , None )
+ self.__siteName = novaOptions.get( 'siteName' , None )
+ self.__maxEndpointInstances = novaOptions.get( 'maxEndpointInstances' , None )
self.__ex_force_ca_cert = novaOptions.get( 'ex_force_ca_cert' , None )
self.__ex_force_auth_token = novaOptions.get( 'ex_force_auth_token' , None )
@@ -103,6 +225,15 @@ def config( self ):
config = {}
+ config[ 'user' ] = self.__user
+ config[ 'password' ] = self.__password
+
+ config[ 'cloudDriver' ] = self.__cloudDriver
+ config[ 'vmPolicy' ] = self.__vmPolicy
+ config[ 'vmStopPolicy' ] = self.__vmStopPolicy
+ config[ 'siteName' ] = self.__siteName
+ config[ 'maxEndpointInstances' ] = self.__maxEndpointInstances
+
config[ 'ex_force_ca_cert' ] = self.__ex_force_ca_cert
config[ 'ex_force_auth_token' ] = self.__ex_force_auth_token
config[ 'ex_force_auth_url' ] = self.__ex_force_auth_url
@@ -112,12 +243,6 @@ def config( self ):
config[ 'ex_force_service_region' ] = self.__ex_force_service_region
config[ 'ex_force_service_type' ] = self.__ex_force_service_type
config[ 'ex_tenant_name' ] = self.__ex_tenant_name
- config[ 'cloudDriver' ] = self.__cloudDriver
- config[ 'vmPolicy' ] = self.__vmPolicy
- config[ 'vmStopPolicy' ] = self.__vmStopPolicy
- config[ 'siteName' ] = self.__siteName
- config[ 'user' ] = self.__user
- config[ 'password' ] = self.__password
# Do not return dictionary with None values
for key, value in config.items():
@@ -170,11 +295,11 @@ def __init__( self, imageName ):
else:
imageOptions = imageOptions[ 'Value' ]
- self.__ic_bootImageName = imageOptions.get( 'bootImageName', None )
- self.__ic_contextMethod = imageOptions.get( 'contextMethod', None )
- self.__ic_flavorName = imageOptions.get( 'flavorName' , None )
+ self.__ic_bootImageName = imageOptions.get( 'bootImageName' , None )
+ self.__ic_contextMethod = imageOptions.get( 'contextMethod' , None )
+ self.__ic_flavorName = imageOptions.get( 'flavorName' , None )
#self.__ic_contextConfig = ContextConfig( self.__ic_bootImageName, self.__ic_contextMethod )
- self.__ic_contextConfig = ContextConfig( imageName, self.__ic_contextMethod )
+ self.__ic_contextConfig = ContextConfig( imageName, self.__ic_contextMethod )
def config( self ):
@@ -210,6 +335,6 @@ def validate( self ):
self.log.info( '*' * 50 )
return S_OK()
-
+
#...............................................................................
#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF
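Using the new class is a two-step validate-then-read pattern, the same one `OcciImage.connectOcci` follows above. A sketch ('MyCloudEndpoint' is a placeholder for a CS-defined endpoint):

```python
from VMDIRAC.WorkloadManagementSystem.Utilities.Configuration import OcciConfiguration

occiConfig = OcciConfiguration( 'MyCloudEndpoint' )

valid = occiConfig.validate()  # checks MANDATORY_KEYS plus user / password
if not valid[ 'OK' ]:
  print valid[ 'Message' ]
else:
  user, secret = occiConfig.authConfig()
  endpointDict = occiConfig.config()  # None-valued options are stripped out
```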
diff --git a/WorkloadManagementSystem/Utilities/Context.py b/WorkloadManagementSystem/Utilities/Context.py
index 6d844943..236ed756 100644
--- a/WorkloadManagementSystem/Utilities/Context.py
+++ b/WorkloadManagementSystem/Utilities/Context.py
@@ -28,12 +28,12 @@ class ContextConfig( object ):
# Mandatory keys for the basic Context Configuration. Are the options that
# will be used by other components. Is a sanity check against a miss-configured
# ConfigurationService.
- MANDATORY_KEYS = [ 'instanceType' ]
+ MANDATORY_KEYS = [ 'bootImageName', 'flavorName', 'contextMethod' ]
def __new__( cls, _imageName, contextName ):
"""
Uses the contextName parameter to decide which class to load. If not in
- `ssh`, `adhoc` or `amiconfig` raises a NotImplementedException
+ `ssh`, `adhoc`, `amiconfig` or `occi_opennebula` raises a NotImplementedError
:Parameters:
**_imageName** - `string`
@@ -51,6 +51,8 @@ def __new__( cls, _imageName, contextName ):
cls = AdHocContext
elif contextName == 'amiconfig':
cls = AmiconfigContext
+ elif contextName == 'occi_opennebula':
+ cls = OcciOpennebulaContext
else:
raise NotImplementedError( "No context implemented for %s" % contextName )
@@ -66,7 +68,7 @@ def __init__( self, imageName, contextName ):
name of the image on the CS
**contextName** - `string`
string with the type of context on the CS. It decides which class to load.
- Either `ssh`,`adhoc`,`amiconfig`.
+ Either `ssh`, `adhoc`, `amiconfig` or `occi_opennebula`.
"""
# Get sublogger with the class name loaded in __new__
@@ -122,7 +124,6 @@ class SSHContext( ContextConfig ):
"""
SSHContext defines the following mandatory keys:
- * hdcImageName
* flavorName
* vmOsIpPool
* vmCertPath : the virtualmachine cert to be copied on the VM of a specific endpoint
@@ -132,16 +133,16 @@ class SSHContext( ContextConfig ):
* vmDiracContextURL : the dirac specific context URL
* vmRunJobAgentURL : the runsvdir run file for JobAgent
* vmRunVmMonitorAgentURL : the runsvdir run file vmMonitorAgent
- * vmRunLogJobAgentURL : the runsvdir run.log file forjobAgent
- * vmRunLogVmMonitorAgentURL : the runsvdir run.log file vmMonitorAgent
+ * vmRunVmUpdaterAgentURL : the runsvdir run file for the VirtualMachineConfigUpdater agent
+ * vmRunLogAgentURL : the runsvdir run.log file
* cpuTime : the VM cpuTime of the image
* cloudDriver : the endpoint dirac cloud driver
"""
- MANDATORY_KEYS = [ 'hdcImageName', 'flavorName', 'vmOsIpPool', 'vmCertPath',
+ MANDATORY_KEYS = [ 'vmOsIpPool', 'vmCertPath',
'vmKeyPath', 'vmContextualizeScriptPath', 'vmCvmfsContextURL',
'vmDiracContextURL', 'vmRunJobAgentURL', 'vmRunVmMonitorAgentURL',
- 'vmRunLogJobAgentURL', 'vmRunLogVmMonitorAgentURL', 'cpuTime', 'cloudDriver' ]
+ 'vmRunVmUpdaterAgentURL', 'vmRunLogAgentURL' ]
#...............................................................................
# AdHoc Context
@@ -159,7 +160,6 @@ class AmiconfigContext( ContextConfig ):
"""
AmiconfigContext defines the following mandatory keys:
- * flavorName
* ex_size
* ex_image
* ex_keyname
@@ -168,8 +168,21 @@ class AmiconfigContext( ContextConfig ):
"""
- MANDATORY_KEYS = [ 'flavorName', 'ex_size', 'ex_image', 'ex_keyname',
+ MANDATORY_KEYS = [ 'ex_size', 'ex_image', 'ex_keyname',
'ex_security_groups', 'ex_userdata' ]
+#...............................................................................
+# OcciOpennebulaContext
+
+class OcciOpennebulaContext( ContextConfig ):
+ """
+ OcciOpennebulaContext defines the following mandatory keys:
+
+ * hdcImageName
+ * context_files_url
+
+ """
+
+ MANDATORY_KEYS = [ 'hdcImageName','context_files_url' ]
+
#...............................................................................
#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF
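Because the dispatch happens in `__new__`, callers never name the subclass; the CS `contextMethod` string selects it. The effect of this hunk, sketched ('MyImage' is a placeholder; the constructor will look it up in the CS):

```python
from VMDIRAC.WorkloadManagementSystem.Utilities.Context import ContextConfig

# instantiating the base class returns the subclass matching the context name
context = ContextConfig( 'MyImage', 'occi_opennebula' )
print type( context ).__name__  # OcciOpennebulaContext

ContextConfig( 'MyImage', 'no_such_context' )  # raises NotImplementedError
```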
diff --git a/WorkloadManagementSystem/private/bootstrap/contextualize-script.bash b/WorkloadManagementSystem/private/bootstrap/contextualize-script.bash
index 3137db69..a4c6b9b9 100644
--- a/WorkloadManagementSystem/private/bootstrap/contextualize-script.bash
+++ b/WorkloadManagementSystem/private/bootstrap/contextualize-script.bash
@@ -8,7 +8,7 @@
if [ $# -ne 14 ]
then
- echo "bash contextualize-script.bash "
+ echo "bash contextualize-script.bash "
exit 1
fi
@@ -17,8 +17,8 @@ vmCertPath=$2
vmKeyPath=$3
vmRunJobAgent=$4
vmRunVmMonitorAgent=$5
-vmRunLogJobAgent=$6
-vmRunLogVmMonitorAgent=$7
+vmRunVmUpdaterAgent=$6
+vmRunLogAgent=$7
cvmfsContextPath=$8
diracContextPath=$9
cvmfs_http_proxy=${10}
@@ -29,8 +29,8 @@ vmStopPolicy=${14}
localVmRunJobAgent=/root/run.job-agent
localVmRunVmMonitorAgent=/root/run.vm-monitor-agent
-localVmRunLogJobAgent=/root/run.log.job-agent
-localVmRunLogVmMonitorAgent=/root/run.log.vm-monitor-agent
+localVmRunVmUpdaterAgent=/root/run.vm-updater-agent
+localVmRunLogAgent=/root/run.log.agent
localCvmfsContextPath=/root/cvmfs-context.sh
localDiracContextPath=/root/dirac-context.sh
@@ -41,8 +41,8 @@ echo "2 $vmCertPath" >> /var/log/contextualize-script.log 2>&1
echo "3 $vmKeyPath" >> /var/log/contextualize-script.log 2>&1
echo "4 $vmRunJobAgent" >> /var/log/contextualize-script.log 2>&1
echo "5 $vmRunVmMonitorAgent" >> /var/log/contextualize-script.log 2>&1
-echo "6 $vmRunLogJobAgent" >> /var/log/contextualize-script.log 2>&1
-echo "7 $vmRunLogVmMonitorAgent" >> /var/log/contextualize-script.log 2>&1
+echo "6 $vmRunVmUpdaterAgent" >> /var/log/contextualize-script.log 2>&1
+echo "7 $vmRunLogAgent" >> /var/log/contextualize-script.log 2>&1
echo "8 $cvmfsContextPath" >> /var/log/contextualize-script.log 2>&1
echo "9 $diracContextPath" >> /var/log/contextualize-script.log 2>&1
echo "10 $cvmfs_http_proxy" >> /var/log/contextualize-script.log 2>&1
@@ -58,8 +58,8 @@ echo ${uniqueId} > /etc/VMID
# 1) download the necessary files:
wget --no-check-certificate -O ${localVmRunJobAgent} ${vmRunJobAgent} >> /var/log/contextualize-script.log 2>&1
wget --no-check-certificate -O ${localVmRunVmMonitorAgent} ${vmRunVmMonitorAgent} >> /var/log/contextualize-script.log 2>&1
-wget --no-check-certificate -O ${localVmRunLogJobAgent} ${vmRunLogJobAgent} >> /var/log/contextualize-script.log 2>&1
-wget --no-check-certificate -O ${localVmRunLogVmMonitorAgent} ${vmRunLogVmMonitorAgent} >> /var/log/contextualize-script.log 2>&1
+wget --no-check-certificate -O ${localVmRunVmUpdaterAgent} ${vmRunVmUpdaterAgent} >> /var/log/contextualize-script.log 2>&1
+wget --no-check-certificate -O ${localVmRunLogAgent} ${vmRunLogAgent} >> /var/log/contextualize-script.log 2>&1
wget --no-check-certificate -O ${localCvmfsContextPath} ${cvmfsContextPath} >> /var/log/contextualize-script.log 2>&1
wget --no-check-certificate -O ${localDiracContextPath} ${diracContextPath} >> /var/log/contextualize-script.log 2>&1
@@ -74,6 +74,6 @@ fi
echo ${cpuTime} > /etc/CPU_TIME
chmod u+x ${localDiracContextPath} >> /var/log/contextualize-script.log 2>&1
-bash ${localDiracContextPath} "${siteName}" "${vmStopPolicy}" "${vmCertPath}" "${vmKeyPath}" "${localVmRunJobAgent}" "${localVmRunVmMonitorAgent}" "${localVmRunLogJobAgent}" "${localVmRunLogVmMonitorAgent}" "${cloudDriver}" >> /var/log/contextualize-script.log 2>&1
+bash ${localDiracContextPath} "${siteName}" "${vmStopPolicy}" "${vmCertPath}" "${vmKeyPath}" "${localVmRunJobAgent}" "${localVmRunVmMonitorAgent}" "${localVmRunVmUpdaterAgent}" "${localVmRunLogAgent}" "${cloudDriver}" >> /var/log/contextualize-script.log 2>&1
exit 0
diff --git a/WorkloadManagementSystem/private/bootstrap/general-DIRAC-context.sh b/WorkloadManagementSystem/private/bootstrap/general-DIRAC-context.sh
index ae3000fe..67f68e19 100644
--- a/WorkloadManagementSystem/private/bootstrap/general-DIRAC-context.sh
+++ b/WorkloadManagementSystem/private/bootstrap/general-DIRAC-context.sh
@@ -8,7 +8,7 @@
if [ $# -ne 9 ]
then
- echo "ERROR: general-DIRAC-context.bash " > /var/log/dirac-context-script.log 2>&1
+ echo "ERROR: general-DIRAC-context.bash " > /var/log/dirac-context-script.log 2>&1
exit 1
fi
@@ -18,8 +18,8 @@ putCertPath=$3
putKeyPath=$4
localVmRunJobAgent=$5
localVmRunVmMonitorAgent=$6
-localVmRunLogJobAgent=$7
-localVmRunLogVmMonitorAgent=$8
+localVmRunVmUpdaterAgent=$7
+localVmRunLogAgent=$8
cloudDriver=$9
cpuTime=`cat /etc/CPU_TIME`
@@ -96,20 +96,25 @@ cpuTime=`cat /etc/CPU_TIME`
cat etc/dirac.cfg >> /var/log/dirac-context-script.log 2>&1
echo >> /var/log/dirac-context-script.log 2>&1
-# start the agents: VirtualMachineMonitor, JobAgent
+# start the agents: VirtualMachineMonitor, JobAgent, VirtualMachineConfigUpdater
cd /opt/dirac
mkdir -p startup/WorkloadManagement_JobAgent/log >> /var/log/dirac-context-script.log 2>&1
mkdir -p startup/WorkloadManagement_VirtualMachineMonitorAgent/log >> /var/log/dirac-context-script.log 2>&1
+ mkdir -p startup/WorkloadManagement_VirtualMachineConfigUpdater/log >> /var/log/dirac-context-script.log 2>&1
mv ${localVmRunJobAgent} startup/WorkloadManagement_JobAgent/run >> /var/log/dirac-context-script.log 2>&1
- mv ${localVmRunLogJobAgent} startup/WorkloadManagement_JobAgent/log/run >> /var/log/dirac-context-script.log 2>&1
+ cp ${localVmRunLogAgent} startup/WorkloadManagement_JobAgent/log/run >> /var/log/dirac-context-script.log 2>&1  # cp, not mv: the same run.log file is installed for three agents
mv ${localVmRunVmMonitorAgent} startup/WorkloadManagement_VirtualMachineMonitorAgent/run >> /var/log/dirac-context-script.log 2>&1
- mv ${localVmRunLogVmMonitorAgent} startup/WorkloadManagement_VirtualMachineMonitorAgent/log/run >> /var/log/dirac-context-script.log 2>&1
+ cp ${localVmRunLogAgent} startup/WorkloadManagement_VirtualMachineMonitorAgent/log/run >> /var/log/dirac-context-script.log 2>&1
+ mv ${localVmRunVmUpdaterAgent} startup/WorkloadManagement_VirtualMachineConfigUpdater/run >> /var/log/dirac-context-script.log 2>&1
+ mv ${localVmRunLogAgent} startup/WorkloadManagement_VirtualMachineConfigUpdater/log/run >> /var/log/dirac-context-script.log 2>&1
chmod 755 startup/WorkloadManagement_JobAgent/log/run
chmod 755 startup/WorkloadManagement_JobAgent/run
chmod 755 startup/WorkloadManagement_VirtualMachineMonitorAgent/log/run
chmod 755 startup/WorkloadManagement_VirtualMachineMonitorAgent/run
+ chmod 755 startup/WorkloadManagement_VirtualMachineConfigUpdater/log/run
+ chmod 755 startup/WorkloadManagement_VirtualMachineConfigUpdater/run
echo "rights and permissions to control and work JobAgent dirs" >> /var/log/dirac-context-script.log 2>&1
mkdir -p /opt/dirac/control/WorkloadManagement/JobAgent >> /var/log/dirac-context-script.log 2>&1
@@ -124,7 +129,7 @@ cpuTime=`cat /etc/CPU_TIME`
ls -l /opt/dirac/work/WorkloadManagement >> /var/log/dirac-context-script.log 2>&1
echo >> /var/log/dirac-context-script.log 2>&1
- echo "runsvdir startup, have a look to DIRAC JobAgent and VirtualMachineMonitorAgent logs" >> /var/log/dirac-context-script.log 2>&1
+ echo "runsvdir startup, have a look to DIRAC JobAgent, VirtualMachineMonitorAgent and VirtualMachineConfigUpdater logs" >> /var/log/dirac-context-script.log 2>&1
runsvdir -P /opt/dirac/startup 'log: DIRAC runsv' &
#
diff --git a/release.notes b/release.notes
index 7aa7bd33..89a93a9a 100644
--- a/release.notes
+++ b/release.notes
@@ -1,3 +1,7 @@
+[v0r8]
+ NEW: VM local dirac.cfg updater agent (VirtualMachineConfigUpdater) for pilot/dirac release updates
+ CHANGE: OcciImage and Occi09 migrated to the new WMS/Utilities configuration style
+
[v0r7]
NEW: endpoint vmPolicy "static" -> slots driven, indicated by maxEndpointInstances, endpoint CS parameter
endpoint vmPolicy "elastic" -> jobs driven, one by one