FAQ Search Today's Posts Mark Forums Read
» Video Reviews

» Linux Archive

Linux-archive is a website aiming to archive linux email lists and to make them easily accessible for linux users/developers.


» Sponsor

» Partners

» Sponsor

Go Back   Linux Archive > Redhat > Cluster Development

 
 
LinkBack Thread Tools
 
Old 10-13-2010, 08:08 PM
Chris Feist
 
Default Current CLI status

I've attached the latest version of the command line interface which you can use
to configure your cluster from the command line.


You can see most of the options with 'ccs.py -h' and you can configure most
major parts of the cluster.conf file (nodes, fencing, most services, etc.).


Please take a look and let me know what you feel may be missing or what should
be changed.


Thanks,
Chris
#!/usr/bin/python

import getopt, sys
import socket, ssl
from xml.dom import minidom
import logging
import os
import os.path

# TCP port the ricci daemon listens on (fixed by the ricci package).
RICCI_PORT = 11111
# RelaxNG schema used to enumerate fence devices and their options.
CLUSTERRNG = "cluster.rng.in"

# Module-level state set from command-line options in main().
password = None   # root password for ricci authentication (--password)
debug = False     # when True, log_msg() echoes protocol traffic (-d)
propagate = False # when True, set_cluster.conf is propagated cluster-wide

def main(argv):
    """Parse command-line options and dispatch to the requested
    cluster.conf operation(s).

    argv is sys.argv[1:].  Exits 2 on a bad option, and individual
    operations may sys.exit(1) on their own errors.
    """
    hostname = "localhost"
    getconf = False
    status = fence = start = stop = False
    listnodes = listservices = listdomains = False
    addnode = removenode = getversion = False
    incrementversion = setversion = False
    addmethod = removemethod = createcluster = False
    addfencedev = removefencedev = False
    addfenceinst = removefenceinst = False
    lsfencedev = lsfenceinst = False
    addfailoverdomain = removefailoverdomain = False
    addservice = False
    addsubservice = False
    passwordset = False
    global password
    global debug
    global propagate
    # logging.basicConfig(level=logging.DEBUG)
    try:
        # "p:" added to the short-option spec: usage() documents -p as the
        # short form of --password, but it was never accepted by getopt.
        opts, args = getopt.getopt(argv, "dh:p:", ["help", "host=", "getconf", "status", "fence=", "start", "stop", "lsnodes",
            "lsservices", "listdomains", "addnode=", "removenode=", "getversion", "setversion=", "incversion",
            "createcluster=", "password=", "addmethod=", "removemethod=", "addfencedev=", "removefencedev=",
            "addfenceinst=", "removefenceinst=", "lsfencedev", "lsfenceinst=", "propagate", "addfailoverdomain=",
            "removefailoverdomain=", "addservice=", "addsubservice="])
    except getopt.GetoptError:
        usage()
        sys.exit(2)
    for opt, arg in opts:
        if opt == ("--help"):
            usage()
            sys.exit()
        elif opt in ("--host", "-h"):
            hostname = arg
            logging.debug("Hostname = %s" % hostname)
        elif opt in ("--password", "-p"):
            passwordset = True
            password = arg
        elif opt in ("--getconf",): getconf = True
        elif opt in ("--status",): status = True
        elif opt in ("--fence",): fence = True; node = arg
        elif opt in ("--start",): start = True
        elif opt in ("--stop",): stop = True
        elif opt in ("--lsnodes",): listnodes = True
        elif opt in ("--lsservices",): listservices = True
        elif opt in ("--listdomains",): listdomains = True
        elif opt in ("--addnode",): addnode = True; node = arg
        elif opt in ("--removenode",): removenode = True; node = arg
        elif opt in ("--getversion",): getversion = True
        elif opt in ("--setversion",): setversion = True; version = arg
        elif opt in ("--incversion",): incrementversion = True
        elif opt in ("--propagate",): propagate = True
        elif opt in ("--createcluster",):
            createcluster = True
            clustername = arg
        elif opt in ("--addmethod",):
            addmethod = True
            method = arg
            options = args  # remaining positional args, e.g. the node name
        elif opt in ("--removemethod",):
            removemethod = True
            method = arg
            options = args
        elif opt in ("--addfencedev",):
            addfencedev = True
            name = arg
            options = args
        elif opt in ("--removefencedev",): removefencedev = True; name = arg
        elif opt in ("--addfenceinst",):
            addfenceinst = True
            name = arg
            options = args
        elif opt in ("--removefenceinst",):
            removefenceinst = True
            name = arg
            options = args
        elif opt in ("--lsfencedev",): lsfencedev = True
        elif opt in ("--lsfenceinst",): lsfenceinst = True; instance = arg
        elif opt in ("--addfailoverdomain",):
            addfailoverdomain = True
            name = arg
            options = args
        elif opt in ("--removefailoverdomain",): removefailoverdomain = True; name = arg
        elif opt in ("--addservice",):
            addservice = True
            name = arg
            options = args
        elif opt in ("--addsubservice",):
            addsubservice = True
            name = arg
            options = args
        elif opt in ("-d",): debug = True

    if (getconf): get_cluster_conf(hostname)
    if (status): get_cluster_status(hostname)
    if (fence): fence_node(hostname, node)
    if (stop): stop_node(hostname)
    if (start): start_node(hostname)
    if (listnodes): list_nodes(hostname)
    if (listservices): list_services(hostname)
    if (listdomains): list_domains(hostname)
    if (addnode): add_node(hostname, node)
    if (removenode): remove_node(hostname, node)
    if (getversion): print(get_version(hostname))
    if (setversion): set_version(hostname, version)
    if (incrementversion): increment_version(hostname)
    if (createcluster): create_cluster(hostname, clustername)
    if (addmethod): add_method(hostname, method, options)
    if (removemethod): remove_method(hostname, method, options)
    if (addfencedev): add_fencedev(hostname, name, options)
    if (removefencedev): remove_fencedev(hostname, name)
    if (addfenceinst): add_fenceinst(hostname, name, options)
    if (removefenceinst): remove_fenceinst(hostname, name, options)
    if (lsfencedev): ls_fencedev()
    # Fixed: previously passed the stale loop variable `arg`; `instance`
    # is the value captured when --lsfenceinst was parsed.
    if (lsfenceinst): ls_fenceinst(instance)
    if (addfailoverdomain): add_failoverdomain(hostname, name, options)
    if (removefailoverdomain): remove_failoverdomain(hostname, name)
    if (addservice): add_service(hostname, name, options)
    if (addsubservice): add_subservice(hostname, name, options)

def usage():
    """Print command-line help for ccs to stdout."""
    # NOTE(review): a truncated stray line ("scp and put them in the
    # current directory") at the end of this text in the archived source
    # was dropped as extraction garbage.
    print("""Usage: ccs [OPTION]...
Remotely control cluster infrastructure.

  --help              Display this help and exit
  -h, --host          Ricci host to connect to (defaults to localhost)
  -p, --password      Root password for node running ricci
  --getconf           Print current cluster.conf file
  --status            Print current cluster status
  --createcluster <cluster>
                      Create a new cluster.conf
  --addnode <node>    Add node <node> to the cluster
  --removenode <node>
                      Remove a node from the cluster
  --addmethod <method> <node>
                      Add a method to a specific node
  --removemethod <method> <node>
                      Remove a method from a specific node
  --addfencedev <device> <options>
                      Add fence device
  --removefencedev <fence device name>
                      Remove fence device
  --addfenceinst <fence device name> <node> <method> <options>
                      Add fence instance
  --removefenceinst <fence device name> <node> <method>
                      Remove all instances of the fence device listed from
                      the given method and node
  --addfailoverdomain <name> [restricted] [ordered] [nofailback]
                      Add failover domain
  --removefailoverdomain <name>
                      Remove failover domain
  --addfailoverdomainnode <failover domain> <node> [priority=xx]
                      Add node to given failover domain
  --addservice <servicename> [service options] ...
                      Add service to cluster
  --addsubservice <servicename> <service type> [service options] ...
                      Add individual services
  --lsfencedev        Lists available fence devices and their options
  --lsfenceinst <fence device>
                      Lists available fence instances options for given
                      fence device
  --lsnodes           List all nodes in the cluster
  --lsservices        List all services in the cluster
  --start             Start cluster services on host
  --stop              Stop cluster services on host
  --fence <node>      Fence the node <node>
  --getversion        Get the current cluster.conf version
  --setversion        Set the cluster.conf version
  --incversion        Increment the cluster.conf version by 1
""")

def fence_node(hostname, node):
    """Ask the ricci host to fence the named node and print the XML reply."""
    xml = send_ricci_command(hostname, "cluster", "fence_node", ("nodename", "string", node))
    print(xml)

def stop_node(hostname):
    """Stop cluster services on the ricci host and print the XML reply."""
    xml = send_ricci_command(hostname, "cluster", "stop_node")
    print(xml)

def start_node(hostname):
    """Start cluster services on the ricci host and print the XML reply."""
    xml = send_ricci_command(hostname, "cluster", "start_node")
    print(xml)

def get_cluster_conf(hostname):
    """Fetch cluster.conf from the host and print it pretty-printed."""
    xml = get_cluster_conf_xml(hostname)
    # Repaired from the garbled archive: tag name is 'cluster' and the
    # truncated newl argument is the conventional newline.
    xml = minidom.parseString(xml).getElementsByTagName('cluster')[0].toprettyxml(indent=' ', newl='\n')
    print(xml)

def get_cluster_status(hostname):
    """Query live cluster status via ricci and print it pretty-printed."""
    xml = send_ricci_command(hostname, "cluster", "status")
    # Repaired from the garbled archive: tag name is 'cluster' and the
    # truncated newl argument is the conventional newline.
    xml = minidom.parseString(xml).getElementsByTagName('cluster')[0].toprettyxml(indent=' ', newl='\n')
    print(xml)

def list_nodes(hostname):
    """Print the name attribute of every <clusternode> in cluster.conf."""
    xml = get_cluster_conf_xml(hostname)
    dom = minidom.parseString(xml)
    for node in dom.getElementsByTagName('clusternode'):
        print(node.getAttribute("name"))

def list_services(hostname):
    """Print every <service> tree in cluster.conf, indented by depth."""
    xml = get_cluster_conf_xml(hostname)
    dom = minidom.parseString(xml)
    for node in dom.getElementsByTagName('service'):
        print_services_map(node, 0)

def print_services_map(node, level):
    """Recursively print an element tree as 'tag: attrvalues', indenting
    two spaces per nesting level.  Text nodes are skipped."""
    num_spaces = level * 2
    prefix = ""

    # Whitespace between elements shows up as text nodes; ignore them.
    if node.nodeType == minidom.Node.TEXT_NODE:
        return
    for i in range(num_spaces):
        prefix = " " + prefix

    # Concatenate the values of all attributes for display.
    nodeattr = ""
    if node.attributes != None:
        length = node.attributes.length
        for i in range(length):
            nodeattr = nodeattr + node.attributes.item(i).value

    print(prefix + node.tagName + ": " + nodeattr)
    for cn in node.childNodes:
        print_services_map(cn, level + 1)

def list_domains(hostname):
    """Print the name of every <failoverdomain> in cluster.conf."""
    xml = get_cluster_conf_xml(hostname)
    dom = minidom.parseString(xml)
    for node in dom.getElementsByTagName('failoverdomain'):
        print(node.getAttribute("name"))

# Add a node to the cluster.conf
# Before adding a node we need to verify another node
# with the same name doesn't already exist
def add_node(hostname, node_to_add):
    """Append a new <clusternode> with one vote and the first free nodeid.

    Exits 1 if a node with the same name already exists.
    """
    nodeid_list = set()

    dom = minidom.parseString(get_cluster_conf_xml(hostname))
    for node in dom.getElementsByTagName('clusternode'):
        if (node.getAttribute("name") == node_to_add):
            print("Node '%s' already exists in cluster.conf" % node_to_add)
            sys.exit(1)
        # Bug fix: set.update(string) added each CHARACTER of the nodeid,
        # so e.g. id "12" made "1" and "2" look taken.  add() stores the
        # whole id string.
        nodeid_list.add(node.getAttribute("nodeid"))
    node = dom.createElement("clusternode")
    node.setAttribute("name", node_to_add)

    # Use the first nodeid above 0 that isn't already used
    nodeid = 0
    while (True):
        nodeid = nodeid + 1
        if (str(nodeid) not in nodeid_list):
            break

    node.setAttribute("nodeid", str(nodeid))
    node.setAttribute("votes", "1")
    dom.getElementsByTagName("clusternodes")[0].appendChild(node)
    set_cluster_conf(hostname, dom.toxml())
    print("Node %s added." % (node_to_add))

def remove_node(hostname, node_to_remove):
    """Remove the named <clusternode> from cluster.conf; exit 1 if absent."""
    nodeFound = False

    dom = minidom.parseString(get_cluster_conf_xml(hostname))
    for node in dom.getElementsByTagName('clusternode'):
        if (node.getAttribute("name") == node_to_remove):
            node.parentNode.removeChild(node)
            nodeFound = True

    if (nodeFound == False):
        print("Unable to find node %s" % node_to_remove)
        sys.exit(1)

    set_cluster_conf(hostname, dom.toxml())

def get_version(hostname):
    """Return cluster.conf's config_version attribute as a string."""
    dom = minidom.parseString(get_cluster_conf_xml(hostname))
    return dom.getElementsByTagName('cluster')[0].getAttribute("config_version")

def set_version(hostname, version):
    """Set config_version to `version` without the usual auto-increment."""
    dom = minidom.parseString(get_cluster_conf_xml(hostname))
    dom.getElementsByTagName('cluster')[0].setAttribute("config_version", version)
    # increment=False: the caller picked the version explicitly.
    set_cluster_conf(hostname, dom.toxml(), False)

def increment_version(hostname):
    """Bump config_version by one and print the new value."""
    new_version = int(get_version(hostname)) + 1
    set_version(hostname, str(new_version))
    print(new_version)

def get_cluster_conf_xml(hostname):
    """Return the <cluster> element of the host's cluster.conf as an XML
    string, or a fresh minimal configuration if none exists yet."""
    xml = send_ricci_command(hostname, "cluster", "get_cluster.conf")
    dom = minidom.parseString(xml)
    if dom.getElementsByTagName('cluster').length > 0:
        return dom.getElementsByTagName('cluster')[0].toxml()
    else:
        return empty_cluster_conf()

# Create a minimal cluster.conf file similiar to the one
# created by system-config-cluster
def empty_cluster_conf(name="cluster"):
impl = minidom.getDOMImplementation()
newdoc = impl.createDocument(None, "cluster", None)

top = newdoc.documentElement
top.setAttribute('config_version','1')
top.setAttribute('name',name)
fence_daemon = newdoc.createElement("fence_daemon")
fence_daemon.setAttribute('post_fail_delay','0')
fence_daemon.setAttribute('post_join_delay','3')
clusternodes = newdoc.createElement("clusternodes")
cman = newdoc.createElement("cman")
fencedevices = newdoc.createElement("fencedevices")
rm = newdoc.createElement("rm")
failoverdomains = newdoc.createElement("failoverdomains")
resources = newdoc.createElement("resources")

top.appendChild(fence_daemon)
top.appendChild(clusternodes)
top.appendChild(cman)
top.appendChild(fencedevices)
rm.appendChild(failoverdomains)
rm.appendChild(resources)
top.appendChild(rm)

return newdoc.toprettyxml()

def create_cluster(hostname, clustername):
    """Push a brand-new minimal cluster.conf named `clustername` to the host."""
    xml = empty_cluster_conf(clustername)
    set_cluster_conf(hostname, xml)

def add_method(hostname, method, options):
    """Add a fence <method> named `method` to the node given in options[0].

    Exits 2 on bad usage, 1 if the node is missing or the method already
    exists.
    """
    method_found = False
    node_found = False

    if len(options) != 1:
        usage()
        sys.exit(2)

    nodename = options[0]

    dom = minidom.parseString(get_cluster_conf_xml(hostname))
    for node in dom.getElementsByTagName('clusternode'):
        if (node.getAttribute("name") == nodename):
            node_found = True
            for methodnode in node.getElementsByTagName("method"):
                if methodnode.getAttribute("name") == method:
                    method_found = True
                    break
            break

    if node_found == False:
        print("Node '%s' does not currently exist in cluster.conf." % (nodename))
        sys.exit(1)

    if method_found == True:
        print("Method '%s' already exists in cluster.conf." % (method))
        sys.exit(1)

    # `node` is still bound to the matching clusternode from the loop above.
    fencenodes = node.getElementsByTagName("fence")
    if len(fencenodes) == 0:
        fencenode = dom.createElement("fence")
    else:
        fencenode = fencenodes[0]

    methodnode = dom.createElement("method")
    methodnode.setAttribute("name", method)
    fencenode.appendChild(methodnode)
    node.appendChild(fencenode)
    set_cluster_conf(hostname, dom.toxml())
    print("Method %s added to %s." % (method, nodename))

def remove_method(hostname, method, options):
    """Remove the fence <method> named `method` from the node in options[0].

    Exits 2 on bad usage, 1 if the node or method is missing.
    """
    method_found = False
    node_found = False

    if len(options) != 1:
        usage()
        sys.exit(2)

    nodename = options[0]
    dom = minidom.parseString(get_cluster_conf_xml(hostname))
    for node in dom.getElementsByTagName('clusternode'):
        if (node.getAttribute("name") == nodename):
            node_found = True
            for methodnode in node.getElementsByTagName("method"):
                if methodnode.getAttribute("name") == method:
                    method_found = True
                    break
            break

    if node_found == False:
        print("Node %s does not exist in cluster.conf." % (nodename))
        sys.exit(1)
    if method_found == False:
        print("Method %s is not present for %s." % (method, nodename))
        sys.exit(1)

    # `methodnode` is still bound to the matching method from the loop.
    methodnode.parentNode.removeChild(methodnode)
    set_cluster_conf(hostname, dom.toxml())
    print("Method %s removed from %s." % (method, nodename))

def add_fencedev(hostname, name, options):
    """Add a <fencedevice> named `name`; options are attr=value pairs."""
    dom = minidom.parseString(get_cluster_conf_xml(hostname))

    # Verify fencedevices section exists in cluster.conf
    fencedevices = dom.getElementsByTagName('fencedevices')
    if len(fencedevices) == 0:
        dom.getElementsByTagName('cluster')[0].appendChild(dom.createElement("fencedevices"))
    elif len(fencedevices) > 1:
        print("Error: Too many fencedevices elements in cluster.conf")
        sys.exit(1)

    # Verify fence device with same name does not already exist
    for fencedev in dom.getElementsByTagName('fencedevice'):
        if fencedev.getAttribute("name") == name:
            print("Fence device '%s' already exists in cluster.conf." % name)
            sys.exit(1)

    newfencedev = dom.createElement("fencedevice")
    for option in options:
        (attr, sep, val) = option.partition('=')
        if (sep == ""):
            print("Invalid option: %s" % option)
            sys.exit(1)
        newfencedev.setAttribute(attr, val)

    # Set name last so a stray name=... option cannot override it.
    newfencedev.setAttribute("name", name)
    fencedevelem = dom.getElementsByTagName('fencedevices')[0]
    fencedevelem.appendChild(newfencedev)
    set_cluster_conf(hostname, dom.toxml())

def remove_fencedev(hostname, name):
    """Remove the <fencedevice> named `name`; exit 1 if it does not exist."""
    fencedev_found = False
    dom = minidom.parseString(get_cluster_conf_xml(hostname))

    # Verify fence device exists before attempting to remove
    for fencedev in dom.getElementsByTagName('fencedevice'):
        if fencedev.getAttribute("name") == name:
            fencedev.parentNode.removeChild(fencedev)
            fencedev_found = True

    if fencedev_found == False:
        print("Fence device '%s' does not exist in cluster.conf." % name)
        sys.exit(1)

    set_cluster_conf(hostname, dom.toxml())

def add_fenceinst(hostname, name, options):
    """Add a fence instance (<device>) for device `name` under
    node options[0] / method options[1]; options[2:] are attr=value pairs.

    Exits 2 on bad usage, 1 if the device or method is missing.
    """
    fencedev_found = method_found = False

    if len(options) < 2:
        usage()
        sys.exit(2)

    nodename = options[0]
    methodname = options[1]
    dom = minidom.parseString(get_cluster_conf_xml(hostname))

    # Verify fence device exists
    for fencedev in dom.getElementsByTagName('fencedevice'):
        if fencedev.getAttribute("name") == name:
            fencedev_found = True
            break

    # Verify method exists for specified node
    for node in dom.getElementsByTagName('clusternode'):
        if node.getAttribute("name") == nodename:
            for method in node.getElementsByTagName('method'):
                if method.getAttribute("name") == methodname:
                    method_found = True
                    break
            break

    if fencedev_found == False:
        print("Fence device '%s' not found." % name)
        sys.exit(1)

    if method_found == False:
        print("Method '%s' not found in node '%s'." % (methodname, nodename))
        sys.exit(1)

    newfenceinst = dom.createElement("device")
    for option in options[2:]:
        (attr, sep, val) = option.partition('=')
        if (sep == ""):
            print("Invalid option: %s" % option)
            sys.exit(1)
        newfenceinst.setAttribute(attr, val)

    # Set name last so a stray name=... option cannot override it.
    newfenceinst.setAttribute("name", name)
    # `method` is still bound to the matching method from the loop above.
    method.appendChild(newfenceinst)
    set_cluster_conf(hostname, dom.toxml())

def remove_fenceinst(hostname, name, options):
    """Remove every <device> instance of fence device `name` from
    node options[0] / method options[1].

    Exits 2 on bad usage, 1 if no matching instance was found.
    """
    fenceinst_found = False

    if len(options) < 2:
        usage()
        sys.exit(2)

    nodename = options[0]
    methodname = options[1]
    dom = minidom.parseString(get_cluster_conf_xml(hostname))

    # Verify fence instance exists before attempting to remove
    for node in dom.getElementsByTagName('clusternode'):
        if node.getAttribute("name") == nodename:
            for method in node.getElementsByTagName('method'):
                if method.getAttribute("name") == methodname:
                    for instance in method.getElementsByTagName('device'):
                        if instance.getAttribute("name") == name:
                            instance.parentNode.removeChild(instance)
                            fenceinst_found = True

    if fenceinst_found == False:
        print("Fence instance '%s' for node '%s' in method '%s' does not exist in cluster.conf." % (name, nodename, methodname))
        sys.exit(1)

    set_cluster_conf(hostname, dom.toxml())

def add_failoverdomain(hostname, name, options):
    """Add a <failoverdomain> named `name`.  options may contain the flags
    "restricted", "ordered" and "nofailback" (each stored as 1, else 0)."""
    dom = minidom.parseString(get_cluster_conf_xml(hostname))

    failoverdomains_array = dom.getElementsByTagName("failoverdomains")

    # Verify failoverdomains and rm exist in cluster.conf file
    if len(failoverdomains_array) > 0:
        failoverdomains = failoverdomains_array[0]
    else:
        rm_array = dom.getElementsByTagName("rm")
        if len(rm_array) == 0:
            rm = dom.getElementsByTagName("cluster")[0].appendChild(dom.createElement("rm"))
        else:
            rm = rm_array[0]
        failoverdomains = rm.appendChild(dom.createElement("failoverdomains"))

    # Verify that there already isn't a failover domain with the same name
    failoverdomain_found = False
    for failoverdomain in failoverdomains.getElementsByTagName("failoverdomain"):
        if failoverdomain.getAttribute("name") == name:
            failoverdomain_found = True
            break

    if failoverdomain_found:
        print("Failover domain '%s' already exists." % (name))
        sys.exit(1)

    failoverdomain = failoverdomains.appendChild(dom.createElement("failoverdomain"))
    failoverdomain.setAttribute("name", name)

    if "restricted" in options: failoverdomain.setAttribute("restricted", "1")
    else: failoverdomain.setAttribute("restricted", "0")
    if "ordered" in options: failoverdomain.setAttribute("ordered", "1")
    else: failoverdomain.setAttribute("ordered", "0")
    if "nofailback" in options: failoverdomain.setAttribute("nofailback", "1")
    else: failoverdomain.setAttribute("nofailback", "0")

    set_cluster_conf(hostname, dom.toxml())

def remove_failoverdomain(hostname, name):
    """Remove the named <failoverdomain>; exit 1 if absent or if there is
    no failoverdomains section at all."""
    dom = minidom.parseString(get_cluster_conf_xml(hostname))

    failoverdomains = dom.getElementsByTagName("failoverdomains")
    if len(failoverdomains) > 0:
        for failoverdomain in failoverdomains[0].getElementsByTagName("failoverdomain"):
            if failoverdomain.getAttribute("name") == name:
                failoverdomains[0].removeChild(failoverdomain)
                set_cluster_conf(hostname, dom.toxml())
                return
    else:
        print("No failoverdomains section found in cluster.conf")
        sys.exit(1)

    print("Unable to find failover domain '%s' in cluster.conf file." % (name))
    sys.exit(1)

def ls_fencedev():
    """Print every fence agent declared in the cluster RelaxNG schema."""
    # `with` closes the schema file even on parse errors (the original
    # leaked the file handle).
    with open(CLUSTERRNG) as rng:
        dom = minidom.parseString(rng.read())
    for elem in dom.getElementsByTagName("group"):
        if elem.getAttribute("rha:fence_agent"):
            print(elem.getAttribute("rha:fence_agent"))

def ls_fenceinst(fencedev):
    """Print name and description of each user-settable attribute of the
    given fence agent, from the cluster RelaxNG schema."""
    # Attributes that are agent plumbing rather than instance options;
    # hoisted out of the loop (it is invariant).
    invisible_attrs = ["verbose", "debug", "version", "help",
                       "option", "action", "separator"]
    # `with` closes the schema file even on parse errors (the original
    # leaked the file handle).
    with open(CLUSTERRNG) as rng:
        dom = minidom.parseString(rng.read())
    for elem in dom.getElementsByTagName("group"):
        if elem.getAttribute("rha:fence_agent") == fencedev:
            for attr in elem.getElementsByTagName("attribute"):
                attrname = attr.getAttribute("name")
                if attrname not in invisible_attrs:
                    print(attr.getAttribute("name") + " - " + attr.getAttribute("rha:description"))

def add_service(hostname, name, options):
    """Add a <service> named `name` under <rm>; options are attr=value pairs."""
    dom = minidom.parseString(get_cluster_conf_xml(hostname))

    # Verify rm section exists in cluster.conf
    rm_array = dom.getElementsByTagName("rm")
    if len(rm_array) == 0:
        rm = dom.getElementsByTagName("cluster")[0].appendChild(dom.createElement("rm"))
    else:
        rm = rm_array[0]

    # Verify service doesn't exist with the same name
    for service in dom.getElementsByTagName('service'):
        if service.getAttribute("name") == name:
            print("Service '%s' already exists in cluster.conf." % name)
            sys.exit(1)

    service = dom.getElementsByTagName('rm')[0].appendChild(dom.createElement("service"))
    for option in options:
        (attr, sep, val) = option.partition('=')
        if (sep == ""):
            print("Invalid option: %s" % option)
            sys.exit(1)
        service.setAttribute(attr, val)

    # Set name last so a stray name=... option cannot override it.
    service.setAttribute("name", name)
    set_cluster_conf(hostname, dom.toxml())

def add_subservice(hostname, name, options):
    """Append a child resource to the <service> named `name`.

    options[0] is the resource element type; remaining options are
    attr=value pairs.  Exits 1 if the service does not exist.
    """
    dom = minidom.parseString(get_cluster_conf_xml(hostname))
    serviceFound = False

    # Verify top level service exists
    for service in dom.getElementsByTagName("service"):
        if service.getAttribute("name") == name:
            serviceFound = True
            break

    if serviceFound == False:
        print("Unable to find service: %s" % name)
        sys.exit(1)

    subservice_type = options.pop(0)
    # `service` is still bound to the matching element from the loop above.
    subservice = service.appendChild(dom.createElement(subservice_type))
    for option in options:
        (attr, sep, val) = option.partition('=')
        if (sep == ""):
            print("Invalid option: %s" % option)
            sys.exit(1)
        subservice.setAttribute(attr, val)
    set_cluster_conf(hostname, dom.toxml())

# Set the cluster.conf file and increment the version
# If global variable propagate is true, propagate to entire cluster
def set_cluster_conf(hostname, xml, increment = True):
    """Push `xml` as the new cluster.conf via ricci.

    Bumps config_version by one unless increment is False (used when the
    caller sets the version explicitly).  Honors the module-level
    `propagate` flag.
    """
    dom = minidom.parseString(xml)

    if increment:
        version = dom.getElementsByTagName("cluster")[0].getAttribute("config_version")
        version = int(version) + 1
        dom.getElementsByTagName("cluster")[0].setAttribute("config_version", str(version))
        xml = dom.toxml()

    # Strip the XML declaration; the document is embedded inside the
    # ricci request envelope.  (Second replace argument was truncated to
    # nothing in the archived source; empty string is the evident intent.)
    xml = xml.replace('<?xml version="1.0" ?>', '')
    log_msg(xml)

    if propagate:
        log_msg(send_ricci_command(hostname, "cluster", "set_cluster.conf", ("cluster.conf", "xml", "", xml), ("propagate", "boolean", "true")))
    else:
        log_msg(send_ricci_command(hostname, "cluster", "set_cluster.conf", ("cluster.conf", "xml", "", xml)))


def send_ricci_command(hostname, module, command, *vars):
    """Send a process_batch request for module/command to the ricci host
    and return the <function_response> element of the reply as XML.

    Each extra argument is a (name, type, value[, body]) tuple rendered
    as a <var> element.  Authenticates first when a --password was given.
    """
    global password
    variables = ""

    for value in vars:
        variables = variables + '<var mutable="false" name="%s" type="%s" value="%s">' % (value[0], value[1], value[2])
        if (len(value) > 3):
            variables = variables + value[3]
        variables = variables + "</var>"

    # If a password is set, then we authenticate
    if password != None:
        msg = '<ricci function="authenticate" password="%s" version="1.0"/>' % password
        res = send_to_ricci(hostname, msg)
        dom = minidom.parseString(res[1])
        ricci_elem = dom.getElementsByTagName('ricci')
        if (ricci_elem[0].getAttribute("authenticated") != "true"):
            print("Unable to authenticate with ricci node, please check root password.")
            sys.exit(1)

    msg = '<ricci function="process_batch" async="false" version="1.0"><batch><module name="%s"><request API_version="1.0"><function_call name="%s">%s</function_call></request></module></batch></ricci>' % (module, command, variables)
    res = send_to_ricci(hostname, msg)
    # NOTE(review): the replace() arguments were garbled in the archived
    # source; stripping newlines before parsing is the most plausible
    # original — confirm against upstream history.
    dom = minidom.parseString(res[1].replace('\n', ''))
    xml = dom.getElementsByTagName('function_response')[0].toxml()
    return xml

def send_to_ricci(hostname, msg):
    """Open an SSL connection to the ricci daemon on `hostname`, send `msg`
    and read the reply.  Returns (greeting, reply) as raw strings.

    On first use a client key/certificate pair is generated under
    ~/.ricci with openssl.  Exits 1 if the connection fails.
    """
    cert = os.path.expanduser("~/.ricci/cacert.pem")
    key = os.path.expanduser("~/.ricci/privkey.pem")
    config = os.path.expanduser("~/.ricci/cacert.config")
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)

    # Make sure we have a client certificate and private key
    # If not we need to autogenerate them (including creating an
    # openssl configuration file)
    if (os.path.isfile(cert) == False or os.path.isfile(key) == False):
        print("Autogenerating private key and certificate.")
        if not os.path.exists(os.path.expanduser("~/.ricci")):
            os.mkdir(os.path.expanduser("~/.ricci"), 0o700)
        f = open(config, 'w')
        f.write("""
[ req ]
distinguished_name = req_distinguished_name
attributes = req_attributes
prompt = no

[ req_distinguished_name ]
C = US
ST = State or Province
L = Locality
O = Organization Name
OU = Organizational Unit Name
CN = Common Name
emailAddress = root@localhost

[ req_attributes ]
""")
        f.close()
        os.system("/usr/bin/openssl genrsa -out %s 2048" % key)
        os.system("/usr/bin/openssl req -new -x509 -key %s -out %s -days 1825 -config ~/.ricci/cacert.config" % (key, cert))

    ss = ssl.wrap_socket(s, key, cert)
    try:
        ss.connect((hostname, RICCI_PORT))
    except socket.error:
        print("Unable to connect to %s, make sure the ricci server is started." % hostname)
        sys.exit(1)

    log_msg("***Sending to ricci server:")
    log_msg(msg)
    log_msg("***Sending End")
    logging.debug("Connected...")
    # ricci sends an XML greeting immediately after the handshake.
    res1 = ss.read(1024)
    logging.debug("Writing...")
    logging.debug(msg)
    ss.write(msg)
    logging.debug("Writen...")
    res2 = ''
    while True:
        logging.debug("Waiting to read...")
        buff = ss.read(10485760)
        logging.debug(buff)
        logging.debug("Read...")
        if buff == '':
            break
        res2 += buff
        # Keep reading until the accumulated reply parses as a complete
        # XML document; partial reads raise and we loop for more data.
        try:
            minidom.parseString(res2)
            break
        except Exception:
            pass
    log_msg("***Received from ricci server")
    log_msg(res2)
    log_msg("***Receive End")
    return res1, res2

def log_msg(message):
    """Print `message` only when the module-level debug flag (-d) is set."""
    global debug

    if debug == True:
        print(message)

# Script entry point: pass everything after the program name to main().
if __name__ == "__main__":
    main(sys.argv[1:])
<grammar datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes"
xmlns="http://relaxng.org/ns/structure/1.0"
xmlns:rha="http://redhat.com/~pkennedy/annotation_namespace/cluster_conf_annot_namespace">

<!-- The cluster.conf schema follows this outline:

cluster
- cman
- totem
- quorumd
- fence_daemon
- fence_xvmd
- dlm
- gfs_controld
- group
- logging
- clusternodes
- fencedevices
- rm
- clvmd

Element definitions:
- Resource
- Fence

To validate your cluster.conf against this schema, run:

xmllint --relaxng cluster.rng /path/to/cluster.conf

-->

<start>
<element name="cluster" rha:description="Defines cluster properties, and
contains all other configuration. cluster.conf(5)">
<attribute name="name" rha:description="Name of the cluster.
cluster.conf(5)"/>
<attribute name="config_version" rha:description="Revision level
of cluster.conf file. cluster.conf(5)"/>
<interleave>

<!-- cman block -->
<optional>
<element name="cman" rha:description="The cman element contains
attributes that define the following cluster-wide parameters and
behaviors: whether the cluster is a two-node cluster, expected
votes, user-specified multicast address, and logging.">
<optional>
<attribute name="two_node" rha:description="The two_node attribute
allows you to configure a cluster with only two
nodes. Ordinarily, the loss of quorum after one of two nodes
fails prevents the remaining node from continuing (if both
nodes have one vote.) To enable a two-node cluster, set the
two_node value equal to 1. If the two_node value is enabled,
the expected_votes value must be set to 1." rha:sample="1"/>
</optional>
<optional>
<attribute name="expected_votes" rha:description="The expected
votes value is used by cman to determine quorum. The cluster
is quorate if the sum of votes of members is over
half of the expected votes value. By default, cman sets the
expected votes value to be the sum of votes of all nodes listed
in cluster.conf. This can be overridden by setting an explicit
expected_votes value." rha:sample="4">
<data type="positiveInteger"/>
</attribute>
</optional>
<optional>
<attribute name="upgrading" rha:description="Set this if you are
performing a rolling upgrade of the cluster between major
releases." rha:sample="no"/>
</optional>
<optional>
<attribute name="disallowed" rha:description="Set this to 1 to enable
cman's Disallowed mode. This is usually only needed for
backwards compatibility." rha:sample="1">
<data type="nonNegativeInteger"/>
</attribute>
</optional>
<optional>
<attribute name="quorum_dev_poll" rha:description="The amount of time
after a qdisk poll, in milliseconds, before a quorum disk is
considered dead. The quorum disk daemon, qdisk, periodically
sends hello messages to cman and ais, indicating that qdisk
is present. If qdisk takes more time to send a hello message
to cman and ais than by quorum_dev_poll, then cman declares
qdisk dead and prints a message indicating that connection to
the quorum device has been lost." rha:sample="50000">
<data type="nonNegativeInteger"/>
</attribute>
</optional>
<!--FIXME: Clarify the following. What is meant by "service"? Also, is
there a default value? What is a good sample value?-->
<optional>
<attribute name="shutdown_timeout" rha:description="Timeout period,
in milliseconds, to allow a service to respond during a
shutdown." rha:sample="5000">
<data type="nonNegativeInteger"/>
</attribute>
</optional>
<optional>
<attribute name="ccsd_poll" rha:description="" rha:sample=""
rha:default="1000">
<data type="nonNegativeInteger"/>
</attribute>
</optional>
<optional>
<attribute name="debug_mask" rha:description="" rha:sample=""/>
</optional>
<optional>
<attribute name="port">
<data type="nonNegativeInteger"/>
</attribute>
</optional>
<optional>
<attribute name="cluster_id">
<data type="nonNegativeInteger"/>
</attribute>
</optional>
<optional>
<attribute name="hash_cluster_id" rha:description="Enable stronger hashing of cluster ID to avoid collisions." />
</optional>
<optional>
<attribute name="nodename" rha:description="Local node name; this is set internally by cman-preconfig and should never be set by a user."/>
</optional>
<optional>
<attribute name="broadcast" rha:description="enable cman broadcast" rha:default="no"/>
</optional>
<optional>
<attribute name="keyfile" rha:description=""/>
</optional>
<optional>
<attribute name="disable_openais"/>
</optional>
<optional>
<element name="multicast" rha:description="The multicast element
provides the ability for a user to specify a multicast address
instead of using the multicast address generated by cman. If
a user does not specify a multicast address, cman creates one. It
forms the upper 16 bits of the multicast address with 239.192 and
forms the lower 16 bits based on the cluster ID."> <attribute
name="addr" rha:description="A multicast address specified
by a user. If you do specify a multicast address, you should
use the 239.192.x.x series that cman uses. Otherwise, using a
multicast address outside that range may cause unpredictable
results. For example, using 224.0.0.x (All hosts on the network)
may not be routed correctly, or even routed at all by some
hardware." rha:sample="239.192.0.1"/>
</element>
</optional>
</element>
</optional>
<!-- end cman block -->

<!-- totem block -->
<optional>
<element name="totem" rha:description="OpenAIS msg transport
protocol">
<optional>
<attribute name="consensus" rha:description="This is a timeout value
that specifies how many milliseconds to wait for consensus
to be achieved before starting a new round of membership
configuration." rha:default="200" rha:sample="235"/>
</optional>
<optional>
<attribute name="join" rha:description="This is a timeout value that
specifies how many milliseconds to wait for join messages in
the membership protocol." rha:default="100" rha:sample="95"/>
</optional>
<optional>
<attribute name="token" rha:description="This is a timeout value
that specifies how many milliseconds elapse before a
token loss is declared after not receiving a token. This
is the time spent detecting a failure of a processor in
the current configuration. Reforming a new configuration
takes about 50 milliseconds in addition to this
timeout." rha:default="5000" rha:sample="5300"/>
</optional>
<optional>
<attribute name="fail_recv_const" />
</optional>
<optional>
<attribute name="token_retransmits_before_loss_const"
rha:description="This value identifies how many token retransmits
should be attempted before forming a new configuration. If
this value is set, retransmit and hold will be automatically
calculated from retransmits_before_loss and token." rha:default="4"
rha:sample="5"/>
</optional>
      <!-- FIXME: The following description was adapted from the man page.
      It may be too long for the schema document. Consider cutting text
      after the second sentence and referring the reader to the openais.conf
      man page. -->
<optional>
<attribute name="rrp_mode" rha:description="This attribute
specifies the redundant ring protocol mode. Its value can be
set to active, passive, or none. Active replication offers
slightly lower latency from transmit to delivery in faulty
network environments but with less performance. Passive
replication may nearly double the speed of the totem protocol
if the protocol doesn't become cpu bound. The final option is
none, in which case only one network interface is used to
operate the totem protocol. If only one interface directive is
specified, none is automatically chosen. If multiple interface
directives are specified, only active or passive may be
chosen." rha:sample="active"/>
</optional>
<optional>
        <attribute name="secauth" rha:description="This attribute specifies
          that HMAC/SHA1 authentication should be used to authenticate all
          messages. It further specifies that all data should be encrypted
          with the sober128 encryption algorithm to protect data from
          eavesdropping. For more information on setting this value, refer
          to the openais.conf man page." rha:default="on" rha:sample=""/>
</optional>
<optional>
<attribute name="keyfile" rha:description="" rha:sample=""/>
</optional>
<!-- multicast address -->
<zeroOrMore>
<element name="interface" rha:description="Defines Totem interface options. corosync.conf(5)" rha:sample="">
<optional>
            <attribute name="ringnumber" rha:description="Sets the ring number for the interface in RRP mode. corosync.conf(5)" rha:sample=""/>
</optional>
<optional>
<attribute name="bindnetaddr" rha:description="Specifies the address to which the corosync executive should bind. corosync.conf(5)" rha:sample=""/>
</optional>
<optional>
<attribute name="mcastaddr" rha:description="Defines the multicast address used by corosync for this interface. corosync.conf(5)" rha:sample=""/>
</optional>
<optional>
<attribute name="mcastport" rha:description="Specifies the UDP port number when using multicast. corosync.conf(5)" rha:sample=""/>
</optional>
<optional>
<attribute name="broadcast" rha:description="If set to yes, use broadcast instead of multicast for communication. corosync.conf(5)" rha:sample=""/>
</optional>
</element>
</zeroOrMore>
</element>
</optional>
<!-- end totem block -->

<!-- quorumd block -->
<optional>
<element name="quorumd" rha:description="This element and its
attributes define parameters for the quorum disk daemon,
quorumd. qdisk(5).">
<optional>
<attribute name="interval" rha:description="The frequency of
read/write cycles, in seconds. qdisk(5)." rha:sample=""/>
</optional>
<optional>
<attribute name="tko" rha:description="The number of cycles a node
must miss to be declared dead. qdisk(5)." rha:sample=""/>
</optional>
<optional>
<attribute name="votes" rha:description="The number of votes the
quorum daemon advertises to CMAN when it has a high enough
score. qdisk(5)." rha:sample=""/>
</optional>
<optional>
<attribute name="min_score" rha:description="The minimum score for a
node to be considered alive. If omitted or set to 0, the default
function, floor((n+1)/2), is used, where n is the sum of the
heuristics scores. The Minimum Score value must never exceed the
sum of the heuristic scores; otherwise, the quorum disk cannot
be available. qdisk(5)." rha:sample=""/>
</optional>
<optional>
<attribute name="device" rha:description="The storage device the
quorum daemon uses. The device must be the same on all
nodes. qdisk(5)." rha:sample=""/>
</optional>
<optional>
<attribute name="label" rha:description="Specifies the quorum disk
label created by the mkqdisk utility. If this field contains an
entry, the label overrides the Device field. If this field is
used, the quorum daemon reads /proc/partitions and checks for
qdisk signatures on every block device found, comparing the
label against the specified label. This is useful in configurations
where the quorum device name differs among nodes. qdisk(5)." rha:sample=""/>
</optional>
<optional>
<attribute name="cman_label" rha:description="This is the name used by CMAN for the quorum device instead of the device name. qdisk(5)." rha:sample=""/>
</optional>
<optional>
<attribute name="status_file" rha:description="Debugging file. qdisk(5)." rha:sample=""/>
</optional>
<optional>
<attribute name="scheduler" rha:description="Scheduler. qdisk(5)." rha:sample=""/>
</optional>
<optional>
<attribute name="reboot" rha:description="Reboot if our score drops too low. qdisk(5)." rha:sample=""/>
</optional>
<optional>
<attribute name="priority" rha:description="Scheduler priority. qdisk(5)." rha:sample=""/>
</optional>
<optional>
<attribute name="stop_cman" rha:description="Stop cman if the quorum disk cannot be found during startup. qdisk(5)." rha:sample=""/>
</optional>
<optional>
<attribute name="paranoid" rha:description="Reboot if we are running too slowly. qdisk(5)." rha:sample=""/>
</optional>
<optional>
<attribute name="allow_kill" rha:description="Instruct cman to evict nodes which are not updating the quorum disk. qdisk(5)." rha:sample=""/>
</optional>
<optional>
<attribute name="max_error_cycles" rha:description="Die after this many cycles which receive I/O errors. qdisk(5)." rha:sample=""/>
</optional>
<optional>
<attribute name="io_timeout" rha:description="Die if we cannot get a write out to disk after interval*tko. qdisk(5)." rha:sample=""/>
</optional>
<optional>
<attribute name="master_wins" rha:description="Enable master-wins mode (two node clusters). qdisk(5)." rha:sample=""/>
</optional>
<zeroOrMore>
<element name="heuristic" rha:description="Defines a heuristic. qdisk(5).">
<attribute name="program" rha:description="The program used to
determine if this heuristic is alive. This can be anything that
can be executed by /bin/sh -c. A return value of 0 indicates
success; anything else indicates failure." rha:sample=""/>
<optional>
<attribute name="score" rha:description="The weight of this
heuristic. Be careful when determining scores for
heuristics." rha:default="1" rha:sample=""/>
</optional>
<optional>
<attribute name="interval" rha:description="The frequency (in
seconds) at which the heuristic is polled. qdisk(5)." rha:default="2"
rha:sample=""/>
</optional>
<optional>
<attribute name="tko" rha:description="The number of consecutive failures before a heuristic is discounted. qdisk(5)." rha:sample=""/>
</optional>
</element>
</zeroOrMore>
</element>
</optional>
<!-- end quorumd block -->

<!-- fence_daemon block -->
<optional>
<element name="fence_daemon" rha:description="Configuration for fenced
daemon. fenced(8)">
<optional>
<attribute name="post_join_delay" rha:description="Number of seconds
the daemon will wait before fencing any victims after a node joins
the fence domain. fenced(8)"/>
</optional>
<optional>
<attribute name="post_fail_delay" rha:description="Number of seconds
the daemon will wait before fencing any victims after a node
fails. fenced(8)"/>
</optional>
<optional>
<attribute name="override_path" rha:description="Location of a FIFO
used for communication between fenced and fence_ack_manual.
fenced(8)"/>
</optional>
<optional>
<attribute name="override_time" rha:description="Number of seconds to
wait for a manual override after a failed fencing attempt before
the next attempt. fenced(8)"/>
</optional>
<optional>
<attribute name="clean_start" rha:description="Set to 1 to disable
startup fencing. fenced(8)"/>
</optional>

<optional>
<attribute name="skip_undefined" rha:description="Set to 1 to disable
startup fencing of nodes with no fence methods defined.
fenced(8)"/>
</optional>
</element>
</optional>
<!-- end fence_daemon block -->

<!-- fence_xvmd block -->
<optional>
<element name="fence_xvmd" rha:description="Fence_xvm daemon. The
fence_xvmd fence device is an I/O fencing host that resides
on dom0 and is used in conjunction with the fence_xvm fencing
agent. Together, these two programs fence Xen virtual machines
that are in a cluster. There is a requirement that the parent
dom0s are also a part of their own CMAN/OpenAIS based cluster,
and that the dom0 cluster does not share any members with the domU
cluster. Furthermore, the dom0 cluster is required to have fencing
if domU recovery is expected to be automatic.">
<optional>
<attribute name="debug" rha:description="" >
<data type="integer"/>
</attribute>
</optional>
<optional>
<attribute name="port" rha:description="" >
<data type="integer"/>
</attribute>
</optional>
<optional>
<attribute name="use_uuid" rha:description="" />
</optional>
<optional>
<attribute name="multicast_address" rha:description="" />
</optional>
<optional>
<attribute name="auth" rha:description="" />
</optional>
<optional>
<attribute name="hash" rha:description="" />
</optional>
<optional>
<attribute name="uri" rha:description="" />
</optional>
<optional>
<attribute name="key_file" rha:description="" />
</optional>
<optional>
<attribute name="multicast_interface" rha:description="" />
</optional>
</element>
</optional>
<!-- end fence_xvmd block -->

<!-- dlm block -->
<optional>
<element name="dlm" rha:description="Configuration for dlm and
dlm_controld daemon. dlm_controld(8)">

<optional>
<attribute name="log_debug" rha:description="Set to 1 to enable
dlm kernel debugging messages. dlm_controld(8)"/>
</optional>

<optional>
<attribute name="timewarn" rha:description="Number of centiseconds
a lock is blocked before notifying dlm_controld deadlock code.
dlm_controld(8)"/>
</optional>

<optional>
<attribute name="protocol" rha:description="The dlm lowcomms protocol.
dlm_controld(8)"/>
</optional>

<optional>
<attribute name="enable_fencing" rha:description="Fencing recovery
dependency. dlm_controld(8)" />
</optional>

<optional>
<attribute name="enable_quorum" rha:description="Quorum recovery
dependency. dlm_controld(8)"/>
</optional>

<optional>
<attribute name="enable_deadlk" rha:description="Deadlock detection
capability. dlm_controld(8)"/>
</optional>

<optional>
<attribute name="enable_plock" rha:description="Cluster fs posix
lock capability. dlm_controld(8)"/>
</optional>

<optional>
<attribute name="plock_debug" rha:description="Set to 1 to enable
posix lock debugging. dlm_controld(8)"/>
</optional>

<optional>
<attribute name="plock_rate_limit" rha:description="Limit the rate of
plock operations. dlm_controld(8)"/>
</optional>

<optional>
<attribute name="plock_ownership" rha:description="Set to 1/0 to
enable/disable plock ownership. dlm_controld(8)"/>
</optional>

<optional>
<attribute name="drop_resources_time" rha:description="Plock ownership
drop resources time. dlm_controld(8)"/>
</optional>

<optional>
<attribute name="drop_resources_count" rha:description="Plock ownership
drop resources count. dlm_controld(8)"/>
</optional>

<optional>
<attribute name="drop_resources_age" rha:description="Plock ownership
drop resources age. dlm_controld(8)"/>
</optional>

<optional>
<zeroOrMore>
<element name="lockspace" rha:description="Individual lockspace
configuration. dlm_controld(8)">
<attribute name="name" rha:description="Name of the lockspace.
dlm_controld(8)"/>

<optional>
<attribute name="nodir" rha:description="Set to 1 to disable the
internal resource directory. dlm_controld(8)"/>
</optional>

<optional>
<zeroOrMore>
<element name="master" rha:description="Defines a master node.
dlm_controld(8)">

              <attribute name="name" rha:description="The name of a node that
                should master resources/locks. dlm_controld(8)"/>

<optional>
<attribute name="weight" rha:description="The proportion of
resources this node should master. dlm_controld(8)"/>
</optional>
</element>
</zeroOrMore>
</optional>

</element>
</zeroOrMore>
</optional>
</element>
</optional>
<!-- end dlm block -->

<!-- gfs_controld block -->
<optional>
<element name="gfs_controld" rha:description="Configuration for
gfs_controld daemon. gfs_controld(8)">

<optional>
<attribute name="enable_withdraw" rha:description="Set to 1/0 to
enable/disable a response to a withdraw. gfs_controld(8)"/>
</optional>

<optional>
<attribute name="enable_plock" rha:description="Cluster fs posix
lock capability. gfs_controld(8)"/>
</optional>

<optional>
<attribute name="plock_debug" rha:description="Set to 1 to enable
posix lock debugging. gfs_controld(8)"/>
</optional>

<optional>
<attribute name="plock_rate_limit" rha:description="Limit the rate of
plock operations. gfs_controld(8)"/>
</optional>

<optional>
<attribute name="plock_ownership" rha:description="Set to 1/0 to
enable/disable plock ownership. gfs_controld(8)"/>
</optional>

<optional>
<attribute name="drop_resources_time" rha:description="Plock ownership
drop resources time. gfs_controld(8)"/>
</optional>

<optional>
<attribute name="drop_resources_count" rha:description="Plock ownership
drop resources count. gfs_controld(8)"/>
</optional>

<optional>
<attribute name="drop_resources_age" rha:description="Plock ownership
drop resources age. gfs_controld(8)"/>
</optional>

</element>
</optional>
<!-- end gfs_controld block -->

<!-- group block -->
<optional>
<element name="group" rha:description="Defines groupd configuration.
groupd(8)">
<optional>
<attribute name="groupd_compat" rha:description="Enable compatibility with
cluster2 nodes. groupd(8)"/>
</optional>
</element>
</optional>
<!-- end group block -->

<!-- logging block -->
<optional>
<element name="logging" rha:description="Defines global logging
configuration, and contains daemon-specific configuration.
cluster.conf(5)">

<optional>
<attribute name="to_syslog" rha:description="Set to yes/no to
enable/disable messages to syslog. cluster.conf(5)"/>
</optional>

<optional>
<attribute name="to_logfile" rha:description="Set to yes/no to
enable/disable messages to log file. cluster.conf(5)"/>
</optional>

<optional>
<attribute name="syslog_facility" rha:description="The facility
used for syslog messages. cluster.conf(5)"/>
</optional>

<optional>
<attribute name="syslog_priority" rha:description="Messages at this
level and higher are sent to syslog. cluster.conf(5)"/>
</optional>

<optional>
<attribute name="logfile_priority" rha:description="Messages at this
level and higher are written to log file. cluster.conf(5)"/>
</optional>

<optional>
<attribute name="logfile" rha:description="The log file path name.
cluster.conf(5)"/>
</optional>

<optional>
<attribute name="debug" rha:description="Set to on to enable debugging
messages in log file. cluster.conf(5)"/>
</optional>

<zeroOrMore>
<element name="logging_daemon" rha:description="Defines
daemon-specific logging configuration. cluster.conf(5)">

<attribute name="name" rha:description="The daemon name.
cluster.conf(5)"/>

<optional>
<attribute name="subsys" rha:description="A corosync subsystem name.
cluster.conf(5)"/>
</optional>

<optional>
<attribute name="to_syslog" rha:description="Same as global."/>
</optional>
<optional>
<attribute name="to_logfile" rha:description="Same as global."/>
</optional>
<optional>
<attribute name="syslog_facility" rha:description="Same as global."/>
</optional>
<optional>
<attribute name="syslog_priority" rha:description="Same as global."/>
</optional>
<optional>
<attribute name="logfile_priority" rha:description="Same as global."/>
</optional>
<optional>
<attribute name="logfile" rha:description="Same as global."/>
</optional>
<optional>
<attribute name="debug" rha:description="Same as global."/>
</optional>

</element>
</zeroOrMore>

</element>
</optional>
<!-- end logging block -->

<!-- clusternodes block -->
<element name="clusternodes" rha:description="Contains all cluster
node definitions. cluster.conf(5)">

<zeroOrMore>
<element name="clusternode" rha:description="Defines cluster node
properties, and contains other node specific configuration.
cluster.conf(5)">

<attribute name="name" rha:description="The hostname or IP address
of the node. cluster.conf(5)"/>

<attribute name="nodeid" rha:description="A unique integer to use
as a node identifier. cluster.conf(5)">
<data type="positiveInteger"/>
</attribute>

<optional>
<attribute name="votes" rha:description="The number of votes the
node contributes to quorum. cman(5)">
<data type="positiveInteger"/>
</attribute>
</optional>

<optional>
<attribute name="weight" rha:description="The dlm locking weight.
dlm_controld(8)"/>
</optional>

<optional>
<element name="altname" rha:description="Defines a second network
interface to use for corosync redundant ring mode. cman(5)">

<attribute name="name" rha:description="A second hostname or IP
address of the node. cman(5)"/>

<optional>
<attribute name="port" rha:description="The network port to use
on the second interface. cman(5)"/>
</optional>

<optional>
<attribute name="mcast" rha:description="The multicast address
to use on the second interface. cman(5)"/>
</optional>
</element>
</optional>

<interleave>
<optional>
<ref name="FENCE"/>
</optional>
<optional>
<ref name="UNFENCE"/>
</optional>
</interleave>

</element>
</zeroOrMore>
</element>
<!-- end clusternode block -->

<!-- fencedevices block -->
<optional>
<element name="fencedevices" rha:description="Contains all fence
device definitions. fenced(8)">
<zeroOrMore>
<element name="fencedevice" rha:description="Defines fence device
properties. fenced(8)">

<attribute name="name" rha:description="A name that is used to
reference this fence device from clusternode fence section.
fenced(8)">
<data type="ID"/>
</attribute>

<attribute name="agent" rha:description="The fence agent to be
used. fenced(8)"/>

<ref name="FENCEDEVICEOPTIONS"/>

</element>
</zeroOrMore>
</element>
</optional>
<!-- end fencedevices block -->

<!-- rm block -->
<optional>
<element name="rm" rha:description="This element and its attributes
define resources (for example an IP address) required to create HA
cluster services, the HA cluster services themselves, and failover
domains for the HA cluster services.">
<optional>
<!-- FIXME: The following text needs clarifying. What is meant by
"...for all levels less than the selected."? -->
<attribute name="log_level" rha:description="An integer 0-7,
inclusive, for all levels less than the selected.
0, system is unusable, emergency;
1, action must be taken immediately;
2, critical conditions;
3, error conditions;
4, warning conditions;
5, normal but significant condition;
6, informational;
7, debug-level messages." rha:sample="6">
<data type="integer"/>
</attribute>
</optional>
<optional>
<attribute name="status_child_max" rha:description="Maximum number of status child threads." rha:sample="">
<data type="integer"/>
</attribute>
</optional>
<optional>
<attribute name="status_poll_interval" rha:description="Scan the resource tree every X seconds for resources which need to be checked."
rha:sample="">
<data type="integer"/>
</attribute>
</optional>
<optional>
<attribute name="transition_throttling" rha:description="During transitions, keep the event processor alive for this many seconds."
rha:sample="">
<data type="integer"/>
</attribute>
</optional>
<optional>
<attribute name="central_processing" rha:description="Enable central processing mode (requires cluster-wide shut down and restart of rgmanager.)."
rha:sample="">
<data type="integer"/>
</attribute>
</optional>
<optional>
<attribute name="log_facility" rha:description="The facility is one
of the following keywords: auth, authpriv, cron, daemon, kern,
lpr, mail, news, syslog, user, uucp and local0 through local7"/>
</optional>
<interleave>
<optional>
<element name="failoverdomains" rha:description="Failover domain definitions.">
<zeroOrMore>
<element name="failoverdomain" rha:description="Specifies
properties of a specific failover domain">
<attribute name="name" rha:description="The name of the failover
domain." rha:sample="foo"/>
<optional>
<attribute name="ordered" rha:description="Set value to 1 if
the failover domain is ordered; set value to 0 if
unordered." rha:default="0" rha:sample="1"/>
</optional>
<optional>
<attribute name="restricted" rha:description="Set value to 1 if
the failover domain is restricted; set value to 0 if
unrestricted." rha:default="0" rha:sample="1"/>
</optional>
<optional>
<attribute name="nofailback" rha:description="Do not move service to a more preferred node if it is currently running." rha:sample=""/>
</optional>
<zeroOrMore>
<element name="failoverdomainnode" rha:description="A node in
a failover domain">
<optional>
<attribute name="priority" rha:description="A number
specifying the priority; lower numbers having higher
priority"
rha:sample="1"/>
</optional>
<attribute name="name" rha:description="Name of the node."
rha:sample="member2"/>
</element>
</zeroOrMore>
</element>
</zeroOrMore>
</element>
</optional> <!-- End of failoverdomains block -->
<optional>
<element name="events" rha:description="Event definitions (central_processing only).">
<zeroOrMore>
<element name="event" rha:description="Defines an event.">
<attribute name="name" rha:description="Symbolic name for an event." rha:sample=""/>
<optional>
<text/>
</optional>
<optional>
<attribute name="file" rha:description="Path to S/Lang script to execute." rha:sample=""/>
</optional>
<optional>
<attribute name="priority" rha:description="Order (1..99) of event." rha:sample=""/>
</optional>
<optional>
<attribute name="class" rha:description="Event class (service, node)." rha:sample=""/>
</optional>
<!-- Service event class attributes -->
<optional>
<attribute name="service" rha:description="(Service) The service name (service:foo) must match the specified value in order for the event script to be run." rha:sample=""/>
</optional>
<optional>
<attribute name="service_state" rha:description="(Service) The service's state must match the specified value in order for the script to be run (started, stopped, disabled, failed)." rha:sample=""/>
</optional>
<optional>
<attribute name="service_owner" rha:description="(Service) The service owner must match the specified value in order for the event script to be run." rha:sample=""/>
</optional>
<!-- Node event -->
<optional>
<attribute name="node" rha:description="(Node) The node name must match the specified value in order for the script to be run." rha:sample=""/>
</optional>
<optional>
<attribute name="node_id" rha:description="(Node) The node ID must match the specified value in order for the script to be run." rha:sample=""/>
</optional>
<optional>
<attribute name="node_state" rha:description="(Node) The node state must match the specified value (0 or 1) in order for the script to be run." rha:sample=""/>
</optional>
<optional>
<attribute name="node_clean" rha:description="(Node) The node must have been fenced in order for the script to be run." rha:sample=""/>
</optional>
<optional>
<attribute name="node_local" rha:description="(Node) This script may only run on the current central processing node." rha:sample=""/>
</optional>
<!-- Config event attributes -->
<!-- NOT USED -->
</element>
</zeroOrMore>
</element>
</optional> <!-- End of events block -->
<optional>
<element name="resources" rha:description="Defines global resources which may be referenced in services. You may redefine actions for resources here, but child resource definitions are ignored in this section.">
<zeroOrMore>
<ref name="CHILDREN"/>
</zeroOrMore>
</element>
</optional>
<zeroOrMore>
<ref name="SERVICE"/>
</zeroOrMore>
<zeroOrMore>
<ref name="VM"/>
</zeroOrMore>
</interleave>
</element>
</optional>

<!-- clvmd block -->
<optional>
<element name="clvmd" rha:description="The clvmd element contains
attributes that define parameters for the cluster LVM daemon.">
<optional>
<attribute name="interface" rha:description="The interface attribute
tells clvmd which cluster interface it should use for internode
communications and locking. Valid values for this depend on
how the daemon is configured at compile-time, but are typically
cman, corosync or openais." rha:sample="cman"/>
</optional>
</element>
</optional>

</interleave>


</element> <!-- cluster end -->
</start>


<!--Beginning of resource definitions-->
<!-- Autogenerated. Paste in to cluster.ng in the 'resources' section -->

<define name="SERVICE">
<element name="service" rha:description="Defines a service (resource group).">
<choice>
<group>
<!-- rgmanager specific stuff -->
<attribute name="ref" rha:description="Reference to existing service resource in the resources section."/>
</group>
<group>
<attribute name="name" rha:description="Name."/>
<optional>
<attribute name="domain" rha:description="Failover domain."/>
</optional>
<optional>
<attribute name="autostart" rha:description="Automatic start after quorum formation"/>
</optional>
<optional>
<attribute name="exclusive" rha:description="Exclusive service."/>
</optional>
<optional>
<attribute name="nfslock" rha:description="Enable NFS lock workarounds."/>
</optional>
<optional>
<attribute name="nfs_client_cache" rha:description="Enable e
 

Thread Tools




All times are GMT. The time now is 06:55 AM.

VBulletin, Copyright ©2000 - 2014, Jelsoft Enterprises Ltd.
Content Relevant URLs by vBSEO ©2007, Crawlability, Inc.
Copyright 2007 - 2008, www.linux-archive.org